cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From wid...@apache.org
Subject [50/50] [abbrv] git commit: updated refs/heads/reporter to 28300b5
Date Wed, 08 Jul 2015 19:20:25 GMT
Proposal of a Usage Reporter / call-home functionality for CloudStack

With this commit the Management Server will by default generate an anonymous Usage
report every 7 (seven) days and submit this information back to the Apache CloudStack project.

These anonymous reports do NOT contain any information about Instance names, subnets, etc.
They only
contain numbers about how CloudStack is being used.

This information is vital for the project to gain more insight into how CloudStack is being
used.

Users can turn the reporting off by setting usage.report.interval to 0 (zero)


Project: http://git-wip-us.apache.org/repos/asf/cloudstack/repo
Commit: http://git-wip-us.apache.org/repos/asf/cloudstack/commit/28300b5d
Tree: http://git-wip-us.apache.org/repos/asf/cloudstack/tree/28300b5d
Diff: http://git-wip-us.apache.org/repos/asf/cloudstack/diff/28300b5d

Branch: refs/heads/reporter
Commit: 28300b5d08c4512743453b67a96af2277f3ee7a2
Parents: 616b011
Author: Wido den Hollander <wido@widodh.nl>
Authored: Wed Jul 8 21:17:30 2015 +0200
Committer: Wido den Hollander <wido@widodh.nl>
Committed: Wed Jul 8 21:17:47 2015 +0200

----------------------------------------------------------------------
 .../src/com/cloud/upgrade/dao/VersionDao.java   |   4 +
 .../com/cloud/upgrade/dao/VersionDaoImpl.java   |   9 +
 pom.xml                                         |   1 +
 reporter/README.md                              |  18 +
 reporter/usage-report-collector.py              |  64 +++
 server/pom.xml                                  |  10 +
 .../spring-server-core-managers-context.xml     |   2 +
 server/src/com/cloud/configuration/Config.java  |   5 +-
 .../cloudstack/report/AtomicGsonAdapter.java    |  48 ++
 .../apache/cloudstack/report/UsageReporter.java | 470 +++++++++++++++++++
 setup/db/db/schema-452to460.sql                 |   2 +
 11 files changed, 632 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cloudstack/blob/28300b5d/engine/schema/src/com/cloud/upgrade/dao/VersionDao.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/upgrade/dao/VersionDao.java b/engine/schema/src/com/cloud/upgrade/dao/VersionDao.java
index e280e0b..1a60f36 100644
--- a/engine/schema/src/com/cloud/upgrade/dao/VersionDao.java
+++ b/engine/schema/src/com/cloud/upgrade/dao/VersionDao.java
@@ -16,6 +16,8 @@
 // under the License.
 package com.cloud.upgrade.dao;
 
+import java.util.List;
+
 import com.cloud.upgrade.dao.VersionVO.Step;
 import com.cloud.utils.db.GenericDao;
 
@@ -23,4 +25,6 @@ public interface VersionDao extends GenericDao<VersionVO, Long> {
     VersionVO findByVersion(String version, Step step);
 
     String getCurrentVersion();
+
+    List<VersionVO> getAllVersions();
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/28300b5d/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
index bc9c2f0..a3e5eb0 100644
--- a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
+++ b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java
@@ -170,4 +170,13 @@ public class VersionDaoImpl extends GenericDaoBase<VersionVO, Long>
implements V
         }
 
     }
+
+    @Override
+    @DB
+    public List<VersionVO> getAllVersions() {
+        SearchCriteria<VersionVO> sc = AllFieldsSearch.create();
+        sc.setParameters("step", "Complete");
+
+        return listBy(sc);
+    }
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/28300b5d/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index e76d747..0511e91 100644
--- a/pom.xml
+++ b/pom.xml
@@ -64,6 +64,7 @@
     <cs.ehcache.version>2.6.9</cs.ehcache.version>
     <cs.gson.version>1.7.2</cs.gson.version>
     <cs.guava-testlib.version>18.0</cs.guava-testlib.version>
+    <cs.gson.version>2.3.1</cs.gson.version>
     <cs.guava.version>18.0</cs.guava.version>
     <cs.xapi.version>6.2.0-3.1</cs.xapi.version>
     <cs.httpclient.version>4.3.6</cs.httpclient.version>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/28300b5d/reporter/README.md
----------------------------------------------------------------------
diff --git a/reporter/README.md b/reporter/README.md
new file mode 100644
index 0000000..6453fa4
--- /dev/null
+++ b/reporter/README.md
@@ -0,0 +1,18 @@
+# CloudStack Usage Report
+
+This directory contains the CloudStack reporter webservice used by the Apache CloudStack
project
+to gather anonymous statistical information about CloudStack deployments.
+
+Since version <FIX ME!!> the management server sends an anonymized Usage Report
to the
+project every 7 days.
+
+This information is used to gain insight into how CloudStack is being used.
+
+Turning this Usage Reporting functionality off can be done in the Global Settings by setting
+'usage.report.interval' to 0.
+
+# The webservice
+The Python Flask application in this directory is the webservice running on https://reports.cloudstack.apache.org/
(FIX ME?)
+and stores all the incoming information in an ElasticSearch database.
+
+Since Apache CloudStack is Open Source we show not only how we generate the report, but also
how we process it.

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/28300b5d/reporter/usage-report-collector.py
----------------------------------------------------------------------
diff --git a/reporter/usage-report-collector.py b/reporter/usage-report-collector.py
new file mode 100755
index 0000000..500a4d2
--- /dev/null
+++ b/reporter/usage-report-collector.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+from flask import abort, Flask, request, Response
+from elasticsearch import Elasticsearch
+import json
+import time
+
+def json_response(response):
+    return json.dumps(response, indent=2) + "\n", 200, {'Content-Type': 'application/json;
charset=utf-8'}
+
+def generate_app(config=None):
+    app = Flask(__name__)
+
+    @app.route('/report/<unique_id>', methods=['POST'])
+    def report(unique_id):
+        # We expect JSON data, so if the Content-Type doesn't match JSON data we throw an
error
+        if 'Content-Type' in request.headers:
+            if request.headers['Content-Type'] != 'application/json':
+                abort(417, "No or incorrect Content-Type header was supplied")
+
+        index = "cloudstack-%s" % time.strftime("%Y.%m.%d", time.gmtime())
+        timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
+
+        es = Elasticsearch()
+        es.indices.create(index=index, ignore=400)
+
+        report = json.loads(request.data)
+        report["unique_id"] = unique_id
+        report["timestamp"] = timestamp
+
+        es.index(index=index, doc_type="usage-report", body=json.dumps(report), timestamp=timestamp,
refresh=True)
+
+        response = {}
+        return json_response(response)
+
+    return app
+
+
+app = generate_app()
+
+# Only run the App if this script is invoked from a Shell
+if __name__ == '__main__':
+    app.debug = True
+    app.run(host='0.0.0.0', port=8088)
+
+# Otherwise provide a variable called 'application' for mod_wsgi
+else:
+    application = app

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/28300b5d/server/pom.xml
----------------------------------------------------------------------
diff --git a/server/pom.xml b/server/pom.xml
index e68e678..0b279f4 100644
--- a/server/pom.xml
+++ b/server/pom.xml
@@ -140,6 +140,16 @@
       <artifactId>opensaml</artifactId>
       <version>${cs.opensaml.version}</version>
     </dependency>
+    <dependency>
+      <groupId>com.google.code.gson</groupId>
+      <artifactId>gson</artifactId>
+      <version>${cs.gson.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>${cs.guava.version}</version>
+    </dependency>
   </dependencies>
   <build>
     <testResources>

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/28300b5d/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
----------------------------------------------------------------------
diff --git a/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
b/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
index e39d918..594c2b8 100644
--- a/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
+++ b/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml
@@ -224,6 +224,8 @@
 
     <bean id="statsCollector" class="com.cloud.server.StatsCollector" />
 
+    <bean id="usageReporter" class="org.apache.cloudstack.report.UsageReporter" />
+
     <bean id="storagePoolAutomationImpl" class="com.cloud.storage.StoragePoolAutomationImpl"
/>
 
     <bean id="domainManagerImpl" class="com.cloud.user.DomainManagerImpl" />

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/28300b5d/server/src/com/cloud/configuration/Config.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java
index ca89881..bc2d93c 100644
--- a/server/src/com/cloud/configuration/Config.java
+++ b/server/src/com/cloud/configuration/Config.java
@@ -1999,7 +1999,10 @@ public enum Config {
     // StatsCollector
     StatsOutPutGraphiteHost("Advanced", ManagementServer.class, String.class, "stats.output.uri",
"", "URI to additionally send StatsCollector statistics to", null),
 
-    SSVMPSK("Hidden", ManagementServer.class, String.class, "upload.post.secret.key", "",
"PSK with SSVM", null);
+    SSVMPSK("Hidden", ManagementServer.class, String.class, "upload.post.secret.key", "",
"PSK with SSVM", null),
+
+    // Usage Reporting service
+    UsageReportInterval("Advanced", ManagementServer.class, Integer.class, "usage.report.interval",
"7", "Interval (days) between sending anonymous Usage Reports back to the CloudStack project",
null);
 
     private final String _category;
     private final Class<?> _componentClass;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/28300b5d/server/src/org/apache/cloudstack/report/AtomicGsonAdapter.java
----------------------------------------------------------------------
diff --git a/server/src/org/apache/cloudstack/report/AtomicGsonAdapter.java b/server/src/org/apache/cloudstack/report/AtomicGsonAdapter.java
new file mode 100644
index 0000000..23d83f1
--- /dev/null
+++ b/server/src/org/apache/cloudstack/report/AtomicGsonAdapter.java
@@ -0,0 +1,48 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.report;
+
+import com.google.gson.TypeAdapter;
+import com.google.gson.stream.JsonReader;
+import com.google.gson.stream.JsonWriter;
+import com.google.common.util.concurrent.AtomicLongMap;
+import java.util.Map;
+import java.io.IOException;
+
+public class AtomicGsonAdapter extends TypeAdapter<AtomicLongMap> {
+
+    public AtomicLongMap<Object> read(JsonReader reader) throws IOException {
+        reader.nextNull();
+        return null;
+    }
+
+    public void write(JsonWriter writer, AtomicLongMap value) throws IOException {
+        if (value == null) {
+            writer.nullValue();
+            return;
+        }
+
+        @SuppressWarnings("unchecked")
+        Map <String, Long> map = value.asMap();
+
+        writer.beginObject();
+        for (Map.Entry<String, Long> entry : map.entrySet()) {
+            writer.name(entry.getKey()).value(entry.getValue());
+        }
+        writer.endObject();
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/28300b5d/server/src/org/apache/cloudstack/report/UsageReporter.java
----------------------------------------------------------------------
diff --git a/server/src/org/apache/cloudstack/report/UsageReporter.java b/server/src/org/apache/cloudstack/report/UsageReporter.java
new file mode 100644
index 0000000..4656cc4
--- /dev/null
+++ b/server/src/org/apache/cloudstack/report/UsageReporter.java
@@ -0,0 +1,470 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.report;
+
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.HashMap;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.net.URL;
+import java.net.HttpURLConnection;
+import java.net.SocketTimeoutException;
+import java.net.MalformedURLException;
+import java.net.ProtocolException;
+import java.io.OutputStreamWriter;
+import java.io.IOException;
+
+import javax.inject.Inject;
+
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.cloudstack.managed.context.ManagedContextRunnable;
+
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+
+import org.apache.commons.codec.digest.DigestUtils;
+
+import com.cloud.host.HostVO;
+import com.cloud.host.dao.HostDao;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.vm.UserVmVO;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
+import com.cloud.utils.db.SearchCriteria;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.component.ManagerBase;
+import com.cloud.utils.component.ComponentMethodInterceptable;
+import com.cloud.utils.concurrency.NamedThreadFactory;
+import com.cloud.utils.db.DB;
+import com.cloud.utils.db.TransactionLegacy;
+import com.cloud.upgrade.dao.VersionDao;
+import com.cloud.upgrade.dao.VersionVO;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.DiskOfferingVO;
+import com.google.gson.Gson;
+import com.google.gson.GsonBuilder;
+import com.google.common.util.concurrent.AtomicLongMap;
+
+@Component
+public class UsageReporter extends ManagerBase implements ComponentMethodInterceptable {
+    public static final Logger s_logger = Logger.getLogger(UsageReporter.class.getName());
+
+    /* !FIX ME! This should point to a Apache Infra host with SSL! */
+    private String reportHost = "http://cs-report.widodh.nl:8088/report";
+
+    private String uniqueID = null;
+
+    private static UsageReporter s_instance = null;
+
+    private ScheduledExecutorService _executor = null;
+
+    @Inject
+    private ConfigurationDao _configDao;
+    @Inject
+    private HostDao _hostDao;
+    @Inject
+    private ClusterDao _clusterDao;
+    @Inject
+    private PrimaryDataStoreDao _storagePoolDao;
+    @Inject
+    private DataCenterDao _dataCenterDao;
+    @Inject
+    private UserVmDao _userVmDao;
+    @Inject
+    private VMInstanceDao _vmInstance;
+    @Inject
+    private VersionDao _versionDao;
+    @Inject
+    private DiskOfferingDao _diskOfferingDao;
+
+    int usageReportInterval = -1;
+
+    public static UsageReporter getInstance() {
+        return s_instance;
+    }
+
+    public static UsageReporter getInstance(Map<String, String> configs) {
+        s_instance.init(configs);
+        return s_instance;
+    }
+
+    public UsageReporter() {
+        s_instance = this;
+    }
+
+    @Override
+    public boolean start() {
+        init(_configDao.getConfiguration());
+        return true;
+    }
+
+    private void init(Map<String, String> configs) {
+        _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("UsageReporter"));
+
+        usageReportInterval = NumbersUtil.parseInt(configs.get("usage.report.interval"),
7);
+
+        if (usageReportInterval > 0) {
+            _executor.scheduleWithFixedDelay(new UsageCollector(), 7, usageReportInterval,
TimeUnit.DAYS);
+        }
+
+        uniqueID = getUniqueId();
+    }
+
+    private void sendReport(String reportUri, String uniqueID, Map<String, Object>
reportMap) {
+
+        GsonBuilder builder = new GsonBuilder();
+
+        AtomicGsonAdapter adapter = new AtomicGsonAdapter();
+        builder.registerTypeAdapter(AtomicLongMap.class, adapter);
+
+        Gson gson = builder.create();
+        String report = gson.toJson(reportMap);
+
+        int http_timeout = 15000;
+
+        try {
+            s_logger.info("Usage Report will be send to: " + reportUri);
+            s_logger.debug("REPORT: " + report);
+
+            URL url = new URL(reportUri + "/" + uniqueID);
+
+            HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+            conn.setConnectTimeout(http_timeout);
+            conn.setReadTimeout(http_timeout);
+            conn.setRequestMethod("POST");
+            conn.setDoOutput(true);
+            conn.setRequestProperty("Content-Type", "application/json");
+            conn.setRequestProperty("Accept", "application/json");
+
+            OutputStreamWriter osw = new OutputStreamWriter(conn.getOutputStream());
+            osw.write(report);
+            osw.flush();
+            osw.close();
+
+            int resp_code = conn.getResponseCode();
+
+            if (resp_code == HttpURLConnection.HTTP_OK){
+                s_logger.info("Usage Report succesfully send to: " + reportUri);
+            } else {
+                s_logger.warn("Failed to send Usage Report: " + conn.getResponseMessage());
+            }
+
+        } catch (SocketTimeoutException e) {
+            s_logger.warn("Sending Usage Report to " + reportUri + " timed out: " + e.getMessage());
+        } catch (MalformedURLException e) {
+            s_logger.warn(reportUri + " is a invalid URL for sending Usage Report to: "+
e.getMessage());
+        } catch (ProtocolException e) {
+            s_logger.warn("Sending Usage Report failed due to a invalid protocol: " + e.getMessage());
+        } catch (IOException e) {
+            s_logger.warn("Failed to write Usage Report due to a IOException: " + e.getMessage());
+        }
+    }
+
+    @DB
+    private String getUniqueId() {
+        String unique = null;
+        Connection conn = null;
+
+        try {
+            conn = TransactionLegacy.getStandaloneConnection();
+
+            PreparedStatement pstmt = conn.prepareStatement("SELECT version,updated FROM
version ORDER BY id ASC LIMIT 1");
+            ResultSet rs = pstmt.executeQuery();
+            if (rs.next()) {
+                unique = DigestUtils.sha256Hex(rs.getString(1) + rs.getString(2));
+            } else {
+                s_logger.debug("No rows found in the version table. Unable to obtain unique
ID for this environment");
+            }
+
+            rs.close();
+        } catch (SQLException e) {
+            s_logger.debug("Unable to get the unique ID of this environment: " + e.getMessage());
+        } finally {
+            try {
+                conn.close();
+            } catch (SQLException e) {
+            }
+        }
+
+        s_logger.debug("Usage Report Unique ID is: " + unique);
+
+        return unique;
+    }
+
+    private Map<String, AtomicLongMap> getHostReport() {
+        Map<String, AtomicLongMap> hostMap = new HashMap<String, AtomicLongMap>();
+        AtomicLongMap<Object> host_types = AtomicLongMap.create();
+        AtomicLongMap<Object> host_hypervisor_type = AtomicLongMap.create();
+        AtomicLongMap<Object> host_version = AtomicLongMap.create();
+
+        SearchCriteria<HostVO> host_sc = _hostDao.createSearchCriteria();
+        List<HostVO> hosts = _hostDao.search(host_sc, null);
+        for (HostVO host : hosts) {
+            host_types.getAndIncrement(host.getType());
+            if (host.getHypervisorType() != null) {
+                host_hypervisor_type.getAndIncrement(host.getHypervisorType());
+            }
+
+            host_version.getAndIncrement(host.getVersion());
+        }
+
+        hostMap.put("version", host_version);
+        hostMap.put("hypervisor_type", host_hypervisor_type);
+        hostMap.put("type", host_types);
+
+        return hostMap;
+    }
+
+    private Map<String, AtomicLongMap> getClusterReport() {
+        Map<String, AtomicLongMap> clusterMap = new HashMap<String, AtomicLongMap>();
+        AtomicLongMap<Object> cluster_hypervisor_type = AtomicLongMap.create();
+        AtomicLongMap<Object> cluster_types = AtomicLongMap.create();
+
+        SearchCriteria<ClusterVO> cluster_sc = _clusterDao.createSearchCriteria();
+        List<ClusterVO> clusters = _clusterDao.search(cluster_sc, null);
+        for (ClusterVO cluster : clusters) {
+            if (cluster.getClusterType() != null) {
+                cluster_types.getAndIncrement(cluster.getClusterType());
+            }
+
+            if (cluster.getHypervisorType() != null) {
+                cluster_hypervisor_type.getAndIncrement(cluster.getHypervisorType());
+            }
+        }
+
+        clusterMap.put("hypervisor_type", cluster_hypervisor_type);
+        clusterMap.put("type", cluster_types);
+
+        return clusterMap;
+    }
+
+    private Map<String, AtomicLongMap> getStoragePoolReport() {
+        Map<String, AtomicLongMap> storagePoolMap = new HashMap<String, AtomicLongMap>();
+        AtomicLongMap<Object> storage_pool_types = AtomicLongMap.create();
+        AtomicLongMap<Object> storage_pool_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> storage_pool_scope = AtomicLongMap.create();
+
+        List<StoragePoolVO> storagePools = _storagePoolDao.listAll();
+        for (StoragePoolVO pool : storagePools) {
+            if (pool.getPoolType() != null) {
+                storage_pool_types.getAndIncrement(pool.getPoolType());
+            }
+
+            if (pool.getStorageProviderName() != null) {
+                storage_pool_provider.getAndIncrement(pool.getStorageProviderName());
+            }
+
+            if (pool.getScope() != null) {
+                storage_pool_scope.getAndIncrement(pool.getScope());
+            }
+        }
+
+        storagePoolMap.put("type", storage_pool_types);
+        storagePoolMap.put("provider", storage_pool_provider);
+        storagePoolMap.put("scope", storage_pool_scope);
+
+        return storagePoolMap;
+    }
+
+    private Map<String, AtomicLongMap> getDataCenterReport() {
+        Map<String, AtomicLongMap> datacenterMap = new HashMap<String, AtomicLongMap>();
+        AtomicLongMap<Object> network_type = AtomicLongMap.create();
+        AtomicLongMap<Object> dns_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> dhcp_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> lb_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> firewall_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> gateway_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> userdata_provider = AtomicLongMap.create();
+        AtomicLongMap<Object> vpn_provider = AtomicLongMap.create();
+
+        List<DataCenterVO> datacenters = _dataCenterDao.listAllZones();
+        for (DataCenterVO datacenter : datacenters) {
+            if (datacenter.getNetworkType() != null) {
+                network_type.getAndIncrement(datacenter.getNetworkType());
+            }
+
+            if (datacenter.getDnsProvider() != null) {
+                dns_provider.getAndIncrement(datacenter.getDnsProvider());
+            }
+
+            if (datacenter.getDhcpProvider() != null) {
+                dhcp_provider.getAndIncrement(datacenter.getDhcpProvider());
+            }
+
+            if (datacenter.getLoadBalancerProvider() != null) {
+                lb_provider.getAndIncrement(datacenter.getLoadBalancerProvider());
+            }
+
+            if (datacenter.getFirewallProvider() != null) {
+                firewall_provider.getAndIncrement(datacenter.getFirewallProvider());
+            }
+
+            if (datacenter.getGatewayProvider() != null) {
+                gateway_provider.getAndIncrement(datacenter.getGatewayProvider());
+            }
+
+            if (datacenter.getUserDataProvider() != null) {
+                userdata_provider.getAndIncrement(datacenter.getUserDataProvider());
+            }
+
+            if (datacenter.getVpnProvider() != null) {
+                vpn_provider.getAndIncrement(datacenter.getVpnProvider());
+            }
+        }
+
+        datacenterMap.put("network_type", network_type);
+        datacenterMap.put("dns_provider", dns_provider);
+        datacenterMap.put("dhcp_provider", dhcp_provider);
+        datacenterMap.put("lb_provider", lb_provider);
+        datacenterMap.put("firewall_provider", firewall_provider);
+        datacenterMap.put("gateway_provider", gateway_provider);
+        datacenterMap.put("userdata_provider", userdata_provider);
+        datacenterMap.put("vpn_provider", vpn_provider);
+
+        return datacenterMap;
+    }
+
+    private Map<String, AtomicLongMap> getInstanceReport() {
+
+        Map<String, AtomicLongMap> instanceMap = new HashMap<String, AtomicLongMap>();
+        AtomicLongMap<Object> hypervisor_type = AtomicLongMap.create();
+        AtomicLongMap<Object> instance_state = AtomicLongMap.create();
+        AtomicLongMap<Object> instance_type = AtomicLongMap.create();
+        AtomicLongMap<Object> ha_enabled = AtomicLongMap.create();
+        AtomicLongMap<Object> dynamically_scalable = AtomicLongMap.create();
+
+        SearchCriteria<HostVO> host_sc = _hostDao.createSearchCriteria();
+        List<HostVO> hosts = _hostDao.search(host_sc, null);
+        for (HostVO host : hosts) {
+            List<UserVmVO> vms = _userVmDao.listByLastHostId(host.getId());
+            for (UserVmVO vm : vms) {
+                VMInstanceVO vmVO = _vmInstance.findById(vm.getId());
+
+                if (vmVO.getHypervisorType() != null) {
+                    hypervisor_type.getAndIncrement(vmVO.getHypervisorType());
+                }
+
+                if (vmVO.getState() != null) {
+                    instance_state.getAndIncrement(vmVO.getState());
+                }
+
+                if (vmVO.getType() != null) {
+                    instance_type.getAndIncrement(vmVO.getType());
+                }
+
+                ha_enabled.getAndIncrement(vmVO.isHaEnabled());
+                dynamically_scalable.getAndIncrement(vmVO.isDynamicallyScalable());
+            }
+        }
+
+        instanceMap.put("hypervisor_type", hypervisor_type);
+        instanceMap.put("state", instance_state);
+        instanceMap.put("type", instance_type);
+        instanceMap.put("ha_enabled", ha_enabled);
+        instanceMap.put("dynamically_scalable", dynamically_scalable);
+
+        return instanceMap;
+    }
+
+    private Map<String, Object> getDiskOfferingReport() {
+        Map<String, Object> diskOfferingReport = new HashMap<String, Object>();
+
+        AtomicLongMap<Object> system_use = AtomicLongMap.create();
+        AtomicLongMap<Object> provisioning_type = AtomicLongMap.create();
+        AtomicLongMap<Object> use_local_storage = AtomicLongMap.create();
+
+        List<DiskOfferingVO> private_offerings = _diskOfferingDao.findPrivateDiskOffering();
+        List<DiskOfferingVO> public_offerings = _diskOfferingDao.findPublicDiskOfferings();
+
+        List<DiskOfferingVO> offerings = new ArrayList<DiskOfferingVO>();
+        offerings.addAll(private_offerings);
+        offerings.addAll(public_offerings);
+
+        long disk_size = 0;
+        for (DiskOfferingVO offering : offerings) {
+            provisioning_type.getAndIncrement(offering.getProvisioningType());
+            system_use.getAndIncrement(offering.getSystemUse());
+            use_local_storage.getAndIncrement(offering.getUseLocalStorage());
+            disk_size += offering.getDiskSize();
+        }
+
+        diskOfferingReport.put("system_use", system_use);
+        diskOfferingReport.put("provisioning_type", provisioning_type);
+        diskOfferingReport.put("use_local_storage", use_local_storage);
+        diskOfferingReport.put("avg_disk_size", disk_size / offerings.size());
+
+        return diskOfferingReport;
+    }
+
+    private Map<String, String> getVersionReport() {
+        Map<String, String> versionMap = new HashMap<String, String>();
+
+        DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'");
+
+        List<VersionVO> versions = _versionDao.getAllVersions();
+        for (VersionVO version : versions) {
+            versionMap.put(version.getVersion(), dateFormat.format(version.getUpdated()));
+        }
+
+        return versionMap;
+    }
+
+    private String getCurrentVersion() {
+        return _versionDao.getCurrentVersion();
+    }
+
+    class UsageCollector extends ManagedContextRunnable {
+        @Override
+        protected void runInContext() {
+            try {
+                s_logger.warn("UsageReporter is running...");
+
+                Map<String, Object> reportMap = new HashMap<String, Object>();
+
+                reportMap.put("hosts", getHostReport());
+                reportMap.put("clusters", getClusterReport());
+                reportMap.put("primaryStorage", getStoragePoolReport());
+                reportMap.put("zones", getDataCenterReport());
+                reportMap.put("instances", getInstanceReport());
+                reportMap.put("diskOffering", getDiskOfferingReport());
+                reportMap.put("versions", getVersionReport());
+                reportMap.put("current_version", getCurrentVersion());
+
+                sendReport(reportHost, uniqueID, reportMap);
+
+            } catch (Exception e) {
+                s_logger.warn("Failed to compile Usage Report: " + e.getMessage());
+            }
+        }
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/28300b5d/setup/db/db/schema-452to460.sql
----------------------------------------------------------------------
diff --git a/setup/db/db/schema-452to460.sql b/setup/db/db/schema-452to460.sql
index 0abd4f8..00204f0 100644
--- a/setup/db/db/schema-452to460.sql
+++ b/setup/db/db/schema-452to460.sql
@@ -398,3 +398,5 @@ CREATE TABLE `cloud`.`external_bigswitch_bcf_devices` (
   CONSTRAINT `fk_external_bigswitch_bcf_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES
`host`(`id`) ON DELETE CASCADE,
   CONSTRAINT `fk_external_bigswitch_bcf_devices__physical_network_id` FOREIGN KEY (`physical_network_id`)
REFERENCES `physical_network`(`id`) ON DELETE CASCADE
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Advanced", 'DEFAULT', 'management-server',
"usage.report.interval", 7, "Interval (days) between sending anonymous Usage Reports back
to the CloudStack project", "", NULL, NULL, 0);


Mime
View raw message