cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From bfede...@apache.org
Subject [30/50] [abbrv] AutoScaling without NetScaler
Date Tue, 10 Dec 2013 23:52:10 GMT
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/dc151115/scripts/vm/hypervisor/xenserver/perfmon.py
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/perfmon.py b/scripts/vm/hypervisor/xenserver/perfmon.py
new file mode 100644
index 0000000..c8514b4
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/perfmon.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+
+import pprint
+import XenAPI
+import urllib
+from xml.dom import minidom
+import time
+
+
# Per VM dictionary (used by RRDUpdates to look up column numbers by variable names)
class VMReport(dict):
    """Used internally by RRDUpdates: maps a VM's variable name -> column index."""
    def __init__(self, uuid):
        # uuid of the VM this report describes
        self.uuid = uuid
        # Bug fix: the original called super(dict, self).__init__(), which
        # skips dict itself in the MRO; naming this class initialises dict.
        super(VMReport, self).__init__()
+
+
# Per Host dictionary (used by RRDUpdates to look up column numbers by variable names)
class HostReport(dict):
    """Used internally by RRDUpdates: maps a host variable name -> column index."""
    def __init__(self, uuid):
        # uuid of the host this report describes
        self.uuid = uuid
        # Bug fix: the original called super(dict, self).__init__(), which
        # skips dict itself in the MRO; naming this class initialises dict.
        super(HostReport, self).__init__()
+
+
+class PerfMonException(Exception):
+    """Base error for this module; raised for malformed <legend> entries and
+    an unexpectedly changing host uuid (see RRDUpdates.__handle_col)."""
+    pass
+
+
+class XmlConfigException(PerfMonException):
+    """XML configuration error; defined for callers, not raised in this module."""
+    pass
+
+
+class UsageException(Exception):
+    """Invalid invocation/arguments; defined for callers, not raised in this module."""
+    pass
+
+
class RRDUpdates:
    """Fetch and parse the output of http://<host>/rrd_updates?...

    Usage: construct, call refresh() (once or periodically), then read the
    parsed samples through the get_* accessors.
    """
    def __init__(self):
        # params are what get passed to the rrd_updates CGI executable in the URL
        self.params = dict()
        self.params['start'] = int(time.time()) - 1000  # For demo purposes!
        self.params['host'] = 'false'   # include data for host (as well as for VMs)
        self.params['cf'] = 'AVERAGE'   # consolidation function, each sample averages 12 from the 5 second RRD
        self.params['interval'] = '60'
        # Robustness fix: define the report containers up front so the
        # accessors do not raise AttributeError before the first refresh().
        self.vm_reports = {}     # uuid -> VMReport (variable name -> column index)
        self.host_report = None  # single HostReport; its uuid must not change
        self.rows = 0            # number of sample rows in the last parsed document

    def get_nrows(self):
        """Number of sample rows parsed from the last rrd_updates document."""
        return self.rows

    def get_vm_list(self):
        """uuids of every VM seen in the data (the Control domain counts as a VM)."""
        return self.vm_reports.keys()

    def get_vm_param_list(self, uuid):
        """Variable names recorded for the given VM, or [] if none."""
        report = self.vm_reports[uuid]
        if not report:
            return []
        return report.keys()

    def get_total_cpu_core(self, uuid):
        """Count of per-core cpu variables (names containing 'cpu') for the VM."""
        report = self.vm_reports[uuid]
        if not report:
            return 0
        return sum(1 for param in report.keys() if "cpu" in param)

    def get_vm_data(self, uuid, param, row):
        """Float sample of variable `param` for VM `uuid` at row index `row`."""
        report = self.vm_reports[uuid]
        col = report[param]
        return self.__lookup_data(col, row)

    def get_host_uuid(self):
        """uuid of the host, or None when no host data has been parsed."""
        report = self.host_report
        if not report:
            return None
        return report.uuid

    def get_host_param_list(self):
        """Variable names recorded for the host, or [] when not parsed."""
        report = self.host_report
        if not report:
            return []
        return report.keys()

    def get_host_data(self, param, row):
        """Float sample of host variable `param` at row index `row`."""
        report = self.host_report
        col = report[param]
        return self.__lookup_data(col, row)

    def get_row_time(self, row):
        """Unix timestamp of sample `row` (row 0 is the oldest sample)."""
        return self.__lookup_timestamp(row)

    # extract float from value (<v>) node by col,row
    def __lookup_data(self, col, row):
        # Note: the <row> nodes are in reverse chronological order, and comprise
        # a timestamp <t> node, followed by self.columns data <v> nodes
        node = self.data_node.childNodes[self.rows - 1 - row].childNodes[col + 1]
        return float(node.firstChild.toxml())  # node.firstChild should have nodeType TEXT_NODE

    # extract int from value (<t>) node by row
    def __lookup_timestamp(self, row):
        # Note: the <row> nodes are in reverse chronological order, and comprise
        # a timestamp <t> node, followed by self.columns data <v> nodes
        node = self.data_node.childNodes[self.rows - 1 - row].childNodes[0]
        return int(node.firstChild.toxml())  # node.firstChild should have nodeType TEXT_NODE

    def refresh(self, login, starttime, session, override_params):
        """Download rrd_updates from every host known to `login` and parse it.

        login           -- XenAPI proxy; login.host enumerates the hosts
        starttime       -- unix time the samples should start from
        session         -- session id forwarded to the CGI as session_id
        override_params -- extra CGI parameters.  NOTE(review): self.params
                           currently takes precedence over these; confirm that
                           is intended if non-empty overrides are ever passed.
        """
        self.params['start'] = starttime
        params = override_params
        params['session_id'] = session
        params.update(self.params)
        paramstr = "&".join(["%s=%s" % (k, params[k]) for k in params])
        # this is better than urllib.urlopen() as it raises an Exception on
        # http 401 'Unauthorised' error rather than drop into interactive mode
        for host in login.host.get_all():
            sock = urllib.URLopener().open("http://" + str(login.host.get_address(host)) + "/rrd_updates?%s" % paramstr)
            # Leak fix: make sure the socket is closed even if read() fails.
            try:
                xmlsource = sock.read()
            finally:
                sock.close()
            self.__parse_xmldoc(minidom.parseString(xmlsource))
            # Update the time used on the next run
            self.params['start'] = self.end_time + 1  # avoid retrieving same data twice

    def __parse_xmldoc(self, xmldoc):
        # The 1st node contains meta data (description of the data)
        # The 2nd node contains the data
        self.meta_node = xmldoc.firstChild.childNodes[0]
        self.data_node = xmldoc.firstChild.childNodes[1]

        def lookup_metadata_bytag(name):
            return int(self.meta_node.getElementsByTagName(name)[0].firstChild.toxml())

        # rows = number of samples per variable
        # columns = number of variables
        self.rows = lookup_metadata_bytag('rows')
        self.columns = lookup_metadata_bytag('columns')
        # These indicate the period covered by the data
        self.start_time = lookup_metadata_bytag('start')
        self.step_time = lookup_metadata_bytag('step')
        self.end_time = lookup_metadata_bytag('end')
        # the <legend> Node describes the variables
        self.legend = self.meta_node.getElementsByTagName('legend')[0]
        # vm_reports accumulates entries across hosts/refreshes (see __init__)
        # There is just one host_report and its uuid should not change!
        self.host_report = None
        # Handle each column.  (I.e. each variable)
        for col in range(self.columns):
            self.__handle_col(col)

    def __handle_col(self, col):
        # work out how to interpret col from the legend; entries look like
        # "AVERAGE:vm:<uuid>:<variable>"
        col_meta_data = self.legend.childNodes[col].firstChild.toxml()
        # vm_or_host will be 'vm' or 'host'.  Note that the Control domain counts as a VM!
        (cf, vm_or_host, uuid, param) = col_meta_data.split(':')
        if vm_or_host == 'vm':
            # Create a report for this VM if it doesn't exist
            if not uuid in self.vm_reports:
                self.vm_reports[uuid] = VMReport(uuid)
            # Update the VMReport with the col data and meta data
            vm_report = self.vm_reports[uuid]
            vm_report[param] = col
        elif vm_or_host == 'host':
            # Create a report for the host if it doesn't exist
            if not self.host_report:
                self.host_report = HostReport(uuid)
            elif self.host_report.uuid != uuid:
                raise PerfMonException("Host UUID changed: (was %s, is %s)" % (self.host_report.uuid, uuid))
            # Update the HostReport with the col data and meta data
            self.host_report[param] = col
        else:
            raise PerfMonException("Invalid string in <legend>: %s" % col_meta_data)
+
+
def get_vm_group_perfmon(args=None):
    """Compute average cpu/memory usage for the VMs of an autoscale group.

    args is the XAPI plugin argument dict, expected to contain:
      total_vm, total_counter, vmuuid<i>, counter<i> ('cpu' or 'memory'),
      duration<i> (seconds).
    Returns a string of the form "vm.counter:value,vm.counter:value,...".
    """
    # Bug fix: avoid a mutable default argument.
    if args is None:
        args = {}
    login = XenAPI.xapi_local()
    login.login_with_password("", "")
    results = []

    total_vm = int(args['total_vm'])
    total_counter = int(args['total_counter'])
    now = int(time.time()) / 60

    session = login._session

    # the longest duration of any counter decides how far back we must fetch
    max_duration = 0
    for counter_count in xrange(1, total_counter + 1):
        duration = int(args['duration' + str(counter_count)])
        if duration > max_duration:
            max_duration = duration

    rrd_updates = RRDUpdates()
    rrd_updates.refresh(login.xenapi, now * 60 - max_duration, session, {})

    for vm_count in xrange(1, total_vm + 1):
        vm_uuid = args['vmuuid' + str(vm_count)]
        for counter_count in xrange(1, total_counter + 1):
            counter = args['counter' + str(counter_count)]
            total_row = rrd_updates.get_nrows()
            duration = int(args['duration' + str(counter_count)]) / 60
            # Robustness fix: never start below row 0 (a duration longer than
            # the fetched window used to produce negative row indices).
            duration_diff = max(total_row - duration, 0)
            if counter == "cpu":
                total_cpu = rrd_updates.get_total_cpu_core(vm_uuid)
                # Bug fix: reset the accumulator for every VM/counter pair;
                # previously it carried over the previous iteration's average.
                average_cpu = 0
                for row in xrange(duration_diff, total_row):
                    for cpu in xrange(0, total_cpu):
                        average_cpu += rrd_updates.get_vm_data(vm_uuid, "cpu" + str(cpu), row)
                # Robustness fix: guard the division when no cores/rows exist.
                if duration * total_cpu > 0:
                    average_cpu /= (duration * total_cpu)
                results.append(str(vm_count) + '.' + str(counter_count) + ':' + str(average_cpu))
            elif counter == "memory":
                average_memory = 0
                for row in xrange(duration_diff, total_row):
                    # presumably memory_target is bytes -> MiB and
                    # memory_internal_free is KiB -> MiB; TODO confirm units
                    average_memory += rrd_updates.get_vm_data(vm_uuid, "memory_target", row) / 1048576 - rrd_updates.get_vm_data(vm_uuid, "memory_internal_free", row) / 1024
                if duration > 0:
                    average_memory /= duration
                results.append(str(vm_count) + '.' + str(counter_count) + ':' + str(average_memory))
    return ",".join(results)
+

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/dc151115/scripts/vm/hypervisor/xenserver/vmopspremium
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/vmopspremium b/scripts/vm/hypervisor/xenserver/vmopspremium
index d7d0c6f..348dfa4 100755
--- a/scripts/vm/hypervisor/xenserver/vmopspremium
+++ b/scripts/vm/hypervisor/xenserver/vmopspremium
@@ -28,6 +28,7 @@ if os.path.exists("/usr/lib/xcp/sm"):
     sys.path.extend(["/usr/lib/xcp/sm/", "/usr/local/sbin/", "/sbin/"])
 import util
 import socket
+import perfmon
 
 def echo(fn):
     def wrapped(*v, **k):
@@ -142,6 +143,11 @@ def heartbeat(session, args):
        txt='fail'
     return txt
 
@echo
def asmonitor(session, args):
    """XAPI plugin entry point: return autoscale perf averages from perfmon.

    session -- XenAPI session handed in by the plugin dispatcher (unused here)
    args    -- plugin argument dict forwarded to perfmon.get_vm_group_perfmon
    """
    # indentation normalised to 4 spaces to match the rest of this file
    result = perfmon.get_vm_group_perfmon(args)
    return result

 if __name__ == "__main__":
-    XenAPIPlugin.dispatch({"forceShutdownVM":forceShutdownVM, "upgrade_snapshot":upgrade_snapshot, "create_privatetemplate_from_snapshot":create_privatetemplate_from_snapshot, "copy_vhd_to_secondarystorage":copy_vhd_to_secondarystorage, "copy_vhd_from_secondarystorage":copy_vhd_from_secondarystorage, "setup_heartbeat_sr":setup_heartbeat_sr, "setup_heartbeat_file":setup_heartbeat_file, "check_heartbeat":check_heartbeat, "heartbeat": heartbeat})
+    XenAPIPlugin.dispatch({"forceShutdownVM":forceShutdownVM, "upgrade_snapshot":upgrade_snapshot, "create_privatetemplate_from_snapshot":create_privatetemplate_from_snapshot, "copy_vhd_to_secondarystorage":copy_vhd_to_secondarystorage, "copy_vhd_from_secondarystorage":copy_vhd_from_secondarystorage, "setup_heartbeat_sr":setup_heartbeat_sr, "setup_heartbeat_file":setup_heartbeat_file, "check_heartbeat":check_heartbeat, "heartbeat": heartbeat, "asmonitor": asmonitor})
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/dc151115/server/src/com/cloud/network/as/AutoScaleManager.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/network/as/AutoScaleManager.java b/server/src/com/cloud/network/as/AutoScaleManager.java
index 5b84b30..9c841d4 100644
--- a/server/src/com/cloud/network/as/AutoScaleManager.java
+++ b/server/src/com/cloud/network/as/AutoScaleManager.java
@@ -19,4 +19,8 @@ package com.cloud.network.as;
 public interface AutoScaleManager extends AutoScaleService {
 
     void cleanUpAutoScaleResources(Long accountId);
+
+	/**
+	 * Scale the group up by creating, starting and load-balancing numVm new VMs.
+	 */
+	void doScaleUp(long groupId, Integer numVm);
+
+	/**
+	 * Scale the group down by one VM: remove it from the LB rule and destroy it
+	 * after the profile's destroy-VM grace period.
+	 */
+	void doScaleDown(long groupId);
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/dc151115/server/src/com/cloud/network/as/AutoScaleManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/com/cloud/network/as/AutoScaleManagerImpl.java
index fb7c922..ba95dc0 100644
--- a/server/src/com/cloud/network/as/AutoScaleManagerImpl.java
+++ b/server/src/com/cloud/network/as/AutoScaleManagerImpl.java
@@ -17,22 +17,24 @@
 package com.cloud.network.as;
 
 import java.security.InvalidParameterException;
+import java.text.SimpleDateFormat;
 import java.util.ArrayList;
+import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 
 import javax.ejb.Local;
 import javax.inject.Inject;
 
-import org.apache.log4j.Logger;
-
-import com.google.gson.Gson;
-import com.google.gson.reflect.TypeToken;
-
 import org.apache.cloudstack.acl.ControlledEntity;
 import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.ApiErrorCode;
+import org.apache.cloudstack.api.BaseCmd.HTTPMethod;
 import org.apache.cloudstack.api.BaseListAccountResourcesCmd;
+import org.apache.cloudstack.api.ServerApiException;
 import org.apache.cloudstack.api.command.admin.autoscale.CreateCounterCmd;
 import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScalePolicyCmd;
 import org.apache.cloudstack.api.command.user.autoscale.CreateAutoScaleVmGroupCmd;
@@ -49,42 +51,55 @@ import org.apache.cloudstack.api.command.user.autoscale.UpdateAutoScaleVmProfile
 import org.apache.cloudstack.api.command.user.vm.DeployVMCmd;
 import org.apache.cloudstack.context.CallContext;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.log4j.Logger;
 
 import com.cloud.api.ApiDBUtils;
 import com.cloud.api.ApiDispatcher;
 import com.cloud.configuration.Config;
 import com.cloud.configuration.ConfigurationManager;
 import com.cloud.dc.DataCenter;
+import com.cloud.dc.DataCenter.NetworkType;
 import com.cloud.dc.dao.DataCenterDao;
 import com.cloud.event.ActionEvent;
 import com.cloud.event.EventTypes;
+import com.cloud.exception.ConcurrentOperationException;
+import com.cloud.exception.InsufficientCapacityException;
+import com.cloud.exception.InsufficientServerCapacityException;
 import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.exception.ResourceAllocationException;
 import com.cloud.exception.ResourceInUseException;
 import com.cloud.exception.ResourceUnavailableException;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.network.Network.Capability;
+import com.cloud.network.Network.IpAddresses;
 import com.cloud.network.as.AutoScaleCounter.AutoScaleCounterParam;
 import com.cloud.network.as.dao.AutoScalePolicyConditionMapDao;
 import com.cloud.network.as.dao.AutoScalePolicyDao;
 import com.cloud.network.as.dao.AutoScaleVmGroupDao;
 import com.cloud.network.as.dao.AutoScaleVmGroupPolicyMapDao;
+import com.cloud.network.as.dao.AutoScaleVmGroupVmMapDao;
 import com.cloud.network.as.dao.AutoScaleVmProfileDao;
 import com.cloud.network.as.dao.ConditionDao;
 import com.cloud.network.as.dao.CounterDao;
 import com.cloud.network.dao.IPAddressDao;
 import com.cloud.network.dao.LoadBalancerDao;
 import com.cloud.network.dao.LoadBalancerVMMapDao;
+import com.cloud.network.dao.LoadBalancerVMMapVO;
 import com.cloud.network.dao.LoadBalancerVO;
 import com.cloud.network.dao.NetworkDao;
 import com.cloud.network.lb.LoadBalancingRulesManager;
+import com.cloud.network.lb.LoadBalancingRulesService;
 import com.cloud.offering.ServiceOffering;
 import com.cloud.projects.Project.ListProjectResourcesCriteria;
 import com.cloud.template.TemplateManager;
 import com.cloud.template.VirtualMachineTemplate;
 import com.cloud.user.Account;
 import com.cloud.user.AccountManager;
+import com.cloud.user.AccountService;
 import com.cloud.user.User;
 import com.cloud.user.dao.AccountDao;
 import com.cloud.user.dao.UserDao;
+import com.cloud.uservm.UserVm;
 import com.cloud.utils.Pair;
 import com.cloud.utils.Ternary;
 import com.cloud.utils.component.ManagerBase;
@@ -100,10 +115,15 @@ import com.cloud.utils.db.Transaction;
 import com.cloud.utils.db.TransactionCallback;
 import com.cloud.utils.db.TransactionStatus;
 import com.cloud.utils.net.NetUtils;
+import com.cloud.vm.UserVmManager;
+import com.cloud.vm.UserVmService;
+import com.google.gson.Gson;
+import com.google.gson.reflect.TypeToken;
 
 @Local(value = {AutoScaleService.class, AutoScaleManager.class})
 public class AutoScaleManagerImpl<Type> extends ManagerBase implements AutoScaleManager, AutoScaleService {
     private static final Logger s_logger = Logger.getLogger(AutoScaleManagerImpl.class);
+    private ScheduledExecutorService _executor = null;
 
     @Inject
     EntityManager _entityMgr;
@@ -138,6 +158,8 @@ public class AutoScaleManagerImpl<Type> extends ManagerBase implements AutoScale
     @Inject
     AutoScaleVmGroupPolicyMapDao _autoScaleVmGroupPolicyMapDao;
     @Inject
+    AutoScaleVmGroupVmMapDao _autoScaleVmGroupVmMapDao;
+    @Inject
     DataCenterDao _dcDao = null;
     @Inject
     UserDao _userDao;
@@ -145,6 +167,16 @@ public class AutoScaleManagerImpl<Type> extends ManagerBase implements AutoScale
     ConfigurationDao _configDao;
     @Inject
     IPAddressDao _ipAddressDao;
+    @Inject
+    AccountService _accountService;
+    @Inject
+    UserVmService _userVmService;
+    @Inject
+    UserVmManager _userVmManager;
+    @Inject 
+    LoadBalancerVMMapDao _LbVmMapDao;
+    @Inject 
+	LoadBalancingRulesService _LoadBalancingRulesService;
 
     public List<AutoScaleCounter> getSupportedAutoScaleCounters(long networkid) {
         String capability = _lbRulesMgr.getLBCapability(networkid, Capability.AutoScaleCounters.getName());
@@ -501,7 +533,7 @@ public class AutoScaleManagerImpl<Type> extends ManagerBase implements AutoScale
             throw new InvalidParameterValueException("action is invalid, only 'scaleup' and 'scaledown' is supported");
         }
 
-        AutoScalePolicyVO policyVO = new AutoScalePolicyVO(cmd.getDomainId(), cmd.getAccountId(), duration, quietTime, action);
+        AutoScalePolicyVO policyVO = new AutoScalePolicyVO(cmd.getDomainId(), cmd.getAccountId(), duration, quietTime, null, action);
 
         policyVO = checkValidityAndPersist(policyVO, cmd.getConditionIds());
         s_logger.info("Successfully created AutoScale Policy with Id: " + policyVO.getId());
@@ -705,9 +737,8 @@ public class AutoScaleManagerImpl<Type> extends ManagerBase implements AutoScale
                 "there are Vms already bound to the specified LoadBalancing Rule. User bound Vms and AutoScaled Vm Group cannot co-exist on a Load Balancing Rule");
         }
 
-        AutoScaleVmGroupVO vmGroupVO =
-            new AutoScaleVmGroupVO(cmd.getLbRuleId(), zoneId, loadBalancer.getDomainId(), loadBalancer.getAccountId(), minMembers, maxMembers,
-                loadBalancer.getDefaultPortStart(), interval, cmd.getProfileId(), AutoScaleVmGroup.State_New);
+        AutoScaleVmGroupVO vmGroupVO = new AutoScaleVmGroupVO(cmd.getLbRuleId(), zoneId, loadBalancer.getDomainId(), loadBalancer.getAccountId(), minMembers, maxMembers,
+                loadBalancer.getDefaultPortStart(), interval, null, cmd.getProfileId(), AutoScaleVmGroup.State_New);
 
         vmGroupVO = checkValidityAndPersist(vmGroupVO, cmd.getScaleUpPolicyIds(), cmd.getScaleDownPolicyIds());
         s_logger.info("Successfully created Autoscale Vm Group with Id: " + vmGroupVO.getId());
@@ -1169,4 +1200,264 @@ public class AutoScaleManagerImpl<Type> extends ManagerBase implements AutoScale
             s_logger.debug("Deleted " + count + " Conditions for account Id: " + accountId);
         }
     }
+
+	private boolean checkConditionUp(AutoScaleVmGroupVO asGroup, Integer numVm) {
+    	// check maximum
+		Integer currentVM = _autoScaleVmGroupVmMapDao.countByGroup(asGroup.getId());
+		Integer maxVm = asGroup.getMaxMembers();
+		if (currentVM + numVm > maxVm) {
+			s_logger.warn("number of VM will greater than the maximum in this group if scaling up, so do nothing more");
+			return false;
+		}
+		return true;
+    }
+	
+	private boolean checkConditionDown(AutoScaleVmGroupVO asGroup) {
+		Integer currentVM = _autoScaleVmGroupVmMapDao.countByGroup(asGroup.getId());
+		Integer minVm = asGroup.getMinMembers();
+		if (currentVM - 1 < minVm) {
+			s_logger.warn("number of VM will less than the minimum in this group if scaling down, so do nothing more");
+			return false;
+		}
+		return true;
+	}
+	
+	/**
+	 * Create (but do not start) a new VM for the autoscale group, using its
+	 * AutoScaleVmProfile (zone, service offering, template).
+	 *
+	 * @return the id of the newly persisted VM, or -1 when the profile has no
+	 *         template or the deploy call returned no VM
+	 */
+	private long createNewVM(AutoScaleVmGroupVO asGroup) {
+		AutoScaleVmProfileVO profileVo = _autoScaleVmProfileDao.findById(asGroup.getProfileId());
+		long templateId = profileVo.getTemplateId();
+		long serviceOfferingId = profileVo.getServiceOfferingId();
+		if (templateId == -1) {
+			return -1;
+		}
+		// create new VM into DB
+		try {
+            //Verify that all objects exist before passing them to the service
+            Account owner = _accountService.getActiveAccountById(profileVo.getAccountId());
+
+            DataCenter zone = _entityMgr.findById(DataCenter.class, profileVo.getZoneId());
+            if (zone == null) {
+                throw new InvalidParameterValueException("Unable to find zone by id=" + profileVo.getZoneId());
+            }
+
+            ServiceOffering serviceOffering = _entityMgr.findById(ServiceOffering.class, serviceOfferingId);
+            if (serviceOffering == null) {
+                throw new InvalidParameterValueException("Unable to find service offering: " + serviceOfferingId);
+            }
+
+            VirtualMachineTemplate template = _entityMgr.findById(VirtualMachineTemplate.class, templateId);
+            // Make sure a valid template ID was specified
+            if (template == null) {
+                throw new InvalidParameterValueException("Unable to use template " + templateId);
+            }
+
+            if (!zone.isLocalStorageEnabled()) {
+                if (serviceOffering.getUseLocalStorage()) {
+                    throw new InvalidParameterValueException("Zone is not configured to use local storage but service offering " + serviceOffering.getName() + " uses it");
+                }
+            }
+
+            // NOTE(review): the hypervisor is hard-coded to XenServer below;
+            // VM names embed the group id plus a timestamp to stay unique.
+            UserVm vm = null;
+			IpAddresses addrs = new IpAddresses(null, null);
+			if (zone.getNetworkType() == NetworkType.Basic) {
+                vm = _userVmService.createBasicSecurityGroupVirtualMachine(zone, serviceOffering, template, null, owner, "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(),
+                		"autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, null, true, null, null);
+            } else {
+                if (zone.isSecurityGroupEnabled())  {
+                    vm = _userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, template, null, null,
+                            owner, "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, null, true, null, null);
+
+                } else {
+                    vm = _userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, null, owner, "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(),
+                            null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null);
+
+                }
+            }
+
+            if (vm != null) {
+				return vm.getId();
+            } else {
+				return -1;
+            }
+        } catch (InsufficientCapacityException ex) {
+            // checked deployment failures are surfaced to the API layer
+            s_logger.info(ex);
+            s_logger.trace(ex.getMessage(), ex);
+            throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage());
+        } catch (ResourceUnavailableException ex) {
+            s_logger.warn("Exception: ", ex);
+            throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage());
+        }  catch (ConcurrentOperationException ex) {
+            s_logger.warn("Exception: ", ex);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
+        } catch (ResourceAllocationException ex) {
+            s_logger.warn("Exception: ", ex);
+            throw new ServerApiException(ApiErrorCode.RESOURCE_ALLOCATION_ERROR, ex.getMessage());
+        }
+	}
+	
+	private String getCurrentTimeStampString(){
+		Date current = new Date();
+		SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMddHHmmss");
+		
+		return sdf.format(current);
+	}
+
+	/**
+	 * Start the given VM, translating checked deployment exceptions into
+	 * ServerApiException for the API layer.
+	 *
+	 * @return true when the start call completed without throwing
+	 */
+	private boolean startNewVM(long vmId) {
+		try {
+            CallContext.current().setEventDetails("Vm Id: "+vmId);
+			_userVmManager.startVirtualMachine(vmId, null, null);
+        } catch (ResourceUnavailableException ex) {
+            s_logger.warn("Exception: ", ex);
+            throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage());
+        } catch (ConcurrentOperationException ex) {
+            s_logger.warn("Exception: ", ex);
+            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage());
+        } catch (InsufficientCapacityException ex) {
+            // enrich the message when affinity groups made placement impossible
+            StringBuilder message = new StringBuilder(ex.getMessage());
+            if (ex instanceof InsufficientServerCapacityException) {
+                if(((InsufficientServerCapacityException)ex).isAffinityApplied()){
+                    message.append(", Please check the affinity groups provided, there may not be sufficient capacity to follow them");
+                }
+            }
+            s_logger.info(ex);
+            s_logger.info(message.toString(), ex);
+            throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, message.toString());
+        }
+		return true;
+	}
+	
+	private boolean assignLBruleToNewVm(long vmId, AutoScaleVmGroupVO asGroup) {
+		List<Long> lstVmId = new ArrayList<Long>();
+    	long lbId = asGroup.getLoadBalancerId();
+    	
+    	List<LoadBalancerVMMapVO>  LbVmMapVos = _LbVmMapDao.listByLoadBalancerId(lbId);
+		if ((LbVmMapVos != null) && (LbVmMapVos.size() > 0)) {
+			for (LoadBalancerVMMapVO LbVmMapVo : LbVmMapVos) {
+				long instanceId = LbVmMapVo.getInstanceId();
+				if (instanceId == vmId) {
+					s_logger.warn("the new VM is already mapped to LB rule. What's wrong?");
+					return true;
+				}
+			}
+    	}
+		lstVmId.add(new Long(vmId));
+		return _LoadBalancingRulesService.assignToLoadBalancer(lbId, lstVmId);
+
+	}
+
+	/**
+	 * Detach one VM from the group's LB rule.  The candidate is the last
+	 * instance returned by the LB-VM map DAO (ordering is DAO-defined --
+	 * TODO confirm which VM that actually selects).
+	 *
+	 * @return the removed instance id, or -1 when nothing could be removed
+	 */
+	private long removeLBrule(AutoScaleVmGroupVO asGroup) {
+    	long lbId = asGroup.getLoadBalancerId();
+		long instanceId = -1;
+    	List<LoadBalancerVMMapVO>  LbVmMapVos = _LbVmMapDao.listByLoadBalancerId(lbId);
+		if ((LbVmMapVos != null) && (LbVmMapVos.size() > 0)) {
+			for (LoadBalancerVMMapVO LbVmMapVo : LbVmMapVos) {
+				instanceId = LbVmMapVo.getInstanceId();
+			}
+    	}
+		// take last VM out of the list
+		List<Long> lstVmId = new ArrayList<Long>();
+		if (instanceId != -1)
+			lstVmId.add(instanceId);
+		if (_LoadBalancingRulesService.removeFromLoadBalancer(lbId, lstVmId))
+			return instanceId;
+		else
+			return -1;
+	}
+
+	/**
+	 * Scale the group up by numVm VMs: create, start and LB-assign each one,
+	 * persist the group-vm mapping and stamp the scale-up policy's quiet time.
+	 * Stops early (until the next monitoring round) when any step fails.
+	 */
+	@Override
+	public void doScaleUp(long groupId, Integer numVm) {
+		AutoScaleVmGroupVO asGroup = _autoScaleVmGroupDao.findById(groupId);
+		if (asGroup == null) {
+			s_logger.error("Can not find the groupid " + groupId + " for scaling up");
+			return;
+		}
+		if (!checkConditionUp(asGroup, numVm)) {
+			return;
+		}
+		for (int i = 0; i < numVm; i++) {
+			long vmId = createNewVM(asGroup);
+			if (vmId == -1) {
+				s_logger.error("Can not deploy new VM for scaling up in the group "
+						+ asGroup.getId() + ". Waiting for next round");
+				break;
+			}
+			if (startNewVM(vmId)) {
+				if (assignLBruleToNewVm(vmId, asGroup)) {
+					// persist to DB
+					AutoScaleVmGroupVmMapVO GroupVmVO = new AutoScaleVmGroupVmMapVO(
+							asGroup.getId(), vmId);
+					_autoScaleVmGroupVmMapDao.persist(GroupVmVO);
+					// update last_quiettime of the scale-up policy
+					List<AutoScaleVmGroupPolicyMapVO> GroupPolicyVOs = _autoScaleVmGroupPolicyMapDao
+							.listByVmGroupId(groupId);
+					for (AutoScaleVmGroupPolicyMapVO GroupPolicyVO : GroupPolicyVOs) {
+						AutoScalePolicyVO vo = _autoScalePolicyDao
+								.findById(GroupPolicyVO.getPolicyId());
+						if (vo.getAction().equals("scaleup")) {
+							vo.setLastQuiteTime(new Date());
+							_autoScalePolicyDao.persist(vo);
+							break;
+						}
+					}
+				} else {
+					s_logger.error("Can not assign LB rule for this new VM");
+					break;
+				}
+			} else {
+				s_logger.error("Can not deploy new VM for scaling up in the group "
+						+ asGroup.getId() + ". Waiting for next round");
+				break;
+			}
+		}
+	}
+
+	@Override
+	public void doScaleDown(final long groupId) {
+		AutoScaleVmGroupVO asGroup = _autoScaleVmGroupDao.findById(groupId);
+		if (asGroup == null) {
+			s_logger.error("Can not find the groupid " + groupId + " for scaling up");
+			return;
+		}
+		if (!checkConditionDown(asGroup)) {
+			return;
+		}
+		final long vmId = removeLBrule(asGroup);
+		if (vmId != -1) {
+			long profileId = asGroup.getProfileId();
+			// get destroyvmgrace param
+			AutoScaleVmProfileVO asProfile = _autoScaleVmProfileDao.findById(profileId);
+			Integer destroyVmGracePeriod = asProfile.getDestroyVmGraceperiod();
+			if (destroyVmGracePeriod >= 0) {
+				_executor.schedule(new Runnable() {
+					@Override
+					public void run() {
+						try {
+							// destroy vm
+							_userVmManager.destroyVm(vmId);
+							// update group-vm mapping
+							_autoScaleVmGroupVmMapDao.remove(groupId, vmId);
+							// update last_quiettime
+							List<AutoScaleVmGroupPolicyMapVO> GroupPolicyVOs = _autoScaleVmGroupPolicyMapDao
+									.listByVmGroupId(groupId);
+							for (AutoScaleVmGroupPolicyMapVO GroupPolicyVO : GroupPolicyVOs) {
+								AutoScalePolicyVO vo = _autoScalePolicyDao
+										.findById(GroupPolicyVO.getPolicyId());
+								if (vo.getAction().equals("scaledown")) {
+									vo.setLastQuiteTime(new Date());
+									break;
+								}
+							}	
+						} catch (ResourceUnavailableException e) {
+							e.printStackTrace();
+						} catch (ConcurrentOperationException e) {
+							e.printStackTrace();
+						}
+					}
+				}, destroyVmGracePeriod, TimeUnit.SECONDS);
+			}
+		} else {
+			s_logger.error("Can not remove LB rule for the VM being destroyed. Do nothing more.");
+		}
+	}
+	
+
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/dc151115/server/src/com/cloud/network/element/VirtualRouterElement.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/network/element/VirtualRouterElement.java b/server/src/com/cloud/network/element/VirtualRouterElement.java
index 37d14f2..145c6eb 100755
--- a/server/src/com/cloud/network/element/VirtualRouterElement.java
+++ b/server/src/com/cloud/network/element/VirtualRouterElement.java
@@ -30,11 +30,13 @@ import org.apache.log4j.Logger;
 import com.google.gson.Gson;
 
 import org.apache.cloudstack.api.command.admin.router.ConfigureOvsElementCmd;
+
 import org.apache.cloudstack.api.command.admin.router.ConfigureVirtualRouterElementCmd;
 import org.apache.cloudstack.api.command.admin.router.CreateVirtualRouterElementCmd;
 import org.apache.cloudstack.api.command.admin.router.ListOvsElementsCmd;
 import org.apache.cloudstack.api.command.admin.router.ListVirtualRouterElementsCmd;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
+import org.apache.log4j.Logger;
 
 import com.cloud.agent.api.to.LoadBalancerTO;
 import com.cloud.configuration.ConfigurationManager;
@@ -62,6 +64,8 @@ import com.cloud.network.RemoteAccessVpn;
 import com.cloud.network.VirtualRouterProvider;
 import com.cloud.network.VirtualRouterProvider.Type;
 import com.cloud.network.VpnUser;
+import com.cloud.network.as.AutoScaleCounter;
+import com.cloud.network.as.AutoScaleCounter.AutoScaleCounterType;
 import com.cloud.network.dao.IPAddressDao;
 import com.cloud.network.dao.LoadBalancerDao;
 import com.cloud.network.dao.NetworkDao;
@@ -99,6 +103,7 @@ import com.cloud.vm.VirtualMachine.State;
 import com.cloud.vm.VirtualMachineProfile;
 import com.cloud.vm.dao.DomainRouterDao;
 import com.cloud.vm.dao.UserVmDao;
+import com.google.gson.Gson;
 
 import com.google.gson.Gson;
 
@@ -112,8 +117,9 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl
         LoadBalancingServiceProvider, PortForwardingServiceProvider, RemoteAccessVPNServiceProvider, IpDeployer,
         NetworkMigrationResponder {
     private static final Logger s_logger = Logger.getLogger(VirtualRouterElement.class);
-
-    protected static final Map<Service, Map<Capability, String>> capabilities = setCapabilities();
+	public static final AutoScaleCounterType AutoScaleCounterCpu = new AutoScaleCounterType("cpu");
+	public static final AutoScaleCounterType AutoScaleCounterMemory = new AutoScaleCounterType("memory");
+	protected static final Map<Service, Map<Capability, String>> capabilities = setCapabilities();
 
     @Inject
     NetworkDao _networksDao;
@@ -555,7 +561,17 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl
         lbCapabilities.put(Capability.SupportedProtocols, "tcp, udp");
         lbCapabilities.put(Capability.SupportedStickinessMethods, getHAProxyStickinessCapability());
         lbCapabilities.put(Capability.LbSchemes, LoadBalancerContainer.Scheme.Public.toString());
-
+        
+        //specifies that LB rules can support autoscaling and the list of counters it supports
+        AutoScaleCounter counter;
+        List<AutoScaleCounter> counterList = new ArrayList<AutoScaleCounter>();
+		counter = new AutoScaleCounter(AutoScaleCounterCpu);
+        counterList.add(counter);
+		counter = new AutoScaleCounter(AutoScaleCounterMemory);
+        counterList.add(counter);
+        Gson gson = new Gson();
+        String autoScaleCounterList = gson.toJson(counterList);
+        lbCapabilities.put(Capability.AutoScaleCounters, autoScaleCounterList);
         capabilities.put(Service.Lb, lbCapabilities);
 
         // Set capabilities for Firewall service

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/dc151115/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
index 501bb14..262d341 100755
--- a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
+++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java
@@ -915,7 +915,8 @@ public class LoadBalancingRulesManagerImpl<Type> extends ManagerBase implements
         if (provider == null || provider.size() == 0) {
             return false;
         }
-        if (provider.get(0) == Provider.Netscaler || provider.get(0) == Provider.F5BigIp) {
+        if (provider.get(0) == Provider.Netscaler || provider.get(0) == Provider.F5BigIp || 
+        		provider.get(0) == Provider.VirtualRouter || provider.get(0) == Provider.VPCVirtualRouter) {
             return true;
         }
         return false;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/dc151115/server/src/com/cloud/server/StatsCollector.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/server/StatsCollector.java b/server/src/com/cloud/server/StatsCollector.java
index 5300105..89685df 100755
--- a/server/src/com/cloud/server/StatsCollector.java
+++ b/server/src/com/cloud/server/StatsCollector.java
@@ -48,6 +48,7 @@ import com.cloud.agent.AgentManager;
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.GetStorageStatsCommand;
 import com.cloud.agent.api.HostStatsEntry;
+import com.cloud.agent.api.PerformanceMonitorCommand;
 import com.cloud.agent.api.VmDiskStatsEntry;
 import com.cloud.agent.api.VmStatsEntry;
 import com.cloud.cluster.ManagementServerHostVO;
@@ -59,8 +60,29 @@ import com.cloud.host.HostVO;
 import com.cloud.host.Status;
 import com.cloud.host.dao.HostDao;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.network.as.AutoScaleManager;
+import com.cloud.network.as.AutoScalePolicyConditionMapVO;
+import com.cloud.network.as.AutoScalePolicyVO;
+import com.cloud.network.as.AutoScaleVmGroupPolicyMapVO;
+import com.cloud.network.as.AutoScaleVmGroupVO;
+import com.cloud.network.as.AutoScaleVmGroupVmMapVO;
+import com.cloud.network.as.AutoScaleVmProfileVO;
+import com.cloud.network.as.Condition.Operator;
+import com.cloud.network.as.ConditionVO;
+import com.cloud.network.as.Counter;
+import com.cloud.network.as.CounterVO;
+import com.cloud.network.as.dao.AutoScalePolicyConditionMapDao;
+import com.cloud.network.as.dao.AutoScalePolicyDao;
+import com.cloud.network.as.dao.AutoScaleVmGroupDao;
+import com.cloud.network.as.dao.AutoScaleVmGroupPolicyMapDao;
+import com.cloud.network.as.dao.AutoScaleVmGroupVmMapDao;
+import com.cloud.network.as.dao.AutoScaleVmProfileDao;
+import com.cloud.network.as.dao.ConditionDao;
+import com.cloud.network.as.dao.CounterDao;
 import com.cloud.resource.ResourceManager;
 import com.cloud.resource.ResourceState;
+import com.cloud.service.ServiceOfferingVO;
+import com.cloud.service.dao.ServiceOfferingDao;
 import com.cloud.storage.StorageManager;
 import com.cloud.storage.StorageStats;
 import com.cloud.storage.VolumeStats;
@@ -70,6 +92,7 @@ import com.cloud.storage.dao.VolumeDao;
 import com.cloud.user.VmDiskStatisticsVO;
 import com.cloud.user.dao.VmDiskStatisticsDao;
 import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.Pair;
 import com.cloud.utils.component.ComponentMethodInterceptable;
 import com.cloud.utils.component.ManagerBase;
 import com.cloud.utils.concurrency.NamedThreadFactory;
@@ -82,9 +105,11 @@ import com.cloud.utils.db.TransactionStatus;
 import com.cloud.utils.net.MacAddress;
 import com.cloud.vm.UserVmManager;
 import com.cloud.vm.UserVmVO;
+import com.cloud.vm.VMInstanceVO;
 import com.cloud.vm.VirtualMachine;
 import com.cloud.vm.VmStats;
 import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.VMInstanceDao;
 
 /**
  * Provides real time stats for various agent resources up to x seconds
@@ -92,55 +117,53 @@ import com.cloud.vm.dao.UserVmDao;
  */
 @Component
 public class StatsCollector extends ManagerBase implements ComponentMethodInterceptable {
-    public static final Logger s_logger = Logger.getLogger(StatsCollector.class.getName());
-
-    private static StatsCollector s_instance = null;
-
-    private ScheduledExecutorService _executor = null;
-    @Inject
-    private AgentManager _agentMgr;
-    @Inject
-    private UserVmManager _userVmMgr;
-    @Inject
-    private HostDao _hostDao;
-    @Inject
-    private UserVmDao _userVmDao;
-    @Inject
-    private VolumeDao _volsDao;
-    @Inject
-    private PrimaryDataStoreDao _storagePoolDao;
-    @Inject
-    private ImageStoreDao _imageStoreDao;
-    @Inject
-    private StorageManager _storageManager;
-    @Inject
-    private StoragePoolHostDao _storagePoolHostDao;
-    @Inject
-    private DataStoreManager _dataStoreMgr;
-    @Inject
-    private ResourceManager _resourceMgr;
-    @Inject
-    private ConfigurationDao _configDao;
-    @Inject
-    private EndPointSelector _epSelector;
-    @Inject
-    private VmDiskStatisticsDao _vmDiskStatsDao;
-    @Inject
-    private ManagementServerHostDao _msHostDao;
-
-    private ConcurrentHashMap<Long, HostStats> _hostStats = new ConcurrentHashMap<Long, HostStats>();
-    private final ConcurrentHashMap<Long, VmStats> _VmStats = new ConcurrentHashMap<Long, VmStats>();
-    private ConcurrentHashMap<Long, VolumeStats> _volumeStats = new ConcurrentHashMap<Long, VolumeStats>();
-    private ConcurrentHashMap<Long, StorageStats> _storageStats = new ConcurrentHashMap<Long, StorageStats>();
-    private ConcurrentHashMap<Long, StorageStats> _storagePoolStats = new ConcurrentHashMap<Long, StorageStats>();
-
-    long hostStatsInterval = -1L;
-    long hostAndVmStatsInterval = -1L;
-    long storageStatsInterval = -1L;
-    long volumeStatsInterval = -1L;
-    int vmDiskStatsInterval = 0;
-
-    private ScheduledExecutorService _diskStatsUpdateExecutor;
+
+	public static final Logger s_logger = Logger.getLogger(StatsCollector.class.getName());
+
+	private static StatsCollector s_instance = null;
+
+	private ScheduledExecutorService _executor = null;
+	@Inject private AgentManager _agentMgr;
+	@Inject private UserVmManager _userVmMgr;
+	@Inject private HostDao _hostDao;
+	@Inject private UserVmDao _userVmDao;
+	@Inject private VolumeDao _volsDao;
+	@Inject private PrimaryDataStoreDao _storagePoolDao;
+	@Inject private ImageStoreDao _imageStoreDao;
+	@Inject private StorageManager _storageManager;
+	@Inject private StoragePoolHostDao _storagePoolHostDao;
+	@Inject private DataStoreManager _dataStoreMgr;
+	@Inject private ResourceManager _resourceMgr;
+    @Inject private ConfigurationDao _configDao;
+    @Inject private EndPointSelector _epSelector;
+    @Inject private VmDiskStatisticsDao _vmDiskStatsDao;
+    @Inject private ManagementServerHostDao _msHostDao;
+	@Inject	private AutoScaleVmGroupDao _asGroupDao;
+	@Inject private AutoScaleVmGroupVmMapDao _asGroupVmDao;
+	@Inject private AutoScaleManager _asManager;
+	@Inject private VMInstanceDao _vmInstance;
+	@Inject private AutoScaleVmGroupPolicyMapDao _asGroupPolicyDao;
+	@Inject private AutoScalePolicyDao _asPolicyDao;
+	@Inject private AutoScalePolicyConditionMapDao _asConditionMapDao; 
+	@Inject private ConditionDao _asConditionDao;
+	@Inject private CounterDao _asCounterDao;
+	@Inject private AutoScaleVmProfileDao _asProfileDao;
+	@Inject private ServiceOfferingDao _serviceOfferingDao;
+
+	private ConcurrentHashMap<Long, HostStats> _hostStats = new ConcurrentHashMap<Long, HostStats>();
+	private final ConcurrentHashMap<Long, VmStats> _VmStats = new ConcurrentHashMap<Long, VmStats>();
+	private ConcurrentHashMap<Long, VolumeStats> _volumeStats = new ConcurrentHashMap<Long, VolumeStats>();
+	private ConcurrentHashMap<Long, StorageStats> _storageStats = new ConcurrentHashMap<Long, StorageStats>();
+	private ConcurrentHashMap<Long, StorageStats> _storagePoolStats = new ConcurrentHashMap<Long, StorageStats>();
+
+	long hostStatsInterval = -1L;
+	long hostAndVmStatsInterval = -1L;
+	long storageStatsInterval = -1L;
+	long volumeStatsInterval = -1L;
+	long autoScaleStatsInterval = -1L;
+	int vmDiskStatsInterval = 0;
+
+	private ScheduledExecutorService _diskStatsUpdateExecutor;
     private int _usageAggregationRange = 1440;
     private String _usageTimeZone = "GMT";
     private final long mgmtSrvrId = MacAddress.getMacAddress().toLong();
@@ -169,14 +192,15 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
         return true;
     }
 
-    private void init(Map<String, String> configs) {
-        _executor = Executors.newScheduledThreadPool(3, new NamedThreadFactory("StatsCollector"));
+	private void init(Map<String, String> configs) {
+		_executor = Executors.newScheduledThreadPool(4, new NamedThreadFactory("StatsCollector"));
 
-        hostStatsInterval = NumbersUtil.parseLong(configs.get("host.stats.interval"), 60000L);
-        hostAndVmStatsInterval = NumbersUtil.parseLong(configs.get("vm.stats.interval"), 60000L);
-        storageStatsInterval = NumbersUtil.parseLong(configs.get("storage.stats.interval"), 60000L);
-        volumeStatsInterval = NumbersUtil.parseLong(configs.get("volume.stats.interval"), -1L);
-        vmDiskStatsInterval = NumbersUtil.parseInt(configs.get("vm.disk.stats.interval"), 0);
+		 hostStatsInterval = NumbersUtil.parseLong(configs.get("host.stats.interval"), 60000L);
+		 hostAndVmStatsInterval = NumbersUtil.parseLong(configs.get("vm.stats.interval"), 60000L);
+		 storageStatsInterval = NumbersUtil.parseLong(configs.get("storage.stats.interval"), 60000L);
+		 volumeStatsInterval = NumbersUtil.parseLong(configs.get("volume.stats.interval"), -1L);
+		 autoScaleStatsInterval = NumbersUtil.parseLong(configs.get("autoscale.stats.interval"), 60000L);
+		 vmDiskStatsInterval = NumbersUtil.parseInt(configs.get("vm.disk.stats.interval"), 0);
 
         if (hostStatsInterval > 0) {
             _executor.scheduleWithFixedDelay(new HostCollector(), 15000L, hostStatsInterval, TimeUnit.MILLISECONDS);
@@ -186,9 +210,13 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
             _executor.scheduleWithFixedDelay(new VmStatsCollector(), 15000L, hostAndVmStatsInterval, TimeUnit.MILLISECONDS);
         }
 
-        if (storageStatsInterval > 0) {
-            _executor.scheduleWithFixedDelay(new StorageCollector(), 15000L, storageStatsInterval, TimeUnit.MILLISECONDS);
-        }
+		if (storageStatsInterval > 0) {
+		    _executor.scheduleWithFixedDelay(new StorageCollector(), 15000L, storageStatsInterval, TimeUnit.MILLISECONDS);
+		}
+		 
+		if (autoScaleStatsInterval > 0) {
+            _executor.scheduleWithFixedDelay(new AutoScaleMonitor(), 15000L, autoScaleStatsInterval, TimeUnit.MILLISECONDS);
+		}
 
         if (vmDiskStatsInterval > 0) {
             if (vmDiskStatsInterval < 300)
@@ -578,21 +606,271 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc
                     }
                 }
                 _storagePoolStats = storagePoolStats;
-            } catch (Throwable t) {
-                s_logger.error("Error trying to retrieve storage stats", t);
-            }
-        }
-    }
-
-    public StorageStats getStorageStats(long id) {
-        return _storageStats.get(id);
-    }
-
-    public HostStats getHostStats(long hostId) {
-        return _hostStats.get(hostId);
-    }
-
-    public StorageStats getStoragePoolStats(long id) {
-        return _storagePoolStats.get(id);
-    }
+			} catch (Throwable t) {
+				s_logger.error("Error trying to retrieve storage stats", t);
+			}
+		}
+	}
+
+	class AutoScaleMonitor extends ManagedContextRunnable {
+		@Override
+		protected void runInContext() {
+			try {
+				if (s_logger.isDebugEnabled()) {
+					s_logger.debug("AutoScaling Monitor is running...");
+				}
+				// list all AS VMGroups
+				List<AutoScaleVmGroupVO> asGroups = _asGroupDao.listAll();
+				for (AutoScaleVmGroupVO asGroup : asGroups) {
+					// check group state
+					if (asGroup.getState().equals("enabled")) {
+						// check minimum vm of group
+						Integer currentVM = _asGroupVmDao.countByGroup(asGroup.getId());
+						if (currentVM < asGroup.getMinMembers()) {
+							_asManager.doScaleUp(asGroup.getId(), asGroup.getMinMembers() - currentVM);
+							continue;
+						}
+						
+						//check interval
+						long now = (new Date()).getTime();
+						if (asGroup.getLastInterval() != null)
+							if ((now - asGroup.getLastInterval().getTime()) < (long) asGroup
+									.getInterval()) {
+								continue;
+							}
+
+						// update last_interval
+						asGroup.setLastInterval(new Date());
+						_asGroupDao.persist(asGroup);
+
+						// collect RRDs data for this group
+						if (s_logger.isDebugEnabled()) {
+							s_logger.debug("[AutoScale] Collecting RRDs data...");
+						}
+						Map<String, String> params = new HashMap<String, String>();
+						List <AutoScaleVmGroupVmMapVO> asGroupVmVOs = _asGroupVmDao.listByGroup(asGroup.getId());
+						params.put("total_vm", String.valueOf(asGroupVmVOs.size()));
+						for (int i = 0; i < asGroupVmVOs.size(); i++) {
+							long vmId = asGroupVmVOs.get(i).getInstanceId();
+							VMInstanceVO vmVO = _vmInstance.findById(vmId);
+							//xe vm-list | grep vmname -B 1 | head -n 1 | awk -F':' '{print $2}'
+							params.put("vmname" + String.valueOf(i + 1), vmVO.getInstanceName());
+							params.put("vmid" + String.valueOf(i + 1), String.valueOf(vmVO.getId()));
+
+						}
+						// get random hostid because all vms are in a cluster
+						long vmId = asGroupVmVOs.get(0).getInstanceId();
+						VMInstanceVO vmVO = _vmInstance.findById(vmId);
+						Long receiveHost = vmVO.getHostId();
+
+						// setup parameters phase: duration and counter
+						// list pair [counter, duration]
+						List<Pair<String, Integer>> lstPair = getPairofCounternameAndDuration(asGroup.getId());
+						int total_counter = 0;
+						String[] lstCounter = new String[lstPair.size()];
+						for (int i = 0; i < lstPair.size(); i++) {
+							Pair<String, Integer> pair = lstPair.get(i);
+							String strCounterNames = pair.first();
+							Integer duration = pair.second();
+							
+							lstCounter[i] = strCounterNames.split(",")[0];
+							total_counter++;
+							params.put("duration" + String.valueOf(total_counter), duration.toString());
+							params.put("counter" + String.valueOf(total_counter), lstCounter[i]);
+							params.put("con" + String.valueOf(total_counter), strCounterNames.split(",")[1]);
+						}
+						params.put("total_counter", String.valueOf(total_counter));
+
+						PerformanceMonitorCommand perfMon = new PerformanceMonitorCommand(params, 20);
+
+						try {
+							Answer answer = _agentMgr.send(receiveHost, perfMon);
+							if (answer == null || !answer.getResult()) {
+								s_logger.debug("Failed to send data to node !");
+							} else {
+								String result = answer.getDetails();
+								s_logger.debug("[AutoScale] RRDs collection answer: " + result);
+								HashMap<Long, Double> avgCounter = new HashMap<Long, Double>();
+								
+								// extract data
+								String[] counterElements = result.split(",");
+								if ((counterElements != null) && (counterElements.length > 0)) {
+									for (String string : counterElements) {
+										try {
+											String[] counterVals = string.split(":");
+											String[] counter_vm = counterVals[0].split("\\.");
+											
+											Long counterId = Long.parseLong(counter_vm[1]);
+											Long conditionId = Long.parseLong(params.get("con" + counter_vm[1]));
+											Double coVal = Double.parseDouble(counterVals[1]);
+
+											// Summary of all counter by counterId key
+											if (avgCounter.get(counterId) == null){													
+												/* initialize if data is not set */
+												avgCounter.put(counterId, new Double(0));
+											}
+											
+											String counterName = getCounternamebyCondition(conditionId.longValue());
+											if (counterName == Counter.Source.memory.toString()) {
+												// calculate memory in percent
+												Long profileId = asGroup.getProfileId();
+												AutoScaleVmProfileVO profileVo = _asProfileDao.findById(profileId);
+												ServiceOfferingVO serviceOff = _serviceOfferingDao.findById(profileVo.getServiceOfferingId());
+												int maxRAM = serviceOff.getRamSize();
+
+												// get current RAM percent
+												coVal = coVal / maxRAM;
+											} else {
+												// cpu
+												coVal = coVal * 100;
+											}
+											
+											// update data entry
+											avgCounter.put(counterId, avgCounter.get(counterId)+ coVal);
+											
+										} catch (Exception e) {
+											e.printStackTrace();
+										}
+									}
+
+									String scaleAction = getAutoscaleAction(avgCounter, asGroup.getId(), currentVM);
+									if (scaleAction != null) {
+										s_logger.debug("[AutoScale] Doing scale action: " + scaleAction + " for group " + asGroup.getId());
+										if (scaleAction.equals("scaleup")) {
+											_asManager.doScaleUp(asGroup.getId(), 1);
+										} else {
+											_asManager.doScaleDown(asGroup.getId());
+										}
+									}
+								}
+							}
+
+						} catch (Exception e) {
+							e.printStackTrace();
+						}
+
+					}
+				}
+
+			} catch (Throwable t) {
+				s_logger.error("Error trying to monitor autoscaling", t);
+			}
+
+		}
+		
+		private String getAutoscaleAction(HashMap<Long, Double> avgCounter, long groupId, long currentVM) {
+			
+			List<AutoScaleVmGroupPolicyMapVO> listMap = _asGroupPolicyDao.listByVmGroupId(groupId);
+			if ( (listMap == null) || (listMap.size() == 0) )
+				return null;
+			for (AutoScaleVmGroupPolicyMapVO asVmgPmap : listMap) {
+				AutoScalePolicyVO policyVO = _asPolicyDao.findById(asVmgPmap.getPolicyId());
+				if (policyVO != null) {
+					Integer quitetime = policyVO.getQuietTime();
+					long last_quitetime = policyVO.getLastQuiteTime().getTime();
+					long current_time = (new Date()).getTime();
+
+					// check quite time for this policy
+					if ((current_time - last_quitetime) >= (long) quitetime) {
+
+						// list all condition of this policy
+						boolean bValid = true;
+						List<ConditionVO> lstConditions = getConditionsbyPolicyId(policyVO.getId());
+						if ((lstConditions != null) && (lstConditions.size() > 0)) {
+							// check whole conditions of this policy
+							for (ConditionVO conditionVO : lstConditions) {
+								long threholdValue = conditionVO.getThreshold();
+								Double sum = avgCounter.get(conditionVO.getCounterid());
+								Double avg = sum/ currentVM;
+										
+								Operator op = conditionVO.getRelationalOperator();
+								boolean bConditionCheck = ((op == com.cloud.network.as.Condition.Operator.EQ) && (threholdValue == avg))
+										|| ((op == com.cloud.network.as.Condition.Operator.GE) && (avg >= threholdValue))
+										|| ((op == com.cloud.network.as.Condition.Operator.GT) && (avg > threholdValue))
+										|| ((op == com.cloud.network.as.Condition.Operator.LE) && (avg <= threholdValue))
+										|| ((op == com.cloud.network.as.Condition.Operator.LT) && (avg < threholdValue));
+
+								if (!bConditionCheck) {
+									bValid = false;
+									break;
+								}
+							}
+							if (bValid) {
+								return policyVO.getAction();
+							}
+						}
+					}
+				}
+			}
+			return null;
+		}
+		
+		private List<ConditionVO> getConditionsbyPolicyId(long policyId){
+			List<AutoScalePolicyConditionMapVO> conditionMap = _asConditionMapDao.findByPolicyId(policyId);
+			if ( (conditionMap == null) || (conditionMap.size() == 0))
+				return null;
+			
+			List<ConditionVO> lstResult = new ArrayList<ConditionVO>();
+			for (AutoScalePolicyConditionMapVO asPCmap : conditionMap) {
+				lstResult.add(_asConditionDao.findById(asPCmap.getConditionId()));
+			}
+			
+			return lstResult;
+		}
+
+		public List<Pair<String, Integer>> getPairofCounternameAndDuration(
+				long groupId) {
+			AutoScaleVmGroupVO groupVo = _asGroupDao.findById(groupId);
+			if (groupVo == null)
+				return null;
+			List<Pair<String, Integer>> result = new ArrayList<Pair<String, Integer>>();
+			//list policy map
+			List<AutoScaleVmGroupPolicyMapVO> groupPolicymap = _asGroupPolicyDao.listByVmGroupId(groupVo.getId());
+			if (groupPolicymap == null)
+				return null;
+			for (AutoScaleVmGroupPolicyMapVO gpMap : groupPolicymap) {
+				//get duration
+				AutoScalePolicyVO policyVo = _asPolicyDao.findById(gpMap.getPolicyId());				
+				Integer duration = policyVo.getDuration();
+				//get collection of counter name
+				String counterNames = "";
+				List<AutoScalePolicyConditionMapVO> lstPCmap = _asConditionMapDao.findByPolicyId(policyVo.getId());
+				for (AutoScalePolicyConditionMapVO pcMap : lstPCmap) {
+					String counterName = getCounternamebyCondition(pcMap.getConditionId());
+					
+					counterNames += counterName + "," + pcMap.getConditionId();
+				}
+				// add to result
+				Pair<String, Integer> pair = new Pair<String, Integer>(counterNames, duration);
+				result.add(pair);
+			}
+
+			return result;
+		}
+		
+		public String getCounternamebyCondition(long conditionId){
+			
+			ConditionVO condition = _asConditionDao.findById(conditionId);
+			if (condition == null)
+				return "";
+			
+			long counterId = condition.getCounterid();
+			CounterVO counter = _asCounterDao.findById(counterId);
+			if (counter == null)
+				return "";
+			
+			return counter.getSource().toString();
+		}
+	}
+	public StorageStats getStorageStats(long id) {
+		return _storageStats.get(id);
+	}
+
+	public HostStats getHostStats(long hostId){
+		return _hostStats.get(hostId);
+	}
+
+	public StorageStats getStoragePoolStats(long id) {
+		return _storagePoolStats.get(id);
+	}
 }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/dc151115/setup/db/create-schema.sql
----------------------------------------------------------------------
diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql
index 55cb4cc..2a53a71 100755
--- a/setup/db/create-schema.sql
+++ b/setup/db/create-schema.sql
@@ -152,6 +152,7 @@ DROP TABLE IF EXISTS `cloud`.`template_s3_ref`;
 DROP TABLE IF EXISTS `cloud`.`nicira_nvp_router_map`;
 DROP TABLE IF EXISTS `cloud`.`external_bigswitch_vns_devices`;
 DROP TABLE IF EXISTS `cloud`.`autoscale_vmgroup_policy_map`;
+DROP TABLE IF EXISTS `cloud`.`autoscale_vmgroup_vm_map`;
 DROP TABLE IF EXISTS `cloud`.`autoscale_policy_condition_map`;
 DROP TABLE IF EXISTS `cloud`.`autoscale_vmgroups`;
 DROP TABLE IF EXISTS `cloud`.`autoscale_vmprofiles`;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/dc151115/setup/db/db/schema-40to410.sql
----------------------------------------------------------------------
diff --git a/setup/db/db/schema-40to410.sql b/setup/db/db/schema-40to410.sql
index 24f6d4c..6c5529a 100644
--- a/setup/db/db/schema-40to410.sql
+++ b/setup/db/db/schema-40to410.sql
@@ -345,6 +345,7 @@ CREATE TABLE `cloud`.`autoscale_policies` (
   `account_id` bigint unsigned NOT NULL,
   `duration` int unsigned NOT NULL,
   `quiet_time` int unsigned NOT NULL,
+  `last_quiet_time` datetime DEFAULT NULL,
   `action` varchar(15),
   `created` datetime NOT NULL COMMENT 'date created',
   `removed` datetime COMMENT 'date removed if not null',
@@ -366,6 +367,7 @@ CREATE TABLE `cloud`.`autoscale_vmgroups` (
   `max_members` int unsigned NOT NULL,
   `member_port` int unsigned NOT NULL,
   `interval` int unsigned NOT NULL,
+  `last_interval` datetime DEFAULT NULL,
   `profile_id` bigint unsigned NOT NULL,
   `state` varchar(255) NOT NULL COMMENT 'enabled or disabled, a vmgroup is disabled to stop autoscaling activity',
   `created` datetime NOT NULL COMMENT 'date created',
@@ -401,10 +403,22 @@ CREATE TABLE `cloud`.`autoscale_vmgroup_policy_map` (
   INDEX `i_autoscale_vmgroup_policy_map__vmgroup_id`(`vmgroup_id`)
 ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
 
-INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (1, UUID(), 'snmp','Linux User CPU - percentage', '1.3.6.1.4.1.2021.11.9.0', now());
-INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (2, UUID(), 'snmp','Linux System CPU - percentage', '1.3.6.1.4.1.2021.11.10.0', now());
-INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (3, UUID(), 'snmp','Linux CPU Idle - percentage', '1.3.6.1.4.1.2021.11.11.0', now());
-INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (100, UUID(), 'netscaler','Response Time - microseconds', 'RESPTIME', now());
+CREATE TABLE `cloud`.`autoscale_vmgroup_vm_map` (
+  `id` bigint unsigned NOT NULL auto_increment,
+  `vmgroup_id` bigint unsigned NOT NULL,
+  `instance_id` bigint unsigned NOT NULL,
+  PRIMARY KEY  (`id`),
+  CONSTRAINT `fk_autoscale_vmgroup_vm_map__vmgroup_id` FOREIGN KEY `fk_autoscale_vmgroup_vm_map__vmgroup_id` (`vmgroup_id`) REFERENCES `autoscale_vmgroups` (`id`) ON DELETE CASCADE,
+  CONSTRAINT `fk_autoscale_vmgroup_vm_map__instance_id` FOREIGN KEY `fk_autoscale_vmgroup_vm_map__instance_id` (`instance_id`) REFERENCES `vm_instance` (`id`),
+  INDEX `i_autoscale_vmgroup_vm_map__vmgroup_id`(`vmgroup_id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+--INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (1, UUID(), 'snmp','Linux User CPU - percentage', '1.3.6.1.4.1.2021.11.9.0', now());
+--INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (2, UUID(), 'snmp','Linux System CPU - percentage', '1.3.6.1.4.1.2021.11.10.0', now());
+--INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (3, UUID(), 'snmp','Linux CPU Idle - percentage', '1.3.6.1.4.1.2021.11.11.0', now());
+--INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (100, UUID(), 'netscaler','Response Time - microseconds', 'RESPTIME', now());
+INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (1, UUID(), 'cpu','Linux User CPU - percentage', '1.3.6.1.4.1.2021.11.9.0', now());
+INSERT INTO `cloud`.`counter` (id, uuid, source, name, value,created) VALUES (2, UUID(), 'memory','Linux User RAM - percentage', '1.3.6.1.4.1.2021.11.10.0', now());
 
 CREATE TABLE  `cloud`.`user_ipv6_address` (
   `id` bigint unsigned NOT NULL UNIQUE auto_increment,

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/dc151115/utils/conf/db.properties
----------------------------------------------------------------------
diff --git a/utils/conf/db.properties b/utils/conf/db.properties
index e1b5fe9..709d79e 100644
--- a/utils/conf/db.properties
+++ b/utils/conf/db.properties
@@ -25,7 +25,7 @@ region.id=1
 # CloudStack database settings
 db.cloud.username=cloud
 db.cloud.password=cloud
-db.root.password=
+db.root.password=123
 db.cloud.host=localhost
 db.cloud.port=3306
 db.cloud.name=cloud


Mime
View raw message