incubator-cloudstack-commits mailing list archives

From ahu...@apache.org
Subject [20/25] removed import of ComponentLocator and Inject from all files
Date Thu, 10 Jan 2013 22:47:20 GMT
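Every hunk in this patch follows the same mechanical change: ComponentLocator-based lookups (and the Adapters helper) are removed, and dependencies are instead declared as @Inject fields that the container wires up. A minimal before/after sketch of that shape, using the DomainDaoImpl field from the ResourceCountDaoImpl hunk further down (the surrounding class and import paths here are illustrative assumptions, not part of the patch):

    import javax.inject.Inject;
    import org.springframework.stereotype.Component;

    import com.cloud.domain.dao.DomainDaoImpl;

    // Illustrative consumer class -- not CloudStack code.
    @Component
    public class ExampleDaoConsumer {

        // Old style, removed by this series (see the commented-out line kept in
        // ResourceCountDaoImpl below):
        //   protected final DomainDaoImpl _domainDao = ComponentLocator.inject(DomainDaoImpl.class);

        // New style: the dependency is injected by the container.
        @Inject
        protected DomainDaoImpl _domainDao;
    }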
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/56e5fbde/server/src/com/cloud/cluster/ClusterManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/cluster/ClusterManagerImpl.java b/server/src/com/cloud/cluster/ClusterManagerImpl.java
index 013034f..f71a866 100755
--- a/server/src/com/cloud/cluster/ClusterManagerImpl.java
+++ b/server/src/com/cloud/cluster/ClusterManagerImpl.java
@@ -29,7 +29,6 @@ import java.sql.SQLException;
 import java.sql.SQLRecoverableException;
 import java.util.ArrayList;
 import java.util.Date;
-import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -75,8 +74,6 @@ import com.cloud.utils.DateUtil;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Profiler;
 import com.cloud.utils.PropertiesUtil;
-import com.cloud.utils.component.Adapters;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.concurrency.NamedThreadFactory;
 import com.cloud.utils.db.ConnectionConcierge;
 import com.cloud.utils.db.DB;
@@ -123,7 +120,7 @@ public class ClusterManagerImpl implements ClusterManager {
     private final ExecutorService _executor;
 
     private ClusterServiceAdapter _currentServiceAdapter;
-   
+
     @Inject
     private List<ClusterServiceAdapter> _serviceAdapters;
 
@@ -149,11 +146,11 @@ public class ClusterManagerImpl implements ClusterManager {
     private boolean _agentLBEnabled = false;
     private double _connectedAgentsThreshold = 0.7;
     private static boolean _agentLbHappened = false;
-    
-    private List<ClusterServicePdu> _clusterPduOutgoingQueue = new ArrayList<ClusterServicePdu>();
-    private List<ClusterServicePdu> _clusterPduIncomingQueue = new ArrayList<ClusterServicePdu>();
-    private Map<Long, ClusterServiceRequestPdu> _outgoingPdusWaitingForAck = new HashMap<Long, ClusterServiceRequestPdu>();
-    
+
+    private final List<ClusterServicePdu> _clusterPduOutgoingQueue = new ArrayList<ClusterServicePdu>();
+    private final List<ClusterServicePdu> _clusterPduIncomingQueue = new ArrayList<ClusterServicePdu>();
+    private final Map<Long, ClusterServiceRequestPdu> _outgoingPdusWaitingForAck = new HashMap<Long, ClusterServiceRequestPdu>();
+
     public ClusterManagerImpl() {
         _clusterPeers = new HashMap<String, ClusterService>();
 
@@ -164,13 +161,13 @@ public class ClusterManagerImpl implements ClusterManager {
         //
         _executor = Executors.newCachedThreadPool(new NamedThreadFactory("Cluster-Worker"));
     }
-    
+
     private void registerRequestPdu(ClusterServiceRequestPdu pdu) {
         synchronized(_outgoingPdusWaitingForAck) {
             _outgoingPdusWaitingForAck.put(pdu.getSequenceId(), pdu);
         }
     }
-    
+
     private ClusterServiceRequestPdu popRequestPdu(long ackSequenceId) {
         synchronized(_outgoingPdusWaitingForAck) {
             if(_outgoingPdusWaitingForAck.get(ackSequenceId) != null) {
@@ -179,10 +176,10 @@ public class ClusterManagerImpl implements ClusterManager {
                 return pdu;
             }
         }
-        
+
         return null;
     }
-    
+
     private void cancelClusterRequestToPeer(String strPeer) {
         List<ClusterServiceRequestPdu> candidates = new ArrayList<ClusterServiceRequestPdu>();
         synchronized(_outgoingPdusWaitingForAck) {
@@ -195,7 +192,7 @@ public class ClusterManagerImpl implements ClusterManager {
                 _outgoingPdusWaitingForAck.remove(pdu.getSequenceId());
             }
         }
-        
+
         for(ClusterServiceRequestPdu pdu : candidates) {
             s_logger.warn("Cancel cluster request PDU to peer: " + strPeer + ", pdu: " + _gson.toJson(pdu));
             synchronized(pdu) {
@@ -203,76 +200,78 @@ public class ClusterManagerImpl implements ClusterManager {
             }
         }
     }
-    
+
     private void addOutgoingClusterPdu(ClusterServicePdu pdu) {
-    	synchronized(_clusterPduOutgoingQueue) {
-    		_clusterPduOutgoingQueue.add(pdu);
-    		_clusterPduOutgoingQueue.notifyAll();
-    	}
+        synchronized(_clusterPduOutgoingQueue) {
+            _clusterPduOutgoingQueue.add(pdu);
+            _clusterPduOutgoingQueue.notifyAll();
+        }
     }
-    
+
     private ClusterServicePdu popOutgoingClusterPdu(long timeoutMs) {
-    	synchronized(_clusterPduOutgoingQueue) {
-    		try {
-				_clusterPduOutgoingQueue.wait(timeoutMs);
-			} catch (InterruptedException e) {
-			}
-			
-			if(_clusterPduOutgoingQueue.size() > 0) {
-				ClusterServicePdu pdu = _clusterPduOutgoingQueue.get(0);
-				_clusterPduOutgoingQueue.remove(0);
-				return pdu;
-			}
-    	}
-    	return null;
+        synchronized(_clusterPduOutgoingQueue) {
+            try {
+                _clusterPduOutgoingQueue.wait(timeoutMs);
+            } catch (InterruptedException e) {
+            }
+
+            if(_clusterPduOutgoingQueue.size() > 0) {
+                ClusterServicePdu pdu = _clusterPduOutgoingQueue.get(0);
+                _clusterPduOutgoingQueue.remove(0);
+                return pdu;
+            }
+        }
+        return null;
     }
 
     private void addIncomingClusterPdu(ClusterServicePdu pdu) {
-    	synchronized(_clusterPduIncomingQueue) {
-    		_clusterPduIncomingQueue.add(pdu);
-    		_clusterPduIncomingQueue.notifyAll();
-    	}
+        synchronized(_clusterPduIncomingQueue) {
+            _clusterPduIncomingQueue.add(pdu);
+            _clusterPduIncomingQueue.notifyAll();
+        }
     }
-    
+
     private ClusterServicePdu popIncomingClusterPdu(long timeoutMs) {
-    	synchronized(_clusterPduIncomingQueue) {
-    		try {
-    			_clusterPduIncomingQueue.wait(timeoutMs);
-			} catch (InterruptedException e) {
-			}
-			
-			if(_clusterPduIncomingQueue.size() > 0) {
-				ClusterServicePdu pdu = _clusterPduIncomingQueue.get(0);
-				_clusterPduIncomingQueue.remove(0);
-				return pdu;
-			}
-    	}
-    	return null;
+        synchronized(_clusterPduIncomingQueue) {
+            try {
+                _clusterPduIncomingQueue.wait(timeoutMs);
+            } catch (InterruptedException e) {
+            }
+
+            if(_clusterPduIncomingQueue.size() > 0) {
+                ClusterServicePdu pdu = _clusterPduIncomingQueue.get(0);
+                _clusterPduIncomingQueue.remove(0);
+                return pdu;
+            }
+        }
+        return null;
     }
-    
+
     private Runnable getClusterPduSendingTask() {
         return new Runnable() {
+            @Override
             public void run() {
                 onSendingClusterPdu();
             }
         };
     }
-    
+
     private Runnable getClusterPduNotificationTask() {
         return new Runnable() {
+            @Override
             public void run() {
                 onNotifyingClusterPdu();
             }
         };
     }
-    
+
     private void onSendingClusterPdu() {
         while(true) {
             try {
                 ClusterServicePdu pdu = popOutgoingClusterPdu(1000);
                 if(pdu == null)
-                	continue;
-                	
+                    continue;
+
                 ClusterService peerService =  null;
                 for(int i = 0; i < 2; i++) {
                     try {
@@ -285,20 +284,20 @@ public class ClusterManagerImpl implements ClusterManager {
                         try {
                             if(s_logger.isDebugEnabled()) {
                                 s_logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + ". agent: " + pdu.getAgentId() 
-                                    + ", pdu seq: " + pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage());
+                                        + ", pdu seq: " + pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage());
                             }
 
                             long startTick = System.currentTimeMillis();
                             String strResult = peerService.execute(pdu);
                             if(s_logger.isDebugEnabled()) {
                                 s_logger.debug("Cluster PDU " + getSelfPeerName() + " -> " + pdu.getDestPeer() + " completed. time: " +
-                                    (System.currentTimeMillis() - startTick) + "ms. agent: " + pdu.getAgentId() 
-                                     + ", pdu seq: " + pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage());
+                                        (System.currentTimeMillis() - startTick) + "ms. agent: " + pdu.getAgentId() 
+                                        + ", pdu seq: " + pdu.getSequenceId() + ", pdu ack seq: " + pdu.getAckSequenceId() + ", json: " + pdu.getJsonPackage());
                             }
-                            
+
                             if("true".equals(strResult))
                                 break;
-                            
+
                         } catch (RemoteException e) {
                             invalidatePeerService(pdu.getDestPeer());
                             if(s_logger.isInfoEnabled()) {
@@ -313,50 +312,51 @@ public class ClusterManagerImpl implements ClusterManager {
             }
         }
     }
-    
+
     private void onNotifyingClusterPdu() {
         while(true) {
             try {
                 final ClusterServicePdu pdu = popIncomingClusterPdu(1000);
                 if(pdu == null)
-                	continue;
+                    continue;
 
                 _executor.execute(new Runnable() {
-                	public void run() {
-		                if(pdu.getPduType() == ClusterServicePdu.PDU_TYPE_RESPONSE) {
-		                    ClusterServiceRequestPdu requestPdu = popRequestPdu(pdu.getAckSequenceId());
-		                    if(requestPdu != null) {
-		                        requestPdu.setResponseResult(pdu.getJsonPackage());
-		                        synchronized(requestPdu) {
-		                            requestPdu.notifyAll();
-		                        }
-		                    } else {
-		                        s_logger.warn("Original request has already been cancelled. pdu: " + _gson.toJson(pdu));
-		                    }
-		                } else {
-		                    String result = dispatchClusterServicePdu(pdu);
-		                    if(result == null)
-		                        result = "";
-		                    
-		                    if(pdu.getPduType() == ClusterServicePdu.PDU_TYPE_REQUEST) {
-			                    ClusterServicePdu responsePdu = new ClusterServicePdu();
-			                    responsePdu.setPduType(ClusterServicePdu.PDU_TYPE_RESPONSE);
-			                    responsePdu.setSourcePeer(pdu.getDestPeer());
-			                    responsePdu.setDestPeer(pdu.getSourcePeer());
-			                    responsePdu.setAckSequenceId(pdu.getSequenceId());
-			                    responsePdu.setJsonPackage(result);
-			                    
-			                    addOutgoingClusterPdu(responsePdu);
-		                    }
-		                }
-                	}
+                    @Override
+                    public void run() {
+                        if(pdu.getPduType() == ClusterServicePdu.PDU_TYPE_RESPONSE) {
+                            ClusterServiceRequestPdu requestPdu = popRequestPdu(pdu.getAckSequenceId());
+                            if(requestPdu != null) {
+                                requestPdu.setResponseResult(pdu.getJsonPackage());
+                                synchronized(requestPdu) {
+                                    requestPdu.notifyAll();
+                                }
+                            } else {
+                                s_logger.warn("Original request has already been cancelled. pdu: " + _gson.toJson(pdu));
+                            }
+                        } else {
+                            String result = dispatchClusterServicePdu(pdu);
+                            if(result == null)
+                                result = "";
+
+                            if(pdu.getPduType() == ClusterServicePdu.PDU_TYPE_REQUEST) {
+                                ClusterServicePdu responsePdu = new ClusterServicePdu();
+                                responsePdu.setPduType(ClusterServicePdu.PDU_TYPE_RESPONSE);
+                                responsePdu.setSourcePeer(pdu.getDestPeer());
+                                responsePdu.setDestPeer(pdu.getSourcePeer());
+                                responsePdu.setAckSequenceId(pdu.getSequenceId());
+                                responsePdu.setJsonPackage(result);
+
+                                addOutgoingClusterPdu(responsePdu);
+                            }
+                        }
+                    }
                 });
             } catch(Throwable e) {
                 s_logger.error("Unexcpeted exception: ", e);
             }
         }
     }
-    
+
     private String dispatchClusterServicePdu(ClusterServicePdu pdu) {
 
         if(s_logger.isDebugEnabled()) {
@@ -370,7 +370,7 @@ public class ClusterManagerImpl implements ClusterManager {
             assert(false);
             s_logger.error("Excection in gson decoding : ", e);
         }
-        
+
         if (cmds.length == 1 && cmds[0] instanceof ChangeAgentCommand) {  //intercepted
             ChangeAgentCommand cmd = (ChangeAgentCommand)cmds[0];
 
@@ -416,22 +416,22 @@ public class ClusterManagerImpl implements ClusterManager {
             answers[0] = new Answer(cmd, result, null);
             return _gson.toJson(answers);
         } else if (cmds.length == 1 && cmds[0] instanceof PropagateResourceEventCommand ) {
-        	PropagateResourceEventCommand cmd = (PropagateResourceEventCommand) cmds[0];
-        	
-        	s_logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId());
-        	
-        	boolean result = false;
-        	try {
-        		result = executeResourceUserRequest(cmd.getHostId(), cmd.getEvent());
-        		s_logger.debug("Result is " + result);
-        	} catch (AgentUnavailableException ex) {
-        		s_logger.warn("Agent is unavailable", ex);
-        		return null;
-        	}
-        	
-        	Answer[] answers = new Answer[1];
-        	answers[0] = new Answer(cmd, result, null);
-        	return _gson.toJson(answers);
+            PropagateResourceEventCommand cmd = (PropagateResourceEventCommand) cmds[0];
+
+            s_logger.debug("Intercepting command to propagate event " + cmd.getEvent().name() + " for host " + cmd.getHostId());
+
+            boolean result = false;
+            try {
+                result = executeResourceUserRequest(cmd.getHostId(), cmd.getEvent());
+                s_logger.debug("Result is " + result);
+            } catch (AgentUnavailableException ex) {
+                s_logger.warn("Agent is unavailable", ex);
+                return null;
+            }
+
+            Answer[] answers = new Answer[1];
+            answers[0] = new Answer(cmd, result, null);
+            return _gson.toJson(answers);
         }
 
         try {
@@ -461,14 +461,15 @@ public class ClusterManagerImpl implements ClusterManager {
         } catch (OperationTimedoutException e) {
             s_logger.warn("Timed Out", e);
         }
-        
+
         return null;
     }
 
+    @Override
     public void OnReceiveClusterServicePdu(ClusterServicePdu pdu) {
-    	addIncomingClusterPdu(pdu);
+        addIncomingClusterPdu(pdu);
     }
-    
+
     @Override
     public Answer[] sendToAgent(Long hostId, Command[] cmds, boolean stopOnError) throws AgentUnavailableException, OperationTimedoutException {
         Commands commands = new Commands(stopOnError ? OnError.Stop : OnError.Continue);
@@ -558,7 +559,7 @@ public class ClusterManagerImpl implements ClusterManager {
             s_logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " " +
                     _gson.toJson(cmds, Command[].class));
         }
-        
+
         ClusterServiceRequestPdu pdu = new ClusterServiceRequestPdu();
         pdu.setSourcePeer(getSelfPeerName());
         pdu.setDestPeer(strPeer);
@@ -567,7 +568,7 @@ public class ClusterManagerImpl implements ClusterManager {
         pdu.setStopOnError(stopOnError);
         registerRequestPdu(pdu);
         addOutgoingClusterPdu(pdu);
-        
+
         synchronized(pdu) {
             try {
                 pdu.wait();
@@ -577,9 +578,9 @@ public class ClusterManagerImpl implements ClusterManager {
 
         if(s_logger.isDebugEnabled()) {
             s_logger.debug(getSelfPeerName() + " -> " + strPeer + "." + agentId + " completed. result: " +
-                pdu.getResponseResult());
+                    pdu.getResponseResult());
         }
-        
+
         if(pdu.getResponseResult() != null && pdu.getResponseResult().length() > 0) {
             try {
                 return _gson.fromJson(pdu.getResponseResult(), Answer[].class);
@@ -590,7 +591,7 @@ public class ClusterManagerImpl implements ClusterManager {
 
         return null;
     }
-    
+
     @Override
     public String getPeerName(long agentHostId) {
 
@@ -625,18 +626,18 @@ public class ClusterManagerImpl implements ClusterManager {
         // Note : we don't check duplicates
         synchronized (_listeners) {
 
-    		s_logger.info("register cluster listener " + listener.getClass());
-    		
-        	_listeners.add(listener);
+            s_logger.info("register cluster listener " + listener.getClass());
+
+            _listeners.add(listener);
         }
     }
 
     @Override
     public void unregisterListener(ClusterManagerListener listener) {
         synchronized(_listeners) {
-    		s_logger.info("unregister cluster listener " + listener.getClass());
-        	
-        	_listeners.remove(listener);
+            s_logger.info("unregister cluster listener " + listener.getClass());
+
+            _listeners.remove(listener);
         }
     }
 
@@ -663,7 +664,7 @@ public class ClusterManagerImpl implements ClusterManager {
         if(s_logger.isDebugEnabled()) {
             s_logger.debug("Notify management server node left to listeners.");
         }
-        
+
         for(ManagementServerHostVO mshost : nodeList) {
             if(s_logger.isDebugEnabled())
                 s_logger.debug("Leaving node, IP: " + mshost.getServiceIP() + ", msid: " + mshost.getMsid());
@@ -731,32 +732,32 @@ public class ClusterManagerImpl implements ClusterManager {
                     Profiler profilerHeartbeatUpdate = new Profiler();
                     Profiler profilerPeerScan = new Profiler();
                     Profiler profilerAgentLB = new Profiler();
-                    
+
                     try {
                         profiler.start();
-                        
+
                         profilerHeartbeatUpdate.start();
                         txn.transitToUserManagedConnection(getHeartbeatConnection());
                         if(s_logger.isTraceEnabled()) {
                             s_logger.trace("Cluster manager heartbeat update, id:" + _mshostId);
                         }
-    
+
                         _mshostDao.update(_mshostId, getCurrentRunId(), DateUtil.currentGMTTime());
                         profilerHeartbeatUpdate.stop();
-    
+
                         profilerPeerScan.start();
                         if (s_logger.isTraceEnabled()) {
                             s_logger.trace("Cluster manager peer-scan, id:" + _mshostId);
                         }
-    
+
                         if (!_peerScanInited) {
                             _peerScanInited = true;
                             initPeerScan();
                         }
-                        
+
                         peerScan();
                         profilerPeerScan.stop();
-                        
+
                         profilerAgentLB.start();
                         //initiate agent lb task will be scheduled and executed only once, and only when number of agents loaded exceeds _connectedAgentsThreshold
                         if (_agentLBEnabled && !_agentLbHappened) {
@@ -764,7 +765,7 @@ public class ClusterManagerImpl implements ClusterManager {
                             sc.addAnd(sc.getEntity().getManagementServerId(), Op.NNULL);
                             sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.Routing);
                             List<HostVO> allManagedRoutingAgents = sc.list();
-                            
+
                             sc = SearchCriteria2.create(HostVO.class);
                             sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.Routing);
                             List<HostVO> allAgents = sc.list();
@@ -784,16 +785,16 @@ public class ClusterManagerImpl implements ClusterManager {
                         profilerAgentLB.stop();
                     } finally {
                         profiler.stop();
-                        
+
                         if(profiler.getDuration() >= _heartbeatInterval) {
                             if(s_logger.isDebugEnabled())
                                 s_logger.debug("Management server heartbeat takes too long to finish. profiler: " + profiler.toString() + 
-                                    ", profilerHeartbeatUpdate: " + profilerHeartbeatUpdate.toString() +
-                                    ", profilerPeerScan: " + profilerPeerScan.toString() +
-                                    ", profilerAgentLB: " + profilerAgentLB.toString());
+                                        ", profilerHeartbeatUpdate: " + profilerHeartbeatUpdate.toString() +
+                                        ", profilerPeerScan: " + profilerPeerScan.toString() +
+                                        ", profilerAgentLB: " + profilerAgentLB.toString());
                         }
                     }
-                    
+
                 } catch(CloudRuntimeException e) {
                     s_logger.error("Runtime DB exception ", e.getCause());
 
@@ -933,33 +934,33 @@ public class ClusterManagerImpl implements ClusterManager {
             this._notificationMsgs.add(msg);
             this._notificationMsgs.notifyAll();
         }
-        
+
         switch(msg.getMessageType()) {
         case nodeAdded:
-            {
-                List<ManagementServerHostVO> l = msg.getNodes();
-                if(l != null && l.size() > 0) {
-                    for(ManagementServerHostVO mshost: l) {
-                        _mshostPeerDao.updatePeerInfo(_mshostId, mshost.getId(), mshost.getRunid(), ManagementServerHost.State.Up);
-                    }
+        {
+            List<ManagementServerHostVO> l = msg.getNodes();
+            if(l != null && l.size() > 0) {
+                for(ManagementServerHostVO mshost: l) {
+                    _mshostPeerDao.updatePeerInfo(_mshostId, mshost.getId(), mshost.getRunid(), ManagementServerHost.State.Up);
                 }
             }
-            break;
-            
+        }
+        break;
+
         case nodeRemoved:
-            {
-                List<ManagementServerHostVO> l = msg.getNodes();
-                if(l != null && l.size() > 0) {
-                    for(ManagementServerHostVO mshost: l) {
-                        _mshostPeerDao.updatePeerInfo(_mshostId, mshost.getId(), mshost.getRunid(), ManagementServerHost.State.Down);
-                    }
+        {
+            List<ManagementServerHostVO> l = msg.getNodes();
+            if(l != null && l.size() > 0) {
+                for(ManagementServerHostVO mshost: l) {
+                    _mshostPeerDao.updatePeerInfo(_mshostId, mshost.getId(), mshost.getRunid(), ManagementServerHost.State.Down);
                 }
             }
-            break;
-            
+        }
+        break;
+
         default :
             break;
-        
+
         }
     }
 
@@ -978,39 +979,39 @@ public class ClusterManagerImpl implements ClusterManager {
         // missed cleanup
         Date cutTime = DateUtil.currentGMTTime();
         List<ManagementServerHostVO> inactiveList = _mshostDao.getInactiveList(new Date(cutTime.getTime() - _heartbeatThreshold));
-       
+
         // We don't have foreign key constraints to enforce the mgmt_server_id integrity in host table, when user manually 
         // remove records from mshost table, this will leave orphan mgmt_serve_id reference in host table.
         List<Long> orphanList = _mshostDao.listOrphanMsids();
         if(orphanList.size() > 0) {
-	        for(Long orphanMsid : orphanList) {
-	        	// construct fake ManagementServerHostVO based on orphan MSID
-	        	s_logger.info("Add orphan management server msid found in host table to initial clustering notification, orphan msid: " + orphanMsid);
-	        	inactiveList.add(new ManagementServerHostVO(orphanMsid, 0, "orphan", 0, new Date()));
-	        }
+            for(Long orphanMsid : orphanList) {
+                // construct fake ManagementServerHostVO based on orphan MSID
+                s_logger.info("Add orphan management server msid found in host table to initial clustering notification, orphan msid: " + orphanMsid);
+                inactiveList.add(new ManagementServerHostVO(orphanMsid, 0, "orphan", 0, new Date()));
+            }
         } else {
-        	s_logger.info("We are good, no orphan management server msid in host table is found");
+            s_logger.info("We are good, no orphan management server msid in host table is found");
         }
-        
+
         if(inactiveList.size() > 0) {
-        	if(s_logger.isInfoEnabled()) {
-        		s_logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp");
-        		for(ManagementServerHostVO host : inactiveList)
-        			s_logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() + ", version: " + host.getVersion());
-        	}
+            if(s_logger.isInfoEnabled()) {
+                s_logger.info("Found " + inactiveList.size() + " inactive management server node based on timestamp");
+                for(ManagementServerHostVO host : inactiveList)
+                    s_logger.info("management server node msid: " + host.getMsid() + ", name: " + host.getName() + ", service ip: " + host.getServiceIP() + ", version: " + host.getVersion());
+            }
 
-        	List<ManagementServerHostVO> downHostList = new ArrayList<ManagementServerHostVO>();
+            List<ManagementServerHostVO> downHostList = new ArrayList<ManagementServerHostVO>();
             for(ManagementServerHostVO host : inactiveList) {
-	            if(!pingManagementNode(host)) {
-	                s_logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and also not pingable");
-	                downHostList.add(host);	
-	            }
+                if(!pingManagementNode(host)) {
+                    s_logger.warn("Management node " + host.getId() + " is detected inactive by timestamp and also not pingable");
+                    downHostList.add(host);	
+                }
             }
-            
+
             if(downHostList.size() > 0)
-            	this.queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeRemoved, downHostList));
+                this.queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeRemoved, downHostList));
         } else {
-        	s_logger.info("No inactive management server node found");
+            s_logger.info("No inactive management server node found");
         }
     }
 
@@ -1019,7 +1020,7 @@ public class ClusterManagerImpl implements ClusterManager {
 
         Profiler profiler = new Profiler();
         profiler.start();
-        
+
         Profiler profilerQueryActiveList = new Profiler();
         profilerQueryActiveList.start();
         List<ManagementServerHostVO> currentList = _mshostDao.getActiveList(new Date(cutTime.getTime() - _heartbeatThreshold));
@@ -1031,13 +1032,13 @@ public class ClusterManagerImpl implements ClusterManager {
         List<ManagementServerHostVO> invalidatedNodeList = new ArrayList<ManagementServerHostVO>();
 
         if(_mshostId != null) {
-            
+
             if(_mshostPeerDao.countStateSeenInPeers(_mshostId, _runId, ManagementServerHost.State.Down) > 0) {
                 String msg = "We have detected that at least one management server peer reports that this management server is down, perform active fencing to avoid split-brain situation";
                 s_logger.error(msg);
                 throw new ActiveFencingException(msg);
             }
-            
+
             // only if we have already attached to cluster, will we start to check leaving nodes
             for(Map.Entry<Long, ManagementServerHostVO>  entry : _activePeers.entrySet()) {
 
@@ -1070,7 +1071,7 @@ public class ClusterManagerImpl implements ClusterManager {
             }
         }
         profilerSyncClusterInfo.stop();
-        
+
         Profiler profilerInvalidatedNodeList = new Profiler();
         profilerInvalidatedNodeList.start();
         // process invalidated node list
@@ -1134,16 +1135,16 @@ public class ClusterManagerImpl implements ClusterManager {
         if(newNodeList.size() > 0) {
             this.queueNotification(new ClusterManagerMessage(ClusterManagerMessage.MessageType.nodeAdded, newNodeList));
         }
-        
+
         profiler.stop();
-        
+
         if(profiler.getDuration() >= this._heartbeatInterval) {
             if(s_logger.isDebugEnabled())
                 s_logger.debug("Peer scan takes too long to finish. profiler: " + profiler.toString()
-                  + ", profilerQueryActiveList: " + profilerQueryActiveList.toString()
-                  + ", profilerSyncClusterInfo: " + profilerSyncClusterInfo.toString()
-                  + ", profilerInvalidatedNodeList: " + profilerInvalidatedNodeList.toString()
-                  + ", profilerRemovedList: " + profilerRemovedList.toString());
+                        + ", profilerQueryActiveList: " + profilerQueryActiveList.toString()
+                        + ", profilerSyncClusterInfo: " + profilerSyncClusterInfo.toString()
+                        + ", profilerInvalidatedNodeList: " + profilerInvalidatedNodeList.toString()
+                        + ", profilerRemovedList: " + profilerRemovedList.toString());
         }
     }
 
@@ -1206,7 +1207,7 @@ public class ClusterManagerImpl implements ClusterManager {
             if (s_logger.isInfoEnabled()) {
                 s_logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + _currentServiceAdapter.getServicePort());
             }
-            
+
             _mshostPeerDao.clearPeerInfo(_mshostId);
 
             // use seperate thread for heartbeat updates
@@ -1294,8 +1295,8 @@ public class ClusterManagerImpl implements ClusterManager {
         }
 
         for(int i = 0; i < DEFAULT_OUTGOING_WORKERS; i++)
-        	_executor.execute(getClusterPduSendingTask());
-        
+            _executor.execute(getClusterPduSendingTask());
+
         // notification task itself in turn works as a task dispatcher
         _executor.execute(getClusterPduNotificationTask());
 
@@ -1309,9 +1310,9 @@ public class ClusterManagerImpl implements ClusterManager {
         }
 
         _agentLBEnabled = Boolean.valueOf(_configDao.getValue(Config.AgentLbEnable.key()));
-        
+
         String connectedAgentsThreshold = configs.get("agent.load.threshold");
-        
+
         if (connectedAgentsThreshold != null) {
             _connectedAgentsThreshold = Double.parseDouble(connectedAgentsThreshold);
         }
@@ -1365,7 +1366,7 @@ public class ClusterManagerImpl implements ClusterManager {
             s_logger.info("ping management node cluster service can not be performed on self");
             return false;
         }
-     
+
         int retry = 10;
         while (--retry > 0) {
             SocketChannel sch = null;
@@ -1381,7 +1382,7 @@ public class ClusterManagerImpl implements ClusterManager {
             } catch (IOException e) {
                 if (e instanceof ConnectException) {
                     s_logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " due to ConnectException", e);
-                	return false;
+                    return false;
                 }
             } finally {
                 if (sch != null) {
@@ -1397,7 +1398,7 @@ public class ClusterManagerImpl implements ClusterManager {
             } catch (InterruptedException ex) {
             }
         }
-        
+
         s_logger.error("Unable to ping management server at " + targetIp + ":" + mshost.getServicePort() + " after retries");
         return false;
     }
@@ -1455,7 +1456,7 @@ public class ClusterManagerImpl implements ClusterManager {
     public  boolean isAgentRebalanceEnabled() {
         return _agentLBEnabled;
     }
-    
+
     @Override
     public Boolean propagateResourceEvent(long agentId, ResourceState.Event event) throws AgentUnavailableException {
         final String msPeer = getPeerName(agentId);
@@ -1480,7 +1481,7 @@ public class ClusterManagerImpl implements ClusterManager {
 
         return answers[0].getResult();
     }
-    
+
     @Override
     public boolean executeResourceUserRequest(long hostId, ResourceState.Event event) throws AgentUnavailableException {
         return _resourceMgr.executeUserRequest(hostId, event);

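The addOutgoingClusterPdu/popOutgoingClusterPdu and addIncomingClusterPdu/popIncomingClusterPdu methods re-indented above all use the same synchronized wait/notifyAll hand-off between producer and consumer threads. For readers following the re-indentation, here is a standalone sketch of that pattern (class and method names are hypothetical, not CloudStack code; the real methods wait for up to the timeout before checking the queue):

    import java.util.ArrayList;
    import java.util.List;

    // Standalone illustration of the wait/notifyAll queue hand-off used by the
    // add*/pop*ClusterPdu methods above. Names are hypothetical.
    class SimplePduQueue<T> {
        private final List<T> queue = new ArrayList<T>();

        void add(T item) {
            synchronized (queue) {
                queue.add(item);
                queue.notifyAll();   // wake any thread blocked in pop()
            }
        }

        T pop(long timeoutMs) {
            synchronized (queue) {
                if (queue.isEmpty()) {
                    try {
                        queue.wait(timeoutMs);   // releases the lock while waiting
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();  // preserve interrupt status
                    }
                }
                return queue.isEmpty() ? null : queue.remove(0);
            }
        }
    }

A java.util.concurrent.BlockingQueue would be the more idiomatic building block, but the sketch above mirrors the structure that the patch merely re-indents.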
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/56e5fbde/server/src/com/cloud/cluster/StackMaid.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/cluster/StackMaid.java b/server/src/com/cloud/cluster/StackMaid.java
deleted file mode 100644
index b84d73d..0000000
--- a/server/src/com/cloud/cluster/StackMaid.java
+++ /dev/null
@@ -1,153 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.cluster;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.log4j.Logger;
-
-import com.cloud.cluster.dao.StackMaidDao;
-import com.cloud.cluster.dao.StackMaidDaoImpl;
-import com.cloud.serializer.SerializerHelper;
-import com.cloud.utils.CleanupDelegate;
-import com.cloud.utils.db.Transaction;
-
-public class StackMaid {
-    protected final static Logger s_logger = Logger.getLogger(StackMaid.class);
-	
-	private static ThreadLocal<StackMaid> threadMaid = new ThreadLocal<StackMaid>();
-	
-	private static long msid_setby_manager = 0;
-
-	private StackMaidDao maidDao = new StackMaidDaoImpl(); 
-	private int currentSeq = 0;
-	private Map<String, Object> context = new HashMap<String, Object>();
-
-	public static void init(long msid) {
-		msid_setby_manager = msid;
-	}
-	
-	public static StackMaid current() {
-		StackMaid maid = threadMaid.get();
-		if(maid == null) {
-			maid = new StackMaid();
-			threadMaid.set(maid);
-		}
-		return maid;
-	}
-	
-	public void registerContext(String key, Object contextObject) {
-		assert(!context.containsKey(key)) : "Context key has already been registered";
-		context.put(key, contextObject);
-	}
-	
-	public Object getContext(String key) {
-		return context.get(key);
-	}
-	
-	public void expungeMaidItem(long maidId) {
-		// this is a bit ugly, but when it is not loaded by component locator, this is just a workable way for now
-		Transaction txn = Transaction.open(Transaction.CLOUD_DB);
-		try {
-			maidDao.expunge(maidId);
-		} finally {
-			txn.close();
-		}
-	}
-
-	public int push(String delegateClzName, Object context) {
-		assert(msid_setby_manager != 0) : "Fatal, make sure StackMaidManager is loaded";
-		if(msid_setby_manager == 0)
-			s_logger.error("Fatal, make sure StackMaidManager is loaded");
-		
-		return push(msid_setby_manager, delegateClzName, context);
-	}
-	
-	public int push(long currentMsid, String delegateClzName, Object context) {
-		int savePoint = currentSeq;
-		maidDao.pushCleanupDelegate(currentMsid, currentSeq++, delegateClzName, context);
-		return savePoint;
-	}
-
-	public void pop(int savePoint) {
-		assert(msid_setby_manager != 0) : "Fatal, make sure StackMaidManager is loaded";
-		if(msid_setby_manager == 0)
-			s_logger.error("Fatal, make sure StackMaidManager is loaded");
-		
-		pop(msid_setby_manager, savePoint);
-	}
-	
-	public void pop() {
-	    if(currentSeq > 0)
-	        pop(currentSeq -1);
-	}
-	
-	/**
-	 * must be called within thread context
-	 * @param currentMsid
-	 */
-	public void pop(long currentMsid, int savePoint) {
-		while(currentSeq > savePoint) {
-			maidDao.popCleanupDelegate(currentMsid);
-			currentSeq--;
-		}
-	}
-	
-	public void exitCleanup() {
-		exitCleanup(msid_setby_manager);
-	}
-	
-	public void exitCleanup(long currentMsid) {
-		if(currentSeq > 0) {
-			CheckPointVO maid = null;
-			while((maid = maidDao.popCleanupDelegate(currentMsid)) != null) {
-				doCleanup(maid);
-			}
-			currentSeq = 0;
-		}
-		
-		context.clear();
-	}
-	
-	public static boolean doCleanup(CheckPointVO maid) {
-		if(maid.getDelegate() != null) {
-			try {
-				Class<?> clz = Class.forName(maid.getDelegate());
-				Object delegate = clz.newInstance();
-				if(delegate instanceof CleanupDelegate) {
-					return ((CleanupDelegate)delegate).cleanup(SerializerHelper.fromSerializedString(maid.getContext()), maid);
-				} else {
-					assert(false);
-				}
-			} catch (final ClassNotFoundException e) {
-				s_logger.error("Unable to load StackMaid delegate class: " + maid.getDelegate(), e);
-			} catch (final SecurityException e) {
-				s_logger.error("Security excetion when loading resource: " + maid.getDelegate());
-            } catch (final IllegalArgumentException e) {
-            	s_logger.error("Illegal argument excetion when loading resource: " + maid.getDelegate());
-            } catch (final InstantiationException e) {
-            	s_logger.error("Instantiation excetion when loading resource: " + maid.getDelegate());
-            } catch (final IllegalAccessException e) {
-            	s_logger.error("Illegal access exception when loading resource: " + maid.getDelegate());
-            } 
-            
-            return false;
-		}
-		return true;
-	}
-}

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/56e5fbde/server/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java b/server/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java
index 2c550ea..2612192 100644
--- a/server/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java
+++ b/server/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java
@@ -34,7 +34,6 @@ import com.cloud.configuration.ResourceLimit;
 import com.cloud.domain.dao.DomainDaoImpl;
 import com.cloud.exception.UnsupportedServiceException;
 import com.cloud.user.dao.AccountDaoImpl;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.SearchBuilder;
@@ -44,90 +43,90 @@ import com.cloud.utils.db.Transaction;
 @Component
 @Local(value={ResourceCountDao.class})
 public class ResourceCountDaoImpl extends GenericDaoBase<ResourceCountVO, Long> implements ResourceCountDao {
-	private SearchBuilder<ResourceCountVO> TypeSearch;
-	
-	private SearchBuilder<ResourceCountVO> AccountSearch;
-    private SearchBuilder<ResourceCountVO> DomainSearch;
-	
-	//protected final DomainDaoImpl _domainDao = ComponentLocator.inject(DomainDaoImpl.class);
-	//protected final AccountDaoImpl _accountDao = ComponentLocator.inject(AccountDaoImpl.class);
-
-	@Inject protected DomainDaoImpl _domainDao;
-	@Inject protected AccountDaoImpl _accountDao;
-
-	public ResourceCountDaoImpl() {
-		TypeSearch = createSearchBuilder();
-		TypeSearch.and("type", TypeSearch.entity().getType(), SearchCriteria.Op.EQ);
-	    TypeSearch.and("accountId", TypeSearch.entity().getAccountId(), SearchCriteria.Op.EQ);
-	    TypeSearch.and("domainId", TypeSearch.entity().getDomainId(), SearchCriteria.Op.EQ);
-	    TypeSearch.done();
-		
-		AccountSearch = createSearchBuilder();
-		AccountSearch.and("accountId", AccountSearch.entity().getAccountId(), SearchCriteria.Op.NNULL);
-		AccountSearch.done();
-		
-		DomainSearch = createSearchBuilder();
-		DomainSearch.and("domainId", DomainSearch.entity().getDomainId(), SearchCriteria.Op.NNULL);
-		DomainSearch.done();
-	}
-	
-	@Override 
-	public ResourceCountVO findByOwnerAndType(long ownerId, ResourceOwnerType ownerType, ResourceType type) {
-	    SearchCriteria<ResourceCountVO> sc = TypeSearch.create();
-	    sc.setParameters("type", type);
-	    
-	    if (ownerType == ResourceOwnerType.Account) {
-	        sc.setParameters("accountId", ownerId);
-	        return findOneIncludingRemovedBy(sc);
-	    } else if (ownerType == ResourceOwnerType.Domain) {
-	        sc.setParameters("domainId", ownerId);
+    private final SearchBuilder<ResourceCountVO> TypeSearch;
+
+    private final SearchBuilder<ResourceCountVO> AccountSearch;
+    private final SearchBuilder<ResourceCountVO> DomainSearch;
+
+    //protected final DomainDaoImpl _domainDao = ComponentLocator.inject(DomainDaoImpl.class);
+    //protected final AccountDaoImpl _accountDao = ComponentLocator.inject(AccountDaoImpl.class);
+
+    @Inject protected DomainDaoImpl _domainDao;
+    @Inject protected AccountDaoImpl _accountDao;
+
+    public ResourceCountDaoImpl() {
+        TypeSearch = createSearchBuilder();
+        TypeSearch.and("type", TypeSearch.entity().getType(), SearchCriteria.Op.EQ);
+        TypeSearch.and("accountId", TypeSearch.entity().getAccountId(), SearchCriteria.Op.EQ);
+        TypeSearch.and("domainId", TypeSearch.entity().getDomainId(), SearchCriteria.Op.EQ);
+        TypeSearch.done();
+
+        AccountSearch = createSearchBuilder();
+        AccountSearch.and("accountId", AccountSearch.entity().getAccountId(), SearchCriteria.Op.NNULL);
+        AccountSearch.done();
+
+        DomainSearch = createSearchBuilder();
+        DomainSearch.and("domainId", DomainSearch.entity().getDomainId(), SearchCriteria.Op.NNULL);
+        DomainSearch.done();
+    }
+
+    @Override 
+    public ResourceCountVO findByOwnerAndType(long ownerId, ResourceOwnerType ownerType, ResourceType type) {
+        SearchCriteria<ResourceCountVO> sc = TypeSearch.create();
+        sc.setParameters("type", type);
+
+        if (ownerType == ResourceOwnerType.Account) {
+            sc.setParameters("accountId", ownerId);
+            return findOneIncludingRemovedBy(sc);
+        } else if (ownerType == ResourceOwnerType.Domain) {
+            sc.setParameters("domainId", ownerId);
             return findOneIncludingRemovedBy(sc);
-	    } else {
-	        return null;
-	    }
-	}
-	
-	@Override
-	public long getResourceCount(long ownerId, ResourceOwnerType ownerType, ResourceType type) {
-	    ResourceCountVO vo = findByOwnerAndType(ownerId, ownerType, type);
-	    if (vo != null) {
-	        return vo.getCount();
-	    } else {
-	        return 0;
-	    }
-	}
-	
-	@Override 
-	public void setResourceCount(long ownerId, ResourceOwnerType ownerType, ResourceType type, long count) {
-	    ResourceCountVO resourceCountVO = findByOwnerAndType(ownerId, ownerType, type);
+        } else {
+            return null;
+        }
+    }
+
+    @Override
+    public long getResourceCount(long ownerId, ResourceOwnerType ownerType, ResourceType type) {
+        ResourceCountVO vo = findByOwnerAndType(ownerId, ownerType, type);
+        if (vo != null) {
+            return vo.getCount();
+        } else {
+            return 0;
+        }
+    }
+
+    @Override 
+    public void setResourceCount(long ownerId, ResourceOwnerType ownerType, ResourceType type, long count) {
+        ResourceCountVO resourceCountVO = findByOwnerAndType(ownerId, ownerType, type);
         if (count != resourceCountVO.getCount()) {
             resourceCountVO.setCount(count);
             update(resourceCountVO.getId(), resourceCountVO);
         }
-	}
+    }
 
-	@Override @Deprecated
-	public void updateDomainCount(long domainId, ResourceType type, boolean increment, long delta) {
-		delta = increment ? delta : delta * -1;
+    @Override @Deprecated
+    public void updateDomainCount(long domainId, ResourceType type, boolean increment, long delta) {
+        delta = increment ? delta : delta * -1;
 
         ResourceCountVO resourceCountVO = findByOwnerAndType(domainId, ResourceOwnerType.Domain, type);
-		resourceCountVO.setCount(resourceCountVO.getCount() + delta);
-		update(resourceCountVO.getId(), resourceCountVO);	
-	}
-	
-	@Override
-	public boolean updateById(long id, boolean increment, long delta) {
-	    delta = increment ? delta : delta * -1;
-	    
-	    ResourceCountVO resourceCountVO = findById(id);
-	    resourceCountVO.setCount(resourceCountVO.getCount() + delta);
-	    return update(resourceCountVO.getId(), resourceCountVO);
-	}
-	
-	@Override
-	public Set<Long> listRowsToUpdateForDomain(long domainId, ResourceType type) {
-	    Set<Long> rowIds = new HashSet<Long>();
-	    Set<Long> domainIdsToUpdate = _domainDao.getDomainParentIds(domainId);
+        resourceCountVO.setCount(resourceCountVO.getCount() + delta);
+        update(resourceCountVO.getId(), resourceCountVO);	
+    }
+
+    @Override
+    public boolean updateById(long id, boolean increment, long delta) {
+        delta = increment ? delta : delta * -1;
+
+        ResourceCountVO resourceCountVO = findById(id);
+        resourceCountVO.setCount(resourceCountVO.getCount() + delta);
+        return update(resourceCountVO.getId(), resourceCountVO);
+    }
+
+    @Override
+    public Set<Long> listRowsToUpdateForDomain(long domainId, ResourceType type) {
+        Set<Long> rowIds = new HashSet<Long>();
+        Set<Long> domainIdsToUpdate = _domainDao.getDomainParentIds(domainId);
         for (Long domainIdToUpdate : domainIdsToUpdate) {
             ResourceCountVO domainCountRecord = findByOwnerAndType(domainIdToUpdate, ResourceOwnerType.Domain, type);
             if (domainCountRecord != null) {
@@ -135,34 +134,34 @@ public class ResourceCountDaoImpl extends GenericDaoBase<ResourceCountVO, Long>
             }
         }
         return rowIds;
-	}
-	
-	@Override
-	public Set<Long> listAllRowsToUpdate(long ownerId, ResourceOwnerType ownerType, ResourceType type) {
-	    Set<Long> rowIds = new HashSet<Long>();
-	    
-	    if (ownerType == ResourceOwnerType.Account) {
-	        //get records for account
-	        ResourceCountVO accountCountRecord = findByOwnerAndType(ownerId, ResourceOwnerType.Account, type);
-	        if (accountCountRecord != null) {
-	            rowIds.add(accountCountRecord.getId());
-	        }
-	        
-	        //get records for account's domain and all its parent domains
-	        rowIds.addAll(listRowsToUpdateForDomain(_accountDao.findByIdIncludingRemoved(ownerId).getDomainId(),type));
-	    } else if (ownerType == ResourceOwnerType.Domain) {
-	        return listRowsToUpdateForDomain(ownerId, type);
-	    } 
-	    
-	    return rowIds;
-	}
-	
-	@Override @DB
+    }
+
+    @Override
+    public Set<Long> listAllRowsToUpdate(long ownerId, ResourceOwnerType ownerType, ResourceType type) {
+        Set<Long> rowIds = new HashSet<Long>();
+
+        if (ownerType == ResourceOwnerType.Account) {
+            //get records for account
+            ResourceCountVO accountCountRecord = findByOwnerAndType(ownerId, ResourceOwnerType.Account, type);
+            if (accountCountRecord != null) {
+                rowIds.add(accountCountRecord.getId());
+            }
+
+            //get records for account's domain and all its parent domains
+            rowIds.addAll(listRowsToUpdateForDomain(_accountDao.findByIdIncludingRemoved(ownerId).getDomainId(),type));
+        } else if (ownerType == ResourceOwnerType.Domain) {
+            return listRowsToUpdateForDomain(ownerId, type);
+        } 
+
+        return rowIds;
+    }
+
+    @Override @DB
     public void createResourceCounts(long ownerId, ResourceLimit.ResourceOwnerType ownerType){
-        
+
         Transaction txn = Transaction.currentTxn();
         txn.start();
-        
+
         ResourceType[] resourceTypes = Resource.ResourceType.values();
         for (ResourceType resourceType : resourceTypes) {
             if (!resourceType.supportsOwner(ownerType)) {
@@ -171,24 +170,24 @@ public class ResourceCountDaoImpl extends GenericDaoBase<ResourceCountVO, Long>
             ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, ownerId, ownerType);
             persist(resourceCountVO);
         }
-        
+
         txn.commit();
     }
-	
-	private List<ResourceCountVO> listByDomainId(long domainId) {
-	    SearchCriteria<ResourceCountVO> sc = TypeSearch.create();
+
+    private List<ResourceCountVO> listByDomainId(long domainId) {
+        SearchCriteria<ResourceCountVO> sc = TypeSearch.create();
         sc.setParameters("domainId", domainId);
 
         return listBy(sc);
-	}
-	
+    }
+
     private List<ResourceCountVO> listByAccountId(long accountId) {
-	    SearchCriteria<ResourceCountVO> sc = TypeSearch.create();
+        SearchCriteria<ResourceCountVO> sc = TypeSearch.create();
         sc.setParameters("accountId", accountId);
 
         return listBy(sc);
-	}
-    
+    }
+
     @Override
     public List<ResourceCountVO> listByOwnerId(long ownerId, ResourceOwnerType ownerType) {
         if (ownerType == ResourceOwnerType.Account) {
@@ -199,26 +198,26 @@ public class ResourceCountDaoImpl extends GenericDaoBase<ResourceCountVO, Long>
             return new ArrayList<ResourceCountVO>();
         }
     }
-	
-	@Override
-	public List<ResourceCountVO> listResourceCountByOwnerType(ResourceOwnerType ownerType) {
-	    if (ownerType == ResourceOwnerType.Account) {
-	        return listBy(AccountSearch.create());
-	    } else if (ownerType == ResourceOwnerType.Domain) {
-	        return listBy(DomainSearch.create());
-	    } else {
-	        return new ArrayList<ResourceCountVO>();
-	    }
-	}
-	
-	@Override
+
+    @Override
+    public List<ResourceCountVO> listResourceCountByOwnerType(ResourceOwnerType ownerType) {
+        if (ownerType == ResourceOwnerType.Account) {
+            return listBy(AccountSearch.create());
+        } else if (ownerType == ResourceOwnerType.Domain) {
+            return listBy(DomainSearch.create());
+        } else {
+            return new ArrayList<ResourceCountVO>();
+        }
+    }
+
+    @Override
     public ResourceCountVO persist(ResourceCountVO resourceCountVO){
-	    ResourceOwnerType ownerType = resourceCountVO.getResourceOwnerType();
-	    ResourceType resourceType = resourceCountVO.getType();
-	    if (!resourceType.supportsOwner(ownerType)) {
-	        throw new UnsupportedServiceException("Resource type " + resourceType + " is not supported for owner of type " + ownerType.getName());
-	    }
-	    
+        ResourceOwnerType ownerType = resourceCountVO.getResourceOwnerType();
+        ResourceType resourceType = resourceCountVO.getType();
+        if (!resourceType.supportsOwner(ownerType)) {
+            throw new UnsupportedServiceException("Resource type " + resourceType + " is not supported for owner of type " + ownerType.getName());
+        }
+
         return super.persist(resourceCountVO);
     }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/56e5fbde/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java b/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java
index a3de946..a239997 100755
--- a/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java
+++ b/server/src/com/cloud/consoleproxy/AgentBasedConsoleProxyManager.java
@@ -49,7 +49,6 @@ import com.cloud.host.dao.HostDao;
 import com.cloud.info.ConsoleProxyInfo;
 import com.cloud.network.Network;
 import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.vm.ConsoleProxyVO;
 import com.cloud.vm.ReservationContext;
 import com.cloud.vm.UserVmVO;
@@ -87,9 +86,9 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu
     VirtualMachineManager _itMgr;
     @Inject
     protected ConsoleProxyDao _cpDao;
-    
+
     @Inject ConfigurationDao _configDao;
-    
+
     public int getVncPort(VMInstanceVO vm) {
         if (vm.getHostId() == null) {
             return -1;
@@ -112,7 +111,7 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu
         if (value != null) {
             _consoleProxyUrlPort = NumbersUtil.parseInt(value, ConsoleProxyManager.DEFAULT_PROXY_URL_PORT);
         }
-        
+
         value = configs.get("consoleproxy.port");
         if (value != null) {
             _consoleProxyPort = NumbersUtil.parseInt(value, ConsoleProxyManager.DEFAULT_PROXY_VNC_PORT);
@@ -126,10 +125,10 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu
         _instance = configs.get("instance.name");
 
         _consoleProxyUrlDomain = configs.get("consoleproxy.url.domain");
-        
+
         _listener = new ConsoleProxyListener(this);
         _agentMgr.registerForHostEvents(_listener, true, true, false);
-        
+
         _itMgr.registerGuru(VirtualMachine.Type.ConsoleProxy, this);
 
         if (s_logger.isInfoEnabled()) {
@@ -177,20 +176,20 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu
                 }
                 publicIp = host.getPrivateIpAddress();
             }
-            
+
             int urlPort = _consoleProxyUrlPort;
 
             if (host.getProxyPort() != null && host.getProxyPort().intValue() > 0) {
                 urlPort = host.getProxyPort().intValue();
             }
-            
+
             return new ConsoleProxyInfo(_sslEnabled, publicIp, _consoleProxyPort, urlPort, _consoleProxyUrlDomain);
         } else {
             s_logger.warn("Host that VM is running is no longer available, console access to VM " + userVmId + " will be temporarily unavailable.");
         }
         return null;
     }
-    
+
     @Override
     public void onLoadReport(ConsoleProxyLoadReportCommand cmd) {
     }
@@ -273,16 +272,16 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu
     @Override
     public void setManagementState(ConsoleProxyManagementState state) {
     }
-    
+
     @Override
     public ConsoleProxyManagementState getManagementState() {
-    	return null;
+        return null;
     }
-    
+
     @Override
     public void resumeLastManagementState() {
     }
-    
+
     @Override
     public void startAgentHttpHandlerInVM(StartupProxyCommand startupCmd) {
     }
@@ -299,7 +298,7 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu
         }
         return VirtualMachineName.getConsoleProxyId(vmName);
     }
-    
+
     @Override
     public ConsoleProxyVO findByName(String name) {
         // TODO Auto-generated method stub
@@ -329,7 +328,7 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu
         // TODO Auto-generated method stub
         return false;
     }
-    
+
     @Override
     public boolean finalizeCommandsOnStart(Commands cmds, VirtualMachineProfile<ConsoleProxyVO> profile) {
         // TODO Auto-generated method stub
@@ -346,7 +345,7 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu
     public void finalizeStop(VirtualMachineProfile<ConsoleProxyVO> profile, StopAnswer answer) {
         // TODO Auto-generated method stub
     }
-    
+
     @Override 
     public void finalizeExpunge(ConsoleProxyVO proxy) {
     }
@@ -366,7 +365,7 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu
         //not supported
         throw new UnsupportedOperationException("Unplug nic is not supported for vm of type " + vm.getType());
     }
-    
+
     @Override 
     public void prepareStop(VirtualMachineProfile<ConsoleProxyVO> profile) {
     }

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/56e5fbde/server/src/com/cloud/consoleproxy/ConsoleProxyBalanceAllocator.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyBalanceAllocator.java b/server/src/com/cloud/consoleproxy/ConsoleProxyBalanceAllocator.java
index 84f6fac..0a045eb 100644
--- a/server/src/com/cloud/consoleproxy/ConsoleProxyBalanceAllocator.java
+++ b/server/src/com/cloud/consoleproxy/ConsoleProxyBalanceAllocator.java
@@ -27,9 +27,6 @@ import javax.naming.ConfigurationException;
 
 import org.springframework.stereotype.Component;
 
-import com.cloud.configuration.dao.ConfigurationDao;
-import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.vm.ConsoleProxyVO;
 
 import edu.emory.mathcs.backport.java.util.Collections;
@@ -37,45 +34,45 @@ import edu.emory.mathcs.backport.java.util.Collections;
 @Component
 @Local(value={ConsoleProxyAllocator.class})
 public class ConsoleProxyBalanceAllocator implements ConsoleProxyAllocator {
-	
+
     private String _name;
     private final Random _rand = new Random(System.currentTimeMillis());
-   
+
     @Override
-	public ConsoleProxyVO allocProxy(List<ConsoleProxyVO> candidates, final Map<Long, Integer> loadInfo, long dataCenterId) {
-    	if(candidates != null) {
-    		
-    		List<ConsoleProxyVO> allocationList = new ArrayList<ConsoleProxyVO>();
-    		for(ConsoleProxyVO proxy : candidates) {
-				allocationList.add(proxy);
-    		}
-    		
-    		Collections.sort(candidates, new Comparator<ConsoleProxyVO> () {
-				@Override
-				public int compare(ConsoleProxyVO x, ConsoleProxyVO y) {
-					Integer loadOfX = loadInfo.get(x.getId());
-					Integer loadOfY = loadInfo.get(y.getId());
-
-					if(loadOfX != null && loadOfY != null) {
-						if(loadOfX < loadOfY)
-							return -1;
-						else if(loadOfX > loadOfY)
-							return 1;
-						return 0;
-					} else if(loadOfX == null && loadOfY == null) {
-						return 0;
-					} else {
-						if(loadOfX == null)
-							return -1;
-						return 1;
-					}
-				}
-    		});
-    		
-    		if(allocationList.size() > 0)
-    			return allocationList.get(0);
-    	}
-    	return null;
+    public ConsoleProxyVO allocProxy(List<ConsoleProxyVO> candidates, final Map<Long, Integer> loadInfo, long dataCenterId) {
+        if(candidates != null) {
+
+            List<ConsoleProxyVO> allocationList = new ArrayList<ConsoleProxyVO>();
+            for(ConsoleProxyVO proxy : candidates) {
+                allocationList.add(proxy);
+            }
+
+            Collections.sort(candidates, new Comparator<ConsoleProxyVO> () {
+                @Override
+                public int compare(ConsoleProxyVO x, ConsoleProxyVO y) {
+                    Integer loadOfX = loadInfo.get(x.getId());
+                    Integer loadOfY = loadInfo.get(y.getId());
+
+                    if(loadOfX != null && loadOfY != null) {
+                        if(loadOfX < loadOfY)
+                            return -1;
+                        else if(loadOfX > loadOfY)
+                            return 1;
+                        return 0;
+                    } else if(loadOfX == null && loadOfY == null) {
+                        return 0;
+                    } else {
+                        if(loadOfX == null)
+                            return -1;
+                        return 1;
+                    }
+                }
+            });
+
+            if(allocationList.size() > 0)
+                return allocationList.get(0);
+        }
+        return null;
     }
 
     @Override
@@ -83,7 +80,7 @@ public class ConsoleProxyBalanceAllocator implements ConsoleProxyAllocator {
         _name = name;
         return true;
     }
-	
+
     @Override
     public String getName() {
         return _name;
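
The reformatted allocProxy above orders candidate proxies with a null-tolerant comparator: a proxy with no entry in loadInfo sorts ahead of any proxy with a reported load, and known loads sort ascending. A self-contained sketch of that ordering rule, using plain Long ids in place of ConsoleProxyVO and java.util.Collections instead of the backport import (all names and values below are illustrative):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class LeastLoadedExample {
        public static void main(String[] args) {
            final Map<Long, Integer> loadInfo = new HashMap<Long, Integer>();
            loadInfo.put(1L, 5);   // proxy 1 carries 5 sessions
            loadInfo.put(2L, 2);   // proxy 2 carries 2 sessions
            // proxy 3 has no entry and is treated as least loaded

            List<Long> candidates = new ArrayList<Long>();
            candidates.add(1L);
            candidates.add(2L);
            candidates.add(3L);

            // Unknown (null) load sorts first, then ascending by reported load.
            Collections.sort(candidates, new Comparator<Long>() {
                @Override
                public int compare(Long x, Long y) {
                    Integer loadOfX = loadInfo.get(x);
                    Integer loadOfY = loadInfo.get(y);
                    if (loadOfX != null && loadOfY != null) {
                        return loadOfX.compareTo(loadOfY);
                    } else if (loadOfX == null && loadOfY == null) {
                        return 0;
                    }
                    return loadOfX == null ? -1 : 1;
                }
            });

            System.out.println(candidates); // prints [3, 2, 1]
        }
    }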

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/56e5fbde/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java
index 6ceeb5f..353ef73 100755
--- a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java
+++ b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java
@@ -19,7 +19,6 @@ package com.cloud.consoleproxy;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
 import java.util.Date;
-import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -32,6 +31,7 @@ import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 import javax.persistence.Table;
 
+import org.apache.cloudstack.api.ServerApiException;
 import org.apache.log4j.Logger;
 import org.springframework.context.annotation.Primary;
 import org.springframework.stereotype.Component;
@@ -55,7 +55,6 @@ import com.cloud.agent.api.proxy.StartConsoleProxyAgentHttpHandlerCommand;
 import com.cloud.agent.api.to.NicTO;
 import com.cloud.agent.api.to.VirtualMachineTO;
 import com.cloud.agent.manager.Commands;
-import org.apache.cloudstack.api.ServerApiException;
 import com.cloud.api.commands.DestroyConsoleProxyCmd;
 import com.cloud.certificate.dao.CertificateDao;
 import com.cloud.cluster.ClusterManager;
@@ -128,8 +127,6 @@ import com.cloud.utils.DateUtil;
 import com.cloud.utils.NumbersUtil;
 import com.cloud.utils.Pair;
 import com.cloud.utils.Ternary;
-import com.cloud.utils.component.Adapters;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.component.Manager;
 import com.cloud.utils.db.DB;
 import com.cloud.utils.db.GlobalLock;
@@ -236,7 +233,7 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
     RulesManager _rulesMgr;
     @Inject
     IPAddressDao _ipAddressDao;
-    
+
     private ConsoleProxyListener _listener;
 
     private ServiceOfferingVO _serviceOffering;
@@ -269,7 +266,7 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
     private Map<Long, ZoneHostInfo> _zoneHostInfoMap; // map <zone id, info about running host in zone>
     private Map<Long, ConsoleProxyLoadInfo> _zoneProxyCountMap; // map <zone id, info about proxy VMs count in zone>
     private Map<Long, ConsoleProxyLoadInfo> _zoneVmCountMap; // map <zone id, info about running VMs count in zone>
-    
+
     private String _hashKey;
     private String _staticPublicIp;
     private int _staticPort;
@@ -478,9 +475,9 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
         assert (ksVo != null);
 
         if (_staticPublicIp == null) {
-        return new ConsoleProxyInfo(proxy.isSslEnabled(), proxy.getPublicIpAddress(), _consoleProxyPort, proxy.getPort(), ksVo.getDomainSuffix());
+            return new ConsoleProxyInfo(proxy.isSslEnabled(), proxy.getPublicIpAddress(), _consoleProxyPort, proxy.getPort(), ksVo.getDomainSuffix());
         } else {
-        	return new ConsoleProxyInfo(proxy.isSslEnabled(), _staticPublicIp, _consoleProxyPort, _staticPort, ksVo.getDomainSuffix());
+            return new ConsoleProxyInfo(proxy.isSslEnabled(), _staticPublicIp, _consoleProxyPort, _staticPort, ksVo.getDomainSuffix());
         }
     }
 
@@ -809,10 +806,10 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
 
     private ConsoleProxyAllocator getCurrentAllocator() {
         // for now, only one adapter is supported
-    	for(ConsoleProxyAllocator allocator : _consoleProxyAllocators) {
-    		return allocator;
-    	}
-    	
+        for(ConsoleProxyAllocator allocator : _consoleProxyAllocators) {
+            return allocator;
+        }
+
         return null;
     }
 
@@ -903,26 +900,26 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
         }
 
         if(!cmd.isReauthenticating()) {
-	        String ticket = ConsoleProxyServlet.genAccessTicket(cmd.getHost(), cmd.getPort(), cmd.getSid(), cmd.getVmId());
-	        if (s_logger.isDebugEnabled()) {
-	            s_logger.debug("Console authentication. Ticket in 1 minute boundary for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + ticket);
-	        }
-	
-	        if (!ticket.equals(ticketInUrl)) {
-	            Date now = new Date();
-	            // considering of minute round-up
-	            String minuteEarlyTicket = ConsoleProxyServlet.genAccessTicket(cmd.getHost(), cmd.getPort(), cmd.getSid(), cmd.getVmId(), new Date(now.getTime() - 60 * 1000));
-	
-	            if (s_logger.isDebugEnabled()) {
-	                s_logger.debug("Console authentication. Ticket in 2-minute boundary for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + minuteEarlyTicket);
-	            }
-	
-	            if (!minuteEarlyTicket.equals(ticketInUrl)) {
-	                s_logger.error("Access ticket expired or has been modified. vmId: " + cmd.getVmId() + "ticket in URL: " + ticketInUrl + ", tickets to check against: " + ticket + ","
-	                        + minuteEarlyTicket);
-	                return new ConsoleAccessAuthenticationAnswer(cmd, false);
-	            }
-	        }
+            String ticket = ConsoleProxyServlet.genAccessTicket(cmd.getHost(), cmd.getPort(), cmd.getSid(), cmd.getVmId());
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Console authentication. Ticket in 1 minute boundary for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + ticket);
+            }
+
+            if (!ticket.equals(ticketInUrl)) {
+                Date now = new Date();
+                // considering of minute round-up
+                String minuteEarlyTicket = ConsoleProxyServlet.genAccessTicket(cmd.getHost(), cmd.getPort(), cmd.getSid(), cmd.getVmId(), new Date(now.getTime() - 60 * 1000));
+
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("Console authentication. Ticket in 2-minute boundary for " + cmd.getHost() + ":" + cmd.getPort() + "-" + cmd.getVmId() + " is " + minuteEarlyTicket);
+                }
+
+                if (!minuteEarlyTicket.equals(ticketInUrl)) {
+                    s_logger.error("Access ticket expired or has been modified. vmId: " + cmd.getVmId() + "ticket in URL: " + ticketInUrl + ", tickets to check against: " + ticket + ","
+                            + minuteEarlyTicket);
+                    return new ConsoleAccessAuthenticationAnswer(cmd, false);
+                }
+            }
         }
 
         if (cmd.getVmId() != null && cmd.getVmId().isEmpty()) {
@@ -959,38 +956,38 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
             s_logger.warn("sid " + sid + " in url does not match stored sid " + vm.getVncPassword());
             return new ConsoleAccessAuthenticationAnswer(cmd, false);
         }
-        
+
         if(cmd.isReauthenticating()) {
             ConsoleAccessAuthenticationAnswer authenticationAnswer = new ConsoleAccessAuthenticationAnswer(cmd, true);
             authenticationAnswer.setReauthenticating(true);
 
             s_logger.info("Re-authentication request, ask host " + vm.getHostId() + " for new console info");
-        	GetVncPortAnswer answer = (GetVncPortAnswer) _agentMgr.easySend(vm.getHostId(), new 
-            	GetVncPortCommand(vm.getId(), vm.getInstanceName()));
+            GetVncPortAnswer answer = (GetVncPortAnswer) _agentMgr.easySend(vm.getHostId(), new 
+                    GetVncPortCommand(vm.getId(), vm.getInstanceName()));
 
             if (answer != null && answer.getResult()) {
-            	Ternary<String, String, String> parsedHostInfo = ConsoleProxyServlet.parseHostInfo(answer.getAddress());
-            	
-        		if(parsedHostInfo.second() != null  && parsedHostInfo.third() != null) {
-        			
+                Ternary<String, String, String> parsedHostInfo = ConsoleProxyServlet.parseHostInfo(answer.getAddress());
+
+                if(parsedHostInfo.second() != null  && parsedHostInfo.third() != null) {
+
                     s_logger.info("Re-authentication result. vm: " + vm.getId() + ", tunnel url: " + parsedHostInfo.second()
-                    	+ ", tunnel session: " + parsedHostInfo.third());
-        			
-        			authenticationAnswer.setTunnelUrl(parsedHostInfo.second());
-        			authenticationAnswer.setTunnelSession(parsedHostInfo.third());
-        		} else {
+                            + ", tunnel session: " + parsedHostInfo.third());
+
+                    authenticationAnswer.setTunnelUrl(parsedHostInfo.second());
+                    authenticationAnswer.setTunnelSession(parsedHostInfo.third());
+                } else {
                     s_logger.info("Re-authentication result. vm: " + vm.getId() + ", host address: " + parsedHostInfo.first()
-                        	+ ", port: " + answer.getPort());
-        			
-        			authenticationAnswer.setHost(parsedHostInfo.first());
-        			authenticationAnswer.setPort(answer.getPort());
-        		}
+                            + ", port: " + answer.getPort());
+
+                    authenticationAnswer.setHost(parsedHostInfo.first());
+                    authenticationAnswer.setPort(answer.getPort());
+                }
             } else {
                 s_logger.warn("Re-authentication request failed");
-            	
-            	authenticationAnswer.setSuccess(false);
+
+                authenticationAnswer.setSuccess(false);
             }
-            
+
             return authenticationAnswer;
         }
 
@@ -1198,11 +1195,11 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
                 }
             } else {
                 if (s_logger.isDebugEnabled()) {
-                	if (secondaryStorageHost != null) {
-                		s_logger.debug("Zone host is ready, but console proxy template: " + template.getId() +  " is not ready on secondary storage: " + secondaryStorageHost.getId());
-                	} else {
-                		s_logger.debug("Zone host is ready, but console proxy template: " + template.getId() +  " is not ready on secondary storage.");
-                	}
+                    if (secondaryStorageHost != null) {
+                        s_logger.debug("Zone host is ready, but console proxy template: " + template.getId() +  " is not ready on secondary storage: " + secondaryStorageHost.getId());
+                    } else {
+                        s_logger.debug("Zone host is ready, but console proxy template: " + template.getId() +  " is not ready on secondary storage.");
+                    }
                 }
             }
         }
@@ -1411,7 +1408,7 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
                     result = result && _hostDao.remove(host.getId());
                 }
             }
-            
+
             return result;
         } catch (ResourceUnavailableException e) {
             s_logger.warn("Unable to expunge " + proxy, e);
@@ -1514,11 +1511,11 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
         _itMgr.registerGuru(VirtualMachine.Type.ConsoleProxy, this);
 
         boolean useLocalStorage = Boolean.parseBoolean(configs.get(Config.SystemVMUseLocalStorage.key()));
-        
+
         //check if there is a default service offering configured
         String cpvmSrvcOffIdStr = configs.get(Config.ConsoleProxyServiceOffering.key()); 
         if (cpvmSrvcOffIdStr != null) {
-            
+
             Long cpvmSrvcOffId = null;
             try {
                 cpvmSrvcOffId = _identityDao.getIdentityId(DiskOfferingVO.class.getAnnotation(Table.class).name(),cpvmSrvcOffIdStr);
@@ -1532,8 +1529,8 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
         } 
 
         if(_serviceOffering == null || !_serviceOffering.getSystemUse()){
-        	int ramSize = NumbersUtil.parseInt(_configDao.getValue("console.ram.size"), DEFAULT_PROXY_VM_RAMSIZE);
-        	int cpuFreq = NumbersUtil.parseInt(_configDao.getValue("console.cpu.mhz"), DEFAULT_PROXY_VM_CPUMHZ);
+            int ramSize = NumbersUtil.parseInt(_configDao.getValue("console.ram.size"), DEFAULT_PROXY_VM_RAMSIZE);
+            int cpuFreq = NumbersUtil.parseInt(_configDao.getValue("console.cpu.mhz"), DEFAULT_PROXY_VM_CPUMHZ);
             _serviceOffering = new ServiceOfferingVO("System Offering For Console Proxy", 1, ramSize, cpuFreq, 0, 0, false, null, useLocalStorage, true, null, true, VirtualMachine.Type.ConsoleProxy, true);
             _serviceOffering.setUniqueName(ServiceOffering.consoleProxyDefaultOffUniqueName);
             _serviceOffering = _offeringDao.persistSystemServiceOffering(_serviceOffering);
@@ -1552,7 +1549,7 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
 
         _staticPublicIp = _configDao.getValue("consoleproxy.static.publicIp");
         if (_staticPublicIp != null) {
-        	_staticPort = NumbersUtil.parseInt(_configDao.getValue("consoleproxy.static.port"), 8443);
+            _staticPort = NumbersUtil.parseInt(_configDao.getValue("consoleproxy.static.port"), 8443);
         }
 
         if (s_logger.isInfoEnabled()) {
@@ -2011,7 +2008,7 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
         sc.addAnd(sc.getEntity().getName(), Op.EQ, name);
         return sc.find();
     }
-    
+
     public String getHashKey() {
         // although we may have race conditioning here, database transaction serialization should
         // give us the same key
@@ -2036,8 +2033,8 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
         //not supported
         throw new UnsupportedOperationException("Unplug nic is not supported for vm of type " + vm.getType());
     }
-	
-	@Override
-	public void prepareStop(VirtualMachineProfile<ConsoleProxyVO> profile) {
-	}
+
+    @Override
+    public void prepareStop(VirtualMachineProfile<ConsoleProxyVO> profile) {
+    }
 }
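
In the re-indented authentication block of ConsoleProxyManagerImpl above, a ticket is accepted if it matches either the ticket generated for the current minute or the one generated a minute earlier, which tolerates minute round-up between the servlet that issued the URL and the proxy that checks it. A hypothetical sketch of that two-ticket window follows; genAccessTicket here is a local stand-in and does not reproduce ConsoleProxyServlet's real hashing or signature:

    import java.util.Date;

    public class TicketWindowExample {

        // Stand-in for ConsoleProxyServlet.genAccessTicket(...): assume the ticket
        // is derived from host/port/sid/vmId plus the timestamp rounded to the minute.
        static String genAccessTicket(String host, String port, String sid, String vmId, Date at) {
            long minuteBucket = at.getTime() / 60000L;
            return Integer.toHexString((host + ":" + port + "|" + sid + "|" + vmId + "|" + minuteBucket).hashCode());
        }

        static boolean isTicketValid(String ticketInUrl, String host, String port, String sid, String vmId) {
            Date now = new Date();
            String current = genAccessTicket(host, port, sid, vmId, now);
            if (current.equals(ticketInUrl)) {
                return true;
            }
            // Also accept the previous minute's ticket, mirroring the 2-minute boundary check.
            String minuteEarly = genAccessTicket(host, port, sid, vmId, new Date(now.getTime() - 60 * 1000));
            return minuteEarly.equals(ticketInUrl);
        }

        public static void main(String[] args) {
            String issued = genAccessTicket("10.1.1.1", "5901", "sid", "42", new Date());
            System.out.println(isTicketValid(issued, "10.1.1.1", "5901", "sid", "42")); // true
        }
    }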

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/56e5fbde/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java b/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java
index c28a2e4..13d3112 100755
--- a/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java
+++ b/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java
@@ -30,7 +30,6 @@ import com.cloud.host.Host.Type;
 import com.cloud.host.HostVO;
 import com.cloud.info.ConsoleProxyInfo;
 import com.cloud.resource.ResourceManager;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.vm.VMInstanceVO;
 import com.cloud.vm.dao.ConsoleProxyDao;
 
@@ -41,31 +40,31 @@ public class StaticConsoleProxyManager extends AgentBasedConsoleProxyManager imp
     @Inject ConsoleProxyDao _proxyDao;
     @Inject ResourceManager _resourceMgr;
     @Inject ConfigurationDao _configDao;
-    
+
     @Override
     protected HostVO findHost(VMInstanceVO vm) {
-        
+
         List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByType(Type.ConsoleProxy, vm.getDataCenterIdToDeployIn());
-        
+
         return hosts.isEmpty() ? null : hosts.get(0);
     }
-    
+
     @Override
     public ConsoleProxyInfo assignProxy(long dataCenterId, long userVmId) {
         return new ConsoleProxyInfo(false, _ip, _consoleProxyPort, _consoleProxyUrlPort, _consoleProxyUrlDomain);
     }
-    
+
     @Override
     public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
         super.configure(name, params);
-        
+
         Map<String, String> dbParams = _configDao.getConfiguration("ManagementServer", params);
-        
+
         _ip = dbParams.get("public.ip");
         if (_ip == null) {
             _ip = "127.0.0.1";
         }
-        
+
         return true;
     }
 }
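
StaticConsoleProxyManager.configure() above reads the "ManagementServer" configuration group and falls back to 127.0.0.1 when public.ip is unset, while the ConsoleProxyManagerImpl hunks use NumbersUtil.parseInt(value, default) for numeric settings. A small stand-alone sketch of both defaulting patterns against a plain Map (parseIntWithDefault is a local stand-in, not the real NumbersUtil):

    import java.util.HashMap;
    import java.util.Map;

    public class ConfigDefaultsExample {
        public static void main(String[] args) {
            // Stand-in for the map returned by _configDao.getConfiguration("ManagementServer", params).
            Map<String, String> dbParams = new HashMap<String, String>();
            dbParams.put("consoleproxy.static.port", "8443");

            // String setting with a hard-coded fallback, as in StaticConsoleProxyManager.configure().
            String ip = dbParams.get("public.ip");
            if (ip == null) {
                ip = "127.0.0.1";
            }

            // Numeric setting with a default, mirroring NumbersUtil.parseInt(value, defaultValue).
            int staticPort = parseIntWithDefault(dbParams.get("consoleproxy.static.port"), 8443);

            System.out.println(ip + ":" + staticPort);
        }

        // Local helper standing in for com.cloud.utils.NumbersUtil.parseInt; not the real implementation.
        static int parseIntWithDefault(String value, int defaultValue) {
            if (value == null) {
                return defaultValue;
            }
            try {
                return Integer.parseInt(value.trim());
            } catch (NumberFormatException e) {
                return defaultValue;
            }
        }
    }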

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/56e5fbde/server/src/com/cloud/dc/dao/ClusterDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/dc/dao/ClusterDaoImpl.java b/server/src/com/cloud/dc/dao/ClusterDaoImpl.java
index f06b24d..86dc65e 100644
--- a/server/src/com/cloud/dc/dao/ClusterDaoImpl.java
+++ b/server/src/com/cloud/dc/dao/ClusterDaoImpl.java
@@ -33,7 +33,6 @@ import com.cloud.dc.ClusterVO;
 import com.cloud.dc.HostPodVO;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.org.Grouping;
-import com.cloud.utils.component.ComponentLocator;
 import com.cloud.utils.db.GenericDaoBase;
 import com.cloud.utils.db.GenericSearchBuilder;
 import com.cloud.utils.db.JoinBuilder;
@@ -53,73 +52,73 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
     protected final SearchBuilder<ClusterVO> AvailHyperSearch;
     protected final SearchBuilder<ClusterVO> ZoneSearch;
     protected final SearchBuilder<ClusterVO> ZoneHyTypeSearch;
-    
+
     private static final String GET_POD_CLUSTER_MAP_PREFIX = "SELECT pod_id, id FROM cloud.cluster WHERE cluster.id IN( ";
     private static final String GET_POD_CLUSTER_MAP_SUFFIX = " )";
     @Inject
     protected HostPodDao _hostPodDao;
-    
+
     public ClusterDaoImpl() {
         super();
-        
+
         HyTypeWithoutGuidSearch = createSearchBuilder();
         HyTypeWithoutGuidSearch.and("hypervisorType", HyTypeWithoutGuidSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
         HyTypeWithoutGuidSearch.and("guid", HyTypeWithoutGuidSearch.entity().getGuid(), SearchCriteria.Op.NULL);
         HyTypeWithoutGuidSearch.done();
-        
+
         ZoneHyTypeSearch = createSearchBuilder();
         ZoneHyTypeSearch.and("hypervisorType", ZoneHyTypeSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ);
         ZoneHyTypeSearch.and("dataCenterId", ZoneHyTypeSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
         ZoneHyTypeSearch.done();
-        
+
         PodSearch = createSearchBuilder();
         PodSearch.and("pod", PodSearch.entity().getPodId(), SearchCriteria.Op.EQ);
         PodSearch.and("name", PodSearch.entity().getName(), SearchCriteria.Op.EQ);
         PodSearch.done();
-        
+
         ZoneSearch = createSearchBuilder();
         ZoneSearch.and("dataCenterId", ZoneSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
         ZoneSearch.groupBy(ZoneSearch.entity().getHypervisorType());
         ZoneSearch.done();
-        
+
         AvailHyperSearch = createSearchBuilder();
         AvailHyperSearch.and("zoneId", AvailHyperSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
         AvailHyperSearch.select(null, Func.DISTINCT, AvailHyperSearch.entity().getHypervisorType());
         AvailHyperSearch.done();
     }
-    
+
     @Override
     public List<ClusterVO> listByZoneId(long zoneId) {
         SearchCriteria<ClusterVO> sc = ZoneSearch.create();
         sc.setParameters("dataCenterId", zoneId);        
         return listBy(sc);
     }
-    
+
     @Override
     public List<ClusterVO> listByPodId(long podId) {
         SearchCriteria<ClusterVO> sc = PodSearch.create();
         sc.setParameters("pod", podId);
-        
+
         return listBy(sc);
     }
-    
+
     @Override
     public ClusterVO findBy(String name, long podId) {
         SearchCriteria<ClusterVO> sc = PodSearch.create();
         sc.setParameters("pod", podId);
         sc.setParameters("name", name);
-        
+
         return findOneBy(sc);
     }
-    
+
     @Override
     public List<ClusterVO> listByHyTypeWithoutGuid(String hyType) {
         SearchCriteria<ClusterVO> sc = HyTypeWithoutGuidSearch.create();
         sc.setParameters("hypervisorType", hyType);
-        
+
         return listBy(sc);
     }
-    
+
     @Override
     public List<ClusterVO> listByDcHyType(long dcId, String hyType) {
         SearchCriteria<ClusterVO> sc = ZoneHyTypeSearch.create();
@@ -127,7 +126,7 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
         sc.setParameters("hypervisorType", hyType);
         return listBy(sc);
     }
-    
+
     @Override
     public List<HypervisorType> getAvailableHypervisorInZone(Long zoneId) {
         SearchCriteria<ClusterVO> sc = AvailHyperSearch.create();
@@ -139,13 +138,13 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
         for (ClusterVO cluster : clusters) {
             hypers.add(cluster.getHypervisorType());
         }
-        
+
         return hypers;
     }
-    
+
     @Override
     public Map<Long, List<Long>> getPodClusterIdMap(List<Long> clusterIds){
-    	Transaction txn = Transaction.currentTxn();
+        Transaction txn = Transaction.currentTxn();
         PreparedStatement pstmt = null;
         Map<Long, List<Long>> result = new HashMap<Long, List<Long>>();
 
@@ -158,20 +157,20 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
                 sql.delete(sql.length()-1, sql.length());
                 sql.append(GET_POD_CLUSTER_MAP_SUFFIX);
             }
-            
+
             pstmt = txn.prepareAutoCloseStatement(sql.toString());
             ResultSet rs = pstmt.executeQuery();
             while (rs.next()) {
-            	Long podId = rs.getLong(1);
-            	Long clusterIdInPod  = rs.getLong(2);
+                Long podId = rs.getLong(1);
+                Long clusterIdInPod  = rs.getLong(2);
                 if(result.containsKey(podId)){
-                   	List<Long> clusterList = result.get(podId);
-                	clusterList.add(clusterIdInPod);
-                	result.put(podId, clusterList);
+                    List<Long> clusterList = result.get(podId);
+                    clusterList.add(clusterIdInPod);
+                    result.put(podId, clusterList);
                 }else{
-                	List<Long> clusterList = new ArrayList<Long>();
-                	clusterList.add(clusterIdInPod);
-                	result.put(podId, clusterList);
+                    List<Long> clusterList = new ArrayList<Long>();
+                    clusterList.add(clusterIdInPod);
+                    result.put(podId, clusterList);
                 }
             }
             return result;
@@ -181,49 +180,49 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
             throw new CloudRuntimeException("Caught: " + GET_POD_CLUSTER_MAP_PREFIX, e);
         }
     }
-    
+
     @Override
     public List<Long> listDisabledClusters(long zoneId, Long podId) {
-    	GenericSearchBuilder<ClusterVO, Long> clusterIdSearch = createSearchBuilder(Long.class);
-    	clusterIdSearch.selectField(clusterIdSearch.entity().getId());
-    	clusterIdSearch.and("dataCenterId", clusterIdSearch.entity().getDataCenterId(), Op.EQ);
-    	if(podId != null){
-    		clusterIdSearch.and("podId", clusterIdSearch.entity().getPodId(), Op.EQ);
-    	}
-    	clusterIdSearch.and("allocationState", clusterIdSearch.entity().getAllocationState(), Op.EQ);
-    	clusterIdSearch.done();
-
-    	
-    	SearchCriteria<Long> sc = clusterIdSearch.create();
+        GenericSearchBuilder<ClusterVO, Long> clusterIdSearch = createSearchBuilder(Long.class);
+        clusterIdSearch.selectField(clusterIdSearch.entity().getId());
+        clusterIdSearch.and("dataCenterId", clusterIdSearch.entity().getDataCenterId(), Op.EQ);
+        if(podId != null){
+            clusterIdSearch.and("podId", clusterIdSearch.entity().getPodId(), Op.EQ);
+        }
+        clusterIdSearch.and("allocationState", clusterIdSearch.entity().getAllocationState(), Op.EQ);
+        clusterIdSearch.done();
+
+
+        SearchCriteria<Long> sc = clusterIdSearch.create();
         sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId);
         if (podId != null) {
-	        sc.addAnd("podId", SearchCriteria.Op.EQ, podId);
-	    }
+            sc.addAnd("podId", SearchCriteria.Op.EQ, podId);
+        }
         sc.addAnd("allocationState", SearchCriteria.Op.EQ, Grouping.AllocationState.Disabled);
         return customSearch(sc, null);
     }
 
     @Override
     public List<Long> listClustersWithDisabledPods(long zoneId) {
-    	
-    	GenericSearchBuilder<HostPodVO, Long> disabledPodIdSearch = _hostPodDao.createSearchBuilder(Long.class);
-    	disabledPodIdSearch.selectField(disabledPodIdSearch.entity().getId());
-    	disabledPodIdSearch.and("dataCenterId", disabledPodIdSearch.entity().getDataCenterId(), Op.EQ);
-    	disabledPodIdSearch.and("allocationState", disabledPodIdSearch.entity().getAllocationState(), Op.EQ);
-
-    	GenericSearchBuilder<ClusterVO, Long> clusterIdSearch = createSearchBuilder(Long.class);
-    	clusterIdSearch.selectField(clusterIdSearch.entity().getId());
-    	clusterIdSearch.join("disabledPodIdSearch", disabledPodIdSearch, clusterIdSearch.entity().getPodId(), disabledPodIdSearch.entity().getId(), JoinBuilder.JoinType.INNER);
-    	clusterIdSearch.done();
-
-    	
-    	SearchCriteria<Long> sc = clusterIdSearch.create();
+
+        GenericSearchBuilder<HostPodVO, Long> disabledPodIdSearch = _hostPodDao.createSearchBuilder(Long.class);
+        disabledPodIdSearch.selectField(disabledPodIdSearch.entity().getId());
+        disabledPodIdSearch.and("dataCenterId", disabledPodIdSearch.entity().getDataCenterId(), Op.EQ);
+        disabledPodIdSearch.and("allocationState", disabledPodIdSearch.entity().getAllocationState(), Op.EQ);
+
+        GenericSearchBuilder<ClusterVO, Long> clusterIdSearch = createSearchBuilder(Long.class);
+        clusterIdSearch.selectField(clusterIdSearch.entity().getId());
+        clusterIdSearch.join("disabledPodIdSearch", disabledPodIdSearch, clusterIdSearch.entity().getPodId(), disabledPodIdSearch.entity().getId(), JoinBuilder.JoinType.INNER);
+        clusterIdSearch.done();
+
+
+        SearchCriteria<Long> sc = clusterIdSearch.create();
         sc.setJoinParameters("disabledPodIdSearch", "dataCenterId", zoneId);
         sc.setJoinParameters("disabledPodIdSearch", "allocationState", Grouping.AllocationState.Disabled);
-        
+
         return customSearch(sc, null);
     }
-    
+
     @Override
     public boolean remove(Long id) {
         Transaction txn = Transaction.currentTxn();
@@ -231,7 +230,7 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
         ClusterVO cluster = createForUpdate();
         cluster.setName(null);
         cluster.setGuid(null);
-        
+
         update(id, cluster);
 
         boolean result = super.remove(id);

