cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From raj...@apache.org
Subject [31/35] git commit: updated refs/heads/master to 8bc0294
Date Mon, 31 Aug 2015 06:01:51 GMT
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
----------------------------------------------------------------------
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index f793efb..d407bb1 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -30,6 +30,7 @@ import java.util.concurrent.ExecutionException;
 import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
+import org.apache.log4j.Logger;
 
 import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService;
 import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
@@ -130,6 +131,7 @@ import com.cloud.vm.VmWorkTakeVolumeSnapshot;
 import com.cloud.vm.dao.UserVmDao;
 
 public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable {
+    private static final Logger s_logger = Logger.getLogger(VolumeOrchestrator.class);
 
     @Inject
     EntityManager _entityMgr;
@@ -341,8 +343,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
             if (pool == null) {
                 //pool could not be found in the VM's pod/cluster.
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Could not find any storage pool to create Volume in the pod/cluster of the provided VM " + vm.getUuid());
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("Could not find any storage pool to create Volume in the pod/cluster of the provided VM " + vm.getUuid());
                 }
                 StringBuilder addDetails = new StringBuilder(msg);
                 addDetails.append(", Could not find any storage pool to create Volume in the pod/cluster of the VM ");
@@ -359,8 +361,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 }
 
                 if (pool != null) {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug("Found a suitable pool for create volume: " + pool.getId());
+                    if (s_logger.isDebugEnabled()) {
+                        s_logger.debug("Found a suitable pool for create volume: " + pool.getId());
                     }
                     break;
                 }
@@ -368,7 +370,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         }
 
         if (pool == null) {
-            logger.info(msg);
+            s_logger.info(msg);
             throw new StorageUnavailableException(msg, -1);
         }
 
@@ -387,7 +389,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 _snapshotSrv.syncVolumeSnapshotsToRegionStore(snapVolId, snapStore);
             } catch (Exception ex) {
                 // log but ignore the sync error to avoid any potential S3 down issue, it should be sync next time
-                logger.warn(ex.getMessage(), ex);
+                s_logger.warn(ex.getMessage(), ex);
             }
         }
 
@@ -396,15 +398,15 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         try {
             VolumeApiResult result = future.get();
             if (result.isFailed()) {
-                logger.debug("Failed to create volume from snapshot:" + result.getResult());
+                s_logger.debug("Failed to create volume from snapshot:" + result.getResult());
                 throw new CloudRuntimeException("Failed to create volume from snapshot:" + result.getResult());
             }
             return result.getVolume();
         } catch (InterruptedException e) {
-            logger.debug("Failed to create volume from snapshot", e);
+            s_logger.debug("Failed to create volume from snapshot", e);
             throw new CloudRuntimeException("Failed to create volume from snapshot", e);
         } catch (ExecutionException e) {
-            logger.debug("Failed to create volume from snapshot", e);
+            s_logger.debug("Failed to create volume from snapshot", e);
             throw new CloudRuntimeException("Failed to create volume from snapshot", e);
         }
 
@@ -464,15 +466,15 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         try {
             VolumeApiResult result = future.get();
             if (result.isFailed()) {
-                logger.debug("copy volume failed: " + result.getResult());
+                s_logger.debug("copy volume failed: " + result.getResult());
                 throw new CloudRuntimeException("copy volume failed: " + result.getResult());
             }
             return result.getVolume();
         } catch (InterruptedException e) {
-            logger.debug("Failed to copy volume: " + volume.getId(), e);
+            s_logger.debug("Failed to copy volume: " + volume.getId(), e);
             throw new CloudRuntimeException("Failed to copy volume", e);
         } catch (ExecutionException e) {
-            logger.debug("Failed to copy volume: " + volume.getId(), e);
+            s_logger.debug("Failed to copy volume: " + volume.getId(), e);
             throw new CloudRuntimeException("Failed to copy volume", e);
         }
     }
@@ -502,12 +504,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
         pool = findStoragePool(dskCh, dc, pod, clusterId, vm.getHostId(), vm, avoidPools);
         if (pool == null) {
-            logger.warn("Unable to find suitable primary storage when creating volume " + volume.getName());
+            s_logger.warn("Unable to find suitable primary storage when creating volume " + volume.getName());
             throw new CloudRuntimeException("Unable to find suitable primary storage when creating volume " + volume.getName());
         }
 
-        if (logger.isDebugEnabled()) {
-            logger.debug("Trying to create " + volume + " on " + pool);
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Trying to create " + volume + " on " + pool);
         }
         DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
         for (int i = 0; i < 2; i++) {
@@ -524,20 +526,20 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 VolumeApiResult result = future.get();
                 if (result.isFailed()) {
                     if (result.getResult().contains("request template reload") && (i == 0)) {
-                        logger.debug("Retry template re-deploy for vmware");
+                        s_logger.debug("Retry template re-deploy for vmware");
                         continue;
                     } else {
-                        logger.debug("create volume failed: " + result.getResult());
+                        s_logger.debug("create volume failed: " + result.getResult());
                         throw new CloudRuntimeException("create volume failed:" + result.getResult());
                     }
                 }
 
                 return result.getVolume();
             } catch (InterruptedException e) {
-                logger.error("create volume failed", e);
+                s_logger.error("create volume failed", e);
                 throw new CloudRuntimeException("create volume failed", e);
             } catch (ExecutionException e) {
-                logger.error("create volume failed", e);
+                s_logger.error("create volume failed", e);
                 throw new CloudRuntimeException("create volume failed", e);
             }
         }
@@ -672,10 +674,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         if (rootDisksize != null ) {
             rootDisksize = rootDisksize * 1024 * 1024 * 1024;
             if (rootDisksize > size) {
-                logger.debug("Using root disk size of " + rootDisksize + " Bytes for volume " + name);
+                s_logger.debug("Using root disk size of " + rootDisksize + " Bytes for volume " + name);
                 size = rootDisksize;
             } else {
-                logger.debug("Using root disk size of " + size + " Bytes for volume " + name + "since specified root disk size of " + rootDisksize + " Bytes is smaller than template");
+                s_logger.debug("Using root disk size of " + size + " Bytes for volume " + name + "since specified root disk size of " + rootDisksize + " Bytes is smaller than template");
             }
         }
 
@@ -816,8 +818,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         Long volTemplateId = existingVolume.getTemplateId();
         long vmTemplateId = vm.getTemplateId();
         if (volTemplateId != null && volTemplateId.longValue() != vmTemplateId) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("switchVolume: Old Volume's templateId: " + volTemplateId + " does not match the VM's templateId: " + vmTemplateId
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("switchVolume: Old Volume's templateId: " + volTemplateId + " does not match the VM's templateId: " + vmTemplateId
                         + ", updating templateId in the new Volume");
             }
             templateIdToUse = vmTemplateId;
@@ -831,16 +833,16 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 try {
                     stateTransitTo(existingVolume, Volume.Event.DestroyRequested);
                 } catch (NoTransitionException e) {
-                    logger.debug("Unable to destroy existing volume: " + e.toString());
+                    s_logger.debug("Unable to destroy existing volume: " + e.toString());
                 }
                 // In case of VMware VM will continue to use the old root disk until expunged, so force expunge old root disk
                 if (vm.getHypervisorType() == HypervisorType.VMware) {
-                    logger.info("Expunging volume " + existingVolume.getId() + " from primary data store");
+                    s_logger.info("Expunging volume " + existingVolume.getId() + " from primary data store");
                     AsyncCallFuture<VolumeApiResult> future = volService.expungeVolumeAsync(volFactory.getVolume(existingVolume.getId()));
                     try {
                         future.get();
                     } catch (Exception e) {
-                        logger.debug("Failed to expunge volume:" + existingVolume.getId(), e);
+                        s_logger.debug("Failed to expunge volume:" + existingVolume.getId(), e);
                     }
                 }
 
@@ -857,8 +859,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
     @Override
     @DB
     public void cleanupVolumes(long vmId) throws ConcurrentOperationException {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Cleaning storage for vm: " + vmId);
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Cleaning storage for vm: " + vmId);
         }
         final List<VolumeVO> volumesForVm = _volsDao.findByInstance(vmId);
         final List<VolumeVO> toBeExpunged = new ArrayList<VolumeVO>();
@@ -873,12 +875,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                         if (!volumeAlreadyDestroyed) {
                             volService.destroyVolume(vol.getId());
                         } else {
-                            logger.debug("Skipping destroy for the volume " + vol + " as its in state " + vol.getState().toString());
+                            s_logger.debug("Skipping destroy for the volume " + vol + " as its in state " + vol.getState().toString());
                         }
                         toBeExpunged.add(vol);
                     } else {
-                        if (logger.isDebugEnabled()) {
-                            logger.debug("Detaching " + vol);
+                        if (s_logger.isDebugEnabled()) {
+                            s_logger.debug("Detaching " + vol);
                         }
                         _volsDao.detachVolume(vol.getId());
                     }
@@ -892,9 +894,9 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             try {
                 future.get();
             } catch (InterruptedException e) {
-                logger.debug("failed expunge volume" + expunge.getId(), e);
+                s_logger.debug("failed expunge volume" + expunge.getId(), e);
             } catch (ExecutionException e) {
-                logger.debug("failed expunge volume" + expunge.getId(), e);
+                s_logger.debug("failed expunge volume" + expunge.getId(), e);
             }
         }
     }
@@ -936,7 +938,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         try {
             VolumeApiResult result = future.get();
             if (result.isFailed()) {
-                logger.error("Migrate volume failed:" + result.getResult());
+                s_logger.error("Migrate volume failed:" + result.getResult());
                 throw new StorageUnavailableException("Migrate volume failed: " + result.getResult(), destPool.getId());
             } else {
                 // update the volumeId for snapshots on secondary
@@ -947,10 +949,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             }
             return result.getVolume();
         } catch (InterruptedException e) {
-            logger.debug("migrate volume failed", e);
+            s_logger.debug("migrate volume failed", e);
             throw new CloudRuntimeException(e.getMessage());
         } catch (ExecutionException e) {
-            logger.debug("migrate volume failed", e);
+            s_logger.debug("migrate volume failed", e);
             throw new CloudRuntimeException(e.getMessage());
         }
     }
@@ -962,15 +964,15 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         try {
             VolumeApiResult result = future.get();
             if (result.isFailed()) {
-                logger.debug("migrate volume failed:" + result.getResult());
+                s_logger.debug("migrate volume failed:" + result.getResult());
                 return null;
             }
             return result.getVolume();
         } catch (InterruptedException e) {
-            logger.debug("migrate volume failed", e);
+            s_logger.debug("migrate volume failed", e);
             return null;
         } catch (ExecutionException e) {
-            logger.debug("migrate volume failed", e);
+            s_logger.debug("migrate volume failed", e);
             return null;
         }
     }
@@ -1001,13 +1003,13 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         try {
             CommandResult result = future.get();
             if (result.isFailed()) {
-                logger.debug("Failed to migrated vm " + vm + " along with its volumes. " + result.getResult());
+                s_logger.debug("Failed to migrated vm " + vm + " along with its volumes. " + result.getResult());
                 throw new CloudRuntimeException("Failed to migrated vm " + vm + " along with its volumes. ");
             }
         } catch (InterruptedException e) {
-            logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e);
+            s_logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e);
         } catch (ExecutionException e) {
-            logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e);
+            s_logger.debug("Failed to migrated vm " + vm + " along with its volumes.", e);
         }
     }
 
@@ -1018,12 +1020,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
         for (VolumeVO volume : vols) {
             if (volume.getState() != Volume.State.Ready) {
-                logger.debug("volume: " + volume.getId() + " is in " + volume.getState() + " state");
+                s_logger.debug("volume: " + volume.getId() + " is in " + volume.getState() + " state");
                 throw new CloudRuntimeException("volume: " + volume.getId() + " is in " + volume.getState() + " state");
             }
 
             if (volume.getPoolId() == destPool.getId()) {
-                logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + destPool.getId());
+                s_logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + destPool.getId());
                 continue;
             }
 
@@ -1031,7 +1033,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         }
 
         if (volumesNeedToMigrate.isEmpty()) {
-            logger.debug("No volume need to be migrated");
+            s_logger.debug("No volume need to be migrated");
             return true;
         }
 
@@ -1047,8 +1049,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
     @Override
     public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest) {
         List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
-        if (logger.isDebugEnabled()) {
-            logger.debug("Preparing " + vols.size() + " volumes for " + vm);
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Preparing " + vols.size() + " volumes for " + vm);
         }
 
         for (VolumeVO vol : vols) {
@@ -1136,21 +1138,21 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                     tasks.add(task);
                 } else {
                     if (vol.isRecreatable()) {
-                        if (logger.isDebugEnabled()) {
-                            logger.debug("Volume " + vol + " will be recreated on storage pool " + assignedPool + " assigned by deploymentPlanner");
+                        if (s_logger.isDebugEnabled()) {
+                            s_logger.debug("Volume " + vol + " will be recreated on storage pool " + assignedPool + " assigned by deploymentPlanner");
                         }
                         VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null);
                         tasks.add(task);
                     } else {
                         if (assignedPool.getId() != vol.getPoolId()) {
-                            if (logger.isDebugEnabled()) {
-                                logger.debug("Mismatch in storage pool " + assignedPool + " assigned by deploymentPlanner and the one associated with volume " + vol);
+                            if (s_logger.isDebugEnabled()) {
+                                s_logger.debug("Mismatch in storage pool " + assignedPool + " assigned by deploymentPlanner and the one associated with volume " + vol);
                             }
                             DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, vol.getDiskOfferingId());
                             if (diskOffering.getUseLocalStorage()) {
                                 // Currently migration of local volume is not supported so bail out
-                                if (logger.isDebugEnabled()) {
-                                    logger.debug("Local volume " + vol + " cannot be recreated on storagepool " + assignedPool + " assigned by deploymentPlanner");
+                                if (s_logger.isDebugEnabled()) {
+                                    s_logger.debug("Local volume " + vol + " cannot be recreated on storagepool " + assignedPool + " assigned by deploymentPlanner");
                                 }
                                 throw new CloudRuntimeException("Local volume " + vol + " cannot be recreated on storagepool " + assignedPool + " assigned by deploymentPlanner");
                             } else {
@@ -1163,8 +1165,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                                     storageMigrationEnabled = StorageMigrationEnabled.value();
                                 }
                                 if(storageMigrationEnabled){
-                                    if (logger.isDebugEnabled()) {
-                                        logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner");
+                                    if (s_logger.isDebugEnabled()) {
+                                        s_logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner");
                                     }
                                     VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, assignedPool);
                                     tasks.add(task);
@@ -1185,8 +1187,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                     throw new StorageUnavailableException("Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + vol,
                             Volume.class, vol.getId());
                 }
-                if (logger.isDebugEnabled()) {
-                    logger.debug("No need to recreate the volume: " + vol + ", since it already has a pool assigned: " + vol.getPoolId() + ", adding disk to VM");
+                if (s_logger.isDebugEnabled()) {
+                    s_logger.debug("No need to recreate the volume: " + vol + ", since it already has a pool assigned: " + vol.getPoolId() + ", adding disk to VM");
                 }
                 StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId());
                 VolumeTask task = new VolumeTask(VolumeTaskType.NOP, vol, pool);
@@ -1203,7 +1205,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         DataStore destPool = null;
         if (recreate && (dest.getStorageForDisks() == null || dest.getStorageForDisks().get(vol) == null)) {
             destPool = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary);
-            logger.debug("existing pool: " + destPool.getId());
+            s_logger.debug("existing pool: " + destPool.getId());
         } else {
             StoragePool pool = dest.getStorageForDisks().get(vol);
             destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
@@ -1220,8 +1222,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 dest.getStorageForDisks().put(newVol, poolWithOldVol);
                 dest.getStorageForDisks().remove(vol);
             }
-            if (logger.isDebugEnabled()) {
-                logger.debug("Created new volume " + newVol + " for old volume " + vol);
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Created new volume " + newVol + " for old volume " + vol);
             }
         }
         VolumeInfo volume = volFactory.getVolume(newVol.getId(), destPool);
@@ -1243,7 +1245,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
 
                 TemplateInfo templ = tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId());
                 if (templ == null) {
-                    logger.debug("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId());
+                    s_logger.debug("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId());
                     throw new CloudRuntimeException("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId());
                 }
 
@@ -1269,10 +1271,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 result = future.get();
                 if (result.isFailed()) {
                     if (result.getResult().contains("request template reload") && (i == 0)) {
-                        logger.debug("Retry template re-deploy for vmware");
+                        s_logger.debug("Retry template re-deploy for vmware");
                         continue;
                     } else {
-                        logger.debug("Unable to create " + newVol + ":" + result.getResult());
+                        s_logger.debug("Unable to create " + newVol + ":" + result.getResult());
                         throw new StorageUnavailableException("Unable to create " + newVol + ":" + result.getResult(), destPool.getId());
                     }
                 }
@@ -1289,10 +1291,10 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                 newVol = _volsDao.findById(newVol.getId());
                 break; //break out of template-redeploy retry loop
             } catch (InterruptedException e) {
-                logger.error("Unable to create " + newVol, e);
+                s_logger.error("Unable to create " + newVol, e);
                 throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId());
             } catch (ExecutionException e) {
-                logger.error("Unable to create " + newVol, e);
+                s_logger.error("Unable to create " + newVol, e);
                 throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId());
             }
         }
@@ -1304,8 +1306,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
     public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException {
 
         if (dest == null) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("DeployDestination cannot be null, cannot prepare Volumes for the vm: " + vm);
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("DeployDestination cannot be null, cannot prepare Volumes for the vm: " + vm);
             }
             throw new CloudRuntimeException("Unable to prepare Volume for vm because DeployDestination is null, vm:" + vm);
         }
@@ -1316,8 +1318,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         }
 
         List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
-        if (logger.isDebugEnabled()) {
-            logger.debug("Checking if we need to prepare " + vols.size() + " volumes for " + vm);
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Checking if we need to prepare " + vols.size() + " volumes for " + vm);
         }
 
         List<VolumeTask> tasks = getTasks(vols, dest.getStorageForDisks(), vm);
@@ -1396,7 +1398,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         }
 
         if (volume.getState().equals(Volume.State.Creating)) {
-            logger.debug("Remove volume: " + volume.getId() + ", as it's leftover from last mgt server stop");
+            s_logger.debug("Remove volume: " + volume.getId() + ", as it's leftover from last mgt server stop");
             _volsDao.remove(volume.getId());
         }
     }
@@ -1411,11 +1413,11 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         if (volume.getState() == Volume.State.Migrating) {
             VolumeVO duplicateVol = _volsDao.findByPoolIdName(destPoolId, volume.getName());
             if (duplicateVol != null) {
-                logger.debug("Remove volume " + duplicateVol.getId() + " on storage pool " + destPoolId);
+                s_logger.debug("Remove volume " + duplicateVol.getId() + " on storage pool " + destPoolId);
                 _volsDao.remove(duplicateVol.getId());
             }
 
-            logger.debug("change volume state to ready from migrating in case migration failure for vol: " + volumeId);
+            s_logger.debug("change volume state to ready from migrating in case migration failure for vol: " + volumeId);
             volume.setState(Volume.State.Ready);
             _volsDao.update(volumeId, volume);
         }
@@ -1426,7 +1428,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
         _snapshotSrv.cleanupVolumeDuringSnapshotFailure(volumeId, snapshotId);
         VolumeVO volume = _volsDao.findById(volumeId);
         if (volume.getState() == Volume.State.Snapshotting) {
-            logger.debug("change volume state back to Ready: " + volume.getId());
+            s_logger.debug("change volume state back to Ready: " + volume.getId());
             volume.setState(Volume.State.Ready);
             _volsDao.update(volume.getId(), volume);
         }
@@ -1451,7 +1453,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
                     cleanupVolumeDuringSnapshotFailure(work.getVolumeId(), work.getSnapshotId());
                 }
             } catch (Exception e) {
-                logger.debug("clean up job failure, will continue", e);
+                s_logger.debug("clean up job failure, will continue", e);
             }
         }
     }
@@ -1484,7 +1486,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             //FIXME - why recalculate and not decrement
             _resourceLimitMgr.recalculateResourceCount(volume.getAccountId(), volume.getDomainId(), ResourceType.primary_storage.getOrdinal());
         } catch (Exception e) {
-            logger.debug("Failed to destroy volume" + volume.getId(), e);
+            s_logger.debug("Failed to destroy volume" + volume.getId(), e);
             throw new CloudRuntimeException("Failed to destroy volume" + volume.getId(), e);
         }
     }
@@ -1515,7 +1517,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
             needUpdate = true;
 
         if (needUpdate) {
-            logger.info("Update volume disk chain info. vol: " + vol.getId() + ", " + vol.getPath() + " -> " + path + ", " + vol.getChainInfo() + " -> " + chainInfo);
+            s_logger.info("Update volume disk chain info. vol: " + vol.getId() + ", " + vol.getPath() + " -> " + path + ", " + vol.getChainInfo() + " -> " + chainInfo);
             vol.setPath(path);
             vol.setChainInfo(chainInfo);
             _volsDao.update(volumeId, vol);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java b/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java
index 946e08c..2bd6bcc 100644
--- a/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java
+++ b/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java
@@ -28,6 +28,7 @@ import java.util.Map.Entry;
 import javax.ejb.Local;
 import javax.inject.Inject;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
@@ -51,6 +52,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
 @Component
 @Local(value = {CapacityDao.class})
 public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements CapacityDao {
+    private static final Logger s_logger = Logger.getLogger(CapacityDaoImpl.class);
 
     private static final String ADD_ALLOCATED_SQL = "UPDATE `cloud`.`op_host_capacity` SET used_capacity = used_capacity + ? WHERE host_id = ? AND capacity_type = ?";
     private static final String SUBTRACT_ALLOCATED_SQL =
@@ -521,7 +523,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
             txn.commit();
         } catch (Exception e) {
             txn.rollback();
-            logger.warn("Exception updating capacity for host: " + hostId, e);
+            s_logger.warn("Exception updating capacity for host: " + hostId, e);
         }
     }
 
@@ -986,7 +988,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
             }
             pstmt.executeUpdate();
         } catch (Exception e) {
-            logger.warn("Error updating CapacityVO", e);
+            s_logger.warn("Error updating CapacityVO", e);
         }
     }
 
@@ -1006,7 +1008,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
                 return rs.getFloat(1);
             }
         } catch (Exception e) {
-            logger.warn("Error checking cluster threshold", e);
+            s_logger.warn("Error checking cluster threshold", e);
         }
         return 0;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java b/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java
index d6d944d..978fee0 100644
--- a/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java
+++ b/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java
@@ -18,6 +18,7 @@ package com.cloud.certificate.dao;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.certificate.CertificateVO;
@@ -29,6 +30,7 @@ import com.cloud.utils.db.GenericDaoBase;
 @DB
 public class CertificateDaoImpl extends GenericDaoBase<CertificateVO, Long> implements CertificateDao {
 
+    private static final Logger s_logger = Logger.getLogger(CertificateDaoImpl.class);
 
     public CertificateDaoImpl() {
 
@@ -42,7 +44,7 @@ public class CertificateDaoImpl extends GenericDaoBase<CertificateVO, Long> impl
             update(cert.getId(), cert);
             return cert.getId();
         } catch (Exception e) {
-            logger.warn("Unable to read the certificate: " + e);
+            s_logger.warn("Unable to read the certificate: " + e);
             return new Long(0);
         }
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java b/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
index 493ee48..483ea45 100644
--- a/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
+++ b/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
@@ -22,6 +22,7 @@ import java.util.List;
 import javax.annotation.PostConstruct;
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.cluster.agentlb.HostTransferMapVO;
@@ -35,6 +36,7 @@ import com.cloud.utils.db.SearchCriteria;
 @Local(value = {HostTransferMapDao.class})
 @DB
 public class HostTransferMapDaoImpl extends GenericDaoBase<HostTransferMapVO, Long> implements HostTransferMapDao {
+    private static final Logger s_logger = Logger.getLogger(HostTransferMapDaoImpl.class);
 
     protected SearchBuilder<HostTransferMapVO> AllFieldsSearch;
     protected SearchBuilder<HostTransferMapVO> IntermediateStateSearch;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java
index 5c216e7..373446e 100644
--- a/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java
+++ b/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java
@@ -26,6 +26,7 @@ import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 import javax.persistence.TableGenerator;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.dc.DataCenterDetailVO;
@@ -57,6 +58,7 @@ import com.cloud.utils.net.NetUtils;
 @Component
 @Local(value = {DataCenterDao.class})
 public class DataCenterDaoImpl extends GenericDaoBase<DataCenterVO, Long> implements DataCenterDao {
+    private static final Logger s_logger = Logger.getLogger(DataCenterDaoImpl.class);
 
     protected SearchBuilder<DataCenterVO> NameSearch;
     protected SearchBuilder<DataCenterVO> ListZonesByDomainIdSearch;
@@ -410,7 +412,7 @@ public class DataCenterDaoImpl extends GenericDaoBase<DataCenterVO, Long> implem
                     Long dcId = Long.parseLong(tokenOrIdOrName);
                     return findById(dcId);
                 } catch (NumberFormatException nfe) {
-                    logger.debug("Cannot parse " + tokenOrIdOrName + " into long. " + nfe);
+                    s_logger.debug("Cannot parse " + tokenOrIdOrName + " into long. " + nfe);
                 }
             }
         }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java
index 21501c5..ca79eed 100644
--- a/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java
+++ b/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java
@@ -23,6 +23,7 @@ import java.util.List;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.dc.DataCenterIpAddressVO;
@@ -40,6 +41,7 @@ import com.cloud.utils.net.NetUtils;
 @Local(value = {DataCenterIpAddressDao.class})
 @DB
 public class DataCenterIpAddressDaoImpl extends GenericDaoBase<DataCenterIpAddressVO, Long> implements DataCenterIpAddressDao {
+    private static final Logger s_logger = Logger.getLogger(DataCenterIpAddressDaoImpl.class);
 
     private final SearchBuilder<DataCenterIpAddressVO> AllFieldsSearch;
     private final GenericSearchBuilder<DataCenterIpAddressVO, Integer> AllIpCount;
@@ -142,8 +144,8 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase<DataCenterIpAddre
 
     @Override
     public void releaseIpAddress(String ipAddress, long dcId, Long instanceId) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Releasing ip address: " + ipAddress + " data center " + dcId);
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Releasing ip address: " + ipAddress + " data center " + dcId);
         }
         SearchCriteria<DataCenterIpAddressVO> sc = AllFieldsSearch.create();
         sc.setParameters("ip", ipAddress);
@@ -160,8 +162,8 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase<DataCenterIpAddre
 
     @Override
     public void releaseIpAddress(long nicId, String reservationId) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Releasing ip address for reservationId=" + reservationId + ", instance=" + nicId);
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Releasing ip address for reservationId=" + reservationId + ", instance=" + nicId);
         }
         SearchCriteria<DataCenterIpAddressVO> sc = AllFieldsSearch.create();
         sc.setParameters("instance", nicId);
@@ -176,8 +178,8 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase<DataCenterIpAddre
 
     @Override
     public void releaseIpAddress(long nicId) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Releasing ip address for instance=" + nicId);
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Releasing ip address for instance=" + nicId);
         }
         SearchCriteria<DataCenterIpAddressVO> sc = AllFieldsSearch.create();
         sc.setParameters("instance", nicId);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java
index ca10761..5917123 100644
--- a/engine/schema/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java
+++ b/engine/schema/src/com/cloud/dc/dao/DataCenterLinkLocalIpAddressDaoImpl.java
@@ -25,6 +25,7 @@ import java.util.Map;
 import javax.ejb.Local;
 import javax.naming.ConfigurationException;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.dc.DataCenterLinkLocalIpAddressVO;
@@ -42,6 +43,7 @@ import com.cloud.utils.net.NetUtils;
 @Local(value = {DataCenterLinkLocalIpAddressDaoImpl.class})
 @DB
 public class DataCenterLinkLocalIpAddressDaoImpl extends GenericDaoBase<DataCenterLinkLocalIpAddressVO, Long> implements DataCenterLinkLocalIpAddressDao {
+    private static final Logger s_logger = Logger.getLogger(DataCenterLinkLocalIpAddressDaoImpl.class);
 
     private final SearchBuilder<DataCenterLinkLocalIpAddressVO> AllFieldsSearch;
     private final GenericSearchBuilder<DataCenterLinkLocalIpAddressVO, Integer> AllIpCount;
@@ -105,8 +107,8 @@ public class DataCenterLinkLocalIpAddressDaoImpl extends GenericDaoBase<DataCent
 
     @Override
     public void releaseIpAddress(String ipAddress, long dcId, long instanceId) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Releasing ip address: " + ipAddress + " data center " + dcId);
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Releasing ip address: " + ipAddress + " data center " + dcId);
         }
         SearchCriteria<DataCenterLinkLocalIpAddressVO> sc = AllFieldsSearch.create();
         sc.setParameters("ip", ipAddress);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java
index 0dd943c..1137eb8 100644
--- a/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java
+++ b/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java
@@ -25,6 +25,7 @@ import java.util.List;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.dc.HostPodVO;
@@ -39,6 +40,7 @@ import com.cloud.utils.db.TransactionLegacy;
 @Component
 @Local(value = {HostPodDao.class})
 public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements HostPodDao {
+    private static final Logger s_logger = Logger.getLogger(HostPodDaoImpl.class);
 
     protected SearchBuilder<HostPodVO> DataCenterAndNameSearch;
     protected SearchBuilder<HostPodVO> DataCenterIdSearch;
@@ -100,7 +102,7 @@ public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements H
                 currentPodCidrSubnets.put(podId, cidrPair);
             }
         } catch (SQLException ex) {
-            logger.warn("DB exception " + ex.getMessage(), ex);
+            s_logger.warn("DB exception " + ex.getMessage(), ex);
             return null;
         }
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java b/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java
index ca2a0b5..223172e 100644
--- a/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java
+++ b/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java
@@ -25,6 +25,7 @@ import java.util.Set;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.domain.Domain;
@@ -39,6 +40,7 @@ import com.cloud.utils.db.TransactionLegacy;
 @Component
 @Local(value = {DomainDao.class})
 public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements DomainDao {
+    private static final Logger s_logger = Logger.getLogger(DomainDaoImpl.class);
 
     protected SearchBuilder<DomainVO> DomainNameLikeSearch;
     protected SearchBuilder<DomainVO> ParentDomainNameLikeSearch;
@@ -110,7 +112,7 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
 
         DomainVO parentDomain = findById(parent);
         if (parentDomain == null) {
-            logger.error("Unable to load parent domain: " + parent);
+            s_logger.error("Unable to load parent domain: " + parent);
             return null;
         }
 
@@ -120,7 +122,7 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
 
             parentDomain = this.lockRow(parent, true);
             if (parentDomain == null) {
-                logger.error("Unable to lock parent domain: " + parent);
+                s_logger.error("Unable to lock parent domain: " + parent);
                 return null;
             }
 
@@ -135,7 +137,7 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
             txn.commit();
             return domain;
         } catch (Exception e) {
-            logger.error("Unable to create domain due to " + e.getMessage(), e);
+            s_logger.error("Unable to create domain due to " + e.getMessage(), e);
             txn.rollback();
             return null;
         }
@@ -146,23 +148,23 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
     public boolean remove(Long id) {
         // check for any active users / domains assigned to the given domain id and don't remove the domain if there are any
         if (id != null && id.longValue() == Domain.ROOT_DOMAIN) {
-            logger.error("Can not remove domain " + id + " as it is ROOT domain");
+            s_logger.error("Can not remove domain " + id + " as it is ROOT domain");
             return false;
         } else {
             if(id == null) {
-                logger.error("Can not remove domain without id.");
+                s_logger.error("Can not remove domain without id.");
                 return false;
             }
         }
 
         DomainVO domain = findById(id);
         if (domain == null) {
-            logger.info("Unable to remove domain as domain " + id + " no longer exists");
+            s_logger.info("Unable to remove domain as domain " + id + " no longer exists");
             return true;
         }
 
         if (domain.getParent() == null) {
-            logger.error("Invalid domain " + id + ", orphan?");
+            s_logger.error("Invalid domain " + id + ", orphan?");
             return false;
         }
 
@@ -175,7 +177,7 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
             txn.start();
             DomainVO parentDomain = super.lockRow(domain.getParent(), true);
             if (parentDomain == null) {
-                logger.error("Unable to load parent domain: " + domain.getParent());
+                s_logger.error("Unable to load parent domain: " + domain.getParent());
                 return false;
             }
 
@@ -196,7 +198,7 @@ public class DomainDaoImpl extends GenericDaoBase<DomainVO, Long> implements Dom
             txn.commit();
         } catch (SQLException ex) {
             success = false;
-            logger.error("error removing domain: " + id, ex);
+            s_logger.error("error removing domain: " + id, ex);
             txn.rollback();
         }
         return success;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java b/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java
index 3ad5fe0..be589e7 100644
--- a/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java
+++ b/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java
@@ -21,6 +21,7 @@ import java.util.List;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.event.Event.State;
@@ -35,6 +36,7 @@ import com.cloud.utils.db.TransactionLegacy;
 @Component
 @Local(value = {EventDao.class})
 public class EventDaoImpl extends GenericDaoBase<EventVO, Long> implements EventDao {
+    public static final Logger s_logger = Logger.getLogger(EventDaoImpl.class.getName());
     protected final SearchBuilder<EventVO> CompletedEventSearch;
     protected final SearchBuilder<EventVO> ToArchiveOrDeleteEventSearch;
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java b/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java
index 8134ea8..1739254 100644
--- a/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java
+++ b/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java
@@ -26,6 +26,7 @@ import java.util.TimeZone;
 import javax.ejb.Local;
 import javax.inject.Inject;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.dc.Vlan;
@@ -43,6 +44,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
 @Component
 @Local(value = {UsageEventDao.class})
 public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implements UsageEventDao {
+    public static final Logger s_logger = Logger.getLogger(UsageEventDaoImpl.class.getName());
 
     private final SearchBuilder<UsageEventVO> latestEventsSearch;
     private final SearchBuilder<UsageEventVO> IpeventsSearch;
@@ -101,8 +103,8 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
         // Copy events from cloud db to usage db
         String sql = COPY_EVENTS;
         if (recentEventId == 0) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("no recent event date, copying all events");
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("no recent event date, copying all events");
             }
             sql = COPY_ALL_EVENTS;
         }
@@ -120,7 +122,7 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
             txn.commit();
         } catch (Exception ex) {
             txn.rollback();
-            logger.error("error copying events from cloud db to usage db", ex);
+            s_logger.error("error copying events from cloud db to usage db", ex);
             throw new CloudRuntimeException(ex.getMessage());
         } finally {
             txn.close();
@@ -129,8 +131,8 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
         // Copy event details from cloud db to usage db
         sql = COPY_EVENT_DETAILS;
         if (recentEventId == 0) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("no recent event date, copying all event detailss");
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("no recent event date, copying all event detailss");
             }
             sql = COPY_ALL_EVENT_DETAILS;
         }
@@ -148,7 +150,7 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
             txn.commit();
         } catch (Exception ex) {
             txn.rollback();
-            logger.error("error copying event details from cloud db to usage db", ex);
+            s_logger.error("error copying event details from cloud db to usage db", ex);
             throw new CloudRuntimeException(ex.getMessage());
         } finally {
             txn.close();
@@ -171,7 +173,7 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
             }
             return 0;
         } catch (Exception ex) {
-            logger.error("error getting most recent event id", ex);
+            s_logger.error("error getting most recent event id", ex);
             throw new CloudRuntimeException(ex.getMessage());
         } finally {
             txn.close();
@@ -183,7 +185,7 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
         try {
             return listLatestEvents(endDate);
         } catch (Exception ex) {
-            logger.error("error getting most recent event date", ex);
+            s_logger.error("error getting most recent event date", ex);
             throw new CloudRuntimeException(ex.getMessage());
         } finally {
             txn.close();
@@ -203,7 +205,7 @@ public class UsageEventDaoImpl extends GenericDaoBase<UsageEventVO, Long> implem
             }
             return 0;
         } catch (Exception ex) {
-            logger.error("error getting max event id", ex);
+            s_logger.error("error getting max event id", ex);
             throw new CloudRuntimeException(ex.getMessage());
         } finally {
             txn.close();

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/event/dao/UsageEventDetailsDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/event/dao/UsageEventDetailsDaoImpl.java b/engine/schema/src/com/cloud/event/dao/UsageEventDetailsDaoImpl.java
index 583ba79..35d77c1 100644
--- a/engine/schema/src/com/cloud/event/dao/UsageEventDetailsDaoImpl.java
+++ b/engine/schema/src/com/cloud/event/dao/UsageEventDetailsDaoImpl.java
@@ -21,6 +21,7 @@ import java.util.Map;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.event.UsageEventDetailsVO;
@@ -32,6 +33,7 @@ import com.cloud.utils.db.TransactionLegacy;
 @Component
 @Local(value = {UsageEventDetailsDao.class})
 public class UsageEventDetailsDaoImpl extends GenericDaoBase<UsageEventDetailsVO, Long> implements UsageEventDetailsDao {
+    public static final Logger s_logger = Logger.getLogger(UsageEventDetailsDaoImpl.class.getName());
 
     protected final SearchBuilder<UsageEventDetailsVO> EventDetailsSearch;
     protected final SearchBuilder<UsageEventDetailsVO> DetailSearch;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java b/engine/schema/src/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java
index 669a18b..6bddea2 100644
--- a/engine/schema/src/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java
+++ b/engine/schema/src/com/cloud/gpu/dao/HostGpuGroupsDaoImpl.java
@@ -20,6 +20,7 @@ import java.util.List;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.gpu.HostGpuGroupsVO;
@@ -31,6 +32,7 @@ import com.cloud.utils.db.SearchCriteria;
 @Component
 @Local(value = HostGpuGroupsDao.class)
 public class HostGpuGroupsDaoImpl extends GenericDaoBase<HostGpuGroupsVO, Long> implements HostGpuGroupsDao {
+    private static final Logger s_logger = Logger.getLogger(HostGpuGroupsDaoImpl.class);
 
     private final SearchBuilder<HostGpuGroupsVO> _hostIdGroupNameSearch;
     private final SearchBuilder<HostGpuGroupsVO> _searchByHostId;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/gpu/dao/VGPUTypesDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/gpu/dao/VGPUTypesDaoImpl.java b/engine/schema/src/com/cloud/gpu/dao/VGPUTypesDaoImpl.java
index 6fb774c..96e3a62 100644
--- a/engine/schema/src/com/cloud/gpu/dao/VGPUTypesDaoImpl.java
+++ b/engine/schema/src/com/cloud/gpu/dao/VGPUTypesDaoImpl.java
@@ -28,6 +28,7 @@ import java.util.Map.Entry;
 import javax.ejb.Local;
 import javax.inject.Inject;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.agent.api.VgpuTypesInfo;
@@ -42,6 +43,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
 @Component
 @Local(value = VGPUTypesDao.class)
 public class VGPUTypesDaoImpl extends GenericDaoBase<VGPUTypesVO, Long> implements VGPUTypesDao {
+    private static final Logger s_logger = Logger.getLogger(VGPUTypesDaoImpl.class);
 
     private final SearchBuilder<VGPUTypesVO> _searchByGroupId;
     private final SearchBuilder<VGPUTypesVO> _searchByGroupIdVGPUType;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java
index 578a19d..8342f1f 100644
--- a/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java
+++ b/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java
@@ -31,6 +31,7 @@ import javax.ejb.Local;
 import javax.inject.Inject;
 import javax.persistence.TableGenerator;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.agent.api.VgpuTypesInfo;
@@ -70,6 +71,9 @@ import com.cloud.utils.exception.CloudRuntimeException;
 @DB
 @TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1)
 public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao { //FIXME: , ExternalIdDao {
+    private static final Logger s_logger = Logger.getLogger(HostDaoImpl.class);
+    private static final Logger status_logger = Logger.getLogger(Status.class);
+    private static final Logger state_logger = Logger.getLogger(ResourceState.class);
 
     protected SearchBuilder<HostVO> TypePodDcStatusSearch;
 
@@ -285,7 +289,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
         try {
             HostTransferSearch = _hostTransferDao.createSearchBuilder();
         } catch (Throwable e) {
-            logger.debug("error", e);
+            s_logger.debug("error", e);
         }
         HostTransferSearch.and("id", HostTransferSearch.entity().getId(), SearchCriteria.Op.NULL);
         UnmanagedDirectConnectSearch.join("hostTransferSearch", HostTransferSearch, HostTransferSearch.entity().getId(), UnmanagedDirectConnectSearch.entity().getId(),
@@ -441,8 +445,8 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
             sb.append(" ");
         }
 
-        if (logger.isTraceEnabled()) {
-            logger.trace("Following hosts got reset: " + sb.toString());
+        if (s_logger.isTraceEnabled()) {
+            s_logger.trace("Following hosts got reset: " + sb.toString());
         }
     }
 
@@ -501,19 +505,19 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
         TransactionLegacy txn = TransactionLegacy.currentTxn();
 
         txn.start();
-        if (logger.isDebugEnabled()) {
-            logger.debug("Resetting hosts suitable for reconnect");
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Resetting hosts suitable for reconnect");
         }
         // reset hosts that are suitable candidates for reconnect
         resetHosts(managementServerId, lastPingSecondsAfter);
-        if (logger.isDebugEnabled()) {
-            logger.debug("Completed resetting hosts suitable for reconnect");
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Completed resetting hosts suitable for reconnect");
         }
 
         List<HostVO> assignedHosts = new ArrayList<HostVO>();
 
-        if (logger.isDebugEnabled()) {
-            logger.debug("Acquiring hosts for clusters already owned by this management server");
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Acquiring hosts for clusters already owned by this management server");
         }
         List<Long> clusters = findClustersOwnedByManagementServer(managementServerId);
         if (clusters.size() > 0) {
@@ -531,17 +535,17 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
                 sb.append(host.getId());
                 sb.append(" ");
             }
-            if (logger.isTraceEnabled()) {
-                logger.trace("Following hosts got acquired for clusters already owned: " + sb.toString());
+            if (s_logger.isTraceEnabled()) {
+                s_logger.trace("Following hosts got acquired for clusters already owned: " + sb.toString());
             }
         }
-        if (logger.isDebugEnabled()) {
-            logger.debug("Completed acquiring hosts for clusters already owned by this management server");
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Completed acquiring hosts for clusters already owned by this management server");
         }
 
         if (assignedHosts.size() < limit) {
-            if (logger.isDebugEnabled()) {
-                logger.debug("Acquiring hosts for clusters not owned by any management server");
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Acquiring hosts for clusters not owned by any management server");
             }
             // for remaining hosts not owned by any MS check if they can be owned (by owning full cluster)
             clusters = findClustersForHostsNotOwnedByAnyManagementServer();
@@ -581,12 +585,12 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
                         break;
                     }
                 }
-                if (logger.isTraceEnabled()) {
-                    logger.trace("Following hosts got acquired from newly owned clusters: " + sb.toString());
+                if (s_logger.isTraceEnabled()) {
+                    s_logger.trace("Following hosts got acquired from newly owned clusters: " + sb.toString());
                 }
             }
-            if (logger.isDebugEnabled()) {
-                logger.debug("Completed acquiring hosts for clusters not owned by any management server");
+            if (s_logger.isDebugEnabled()) {
+                s_logger.debug("Completed acquiring hosts for clusters not owned by any management server");
             }
         }
         txn.commit();
@@ -750,7 +754,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
                 }
             }
         } catch (SQLException e) {
-            logger.warn("Exception: ", e);
+            s_logger.warn("Exception: ", e);
         }
         return result;
     }
@@ -861,15 +865,15 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
                 l.add(info);
             }
         } catch (SQLException e) {
-            logger.debug("SQLException caught", e);
+            s_logger.debug("SQLException caught", e);
         }
         return l;
     }
 
     @Override
     public long getNextSequence(long hostId) {
-        if (logger.isTraceEnabled()) {
-            logger.trace("getNextSequence(), hostId: " + hostId);
+        if (s_logger.isTraceEnabled()) {
+            s_logger.trace("getNextSequence(), hostId: " + hostId);
         }
 
         TableGenerator tg = _tgs.get("host_req_sq");
@@ -949,7 +953,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
             HostVO ho = findById(host.getId());
             assert ho != null : "How how how? : " + host.getId();
 
-            if (logger.isDebugEnabled()) {
+            if (status_logger.isDebugEnabled()) {
 
                 StringBuilder str = new StringBuilder("Unable to update host for event:").append(event.toString());
                 str.append(". Name=").append(host.getName());
@@ -971,7 +975,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
                         .append(":old update count=")
                         .append(oldUpdateCount)
                         .append("]");
-                logger.debug(str.toString());
+                status_logger.debug(str.toString());
             } else {
                 StringBuilder msg = new StringBuilder("Agent status update: [");
                 msg.append("id = " + host.getId());
@@ -981,11 +985,11 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
                 msg.append("; new status = " + newStatus);
                 msg.append("; old update count = " + oldUpdateCount);
                 msg.append("; new update count = " + newUpdateCount + "]");
-                logger.debug(msg.toString());
+                status_logger.debug(msg.toString());
             }
 
             if (ho.getState() == newStatus) {
-                logger.debug("Host " + ho.getName() + " state has already been updated to " + newStatus);
+                status_logger.debug("Host " + ho.getName() + " state has already been updated to " + newStatus);
                 return true;
             }
         }
@@ -1011,7 +1015,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
         int result = update(ub, sc, null);
         assert result <= 1 : "How can this update " + result + " rows? ";
 
-        if (logger.isDebugEnabled() && result == 0) {
+        if (state_logger.isDebugEnabled() && result == 0) {
             HostVO ho = findById(host.getId());
             assert ho != null : "How how how? : " + host.getId();
 
@@ -1021,7 +1025,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
             str.append("; old state = " + oldState);
             str.append("; event = " + event);
             str.append("; new state = " + newState + "]");
-            logger.debug(str.toString());
+            state_logger.debug(str.toString());
         } else {
             StringBuilder msg = new StringBuilder("Resource state update: [");
             msg.append("id = " + host.getId());
@@ -1029,7 +1033,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
             msg.append("; old state = " + oldState);
             msg.append("; event = " + event);
             msg.append("; new state = " + newState + "]");
-            logger.debug(msg.toString());
+            state_logger.debug(msg.toString());
         }
 
         return result > 0;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java b/engine/schema/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java
index b8f07cb..2b94e69 100644
--- a/engine/schema/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java
+++ b/engine/schema/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java
@@ -20,6 +20,7 @@ import java.util.List;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
@@ -32,6 +33,7 @@ import com.cloud.utils.db.SearchCriteria;
 @Local(value = HypervisorCapabilitiesDao.class)
 public class HypervisorCapabilitiesDaoImpl extends GenericDaoBase<HypervisorCapabilitiesVO, Long> implements HypervisorCapabilitiesDao {
 
+    private static final Logger s_logger = Logger.getLogger(HypervisorCapabilitiesDaoImpl.class);
 
     protected final SearchBuilder<HypervisorCapabilitiesVO> HypervisorTypeSearch;
     protected final SearchBuilder<HypervisorCapabilitiesVO> HypervisorTypeAndVersionSearch;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java b/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java
index 35be85b..e89536b 100644
--- a/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java
@@ -21,6 +21,7 @@ import java.util.List;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.utils.db.DB;
@@ -32,6 +33,7 @@ import com.cloud.utils.db.TransactionLegacy;
 @Component
 @Local(value = FirewallRulesCidrsDao.class)
 public class FirewallRulesCidrsDaoImpl extends GenericDaoBase<FirewallRulesCidrsVO, Long> implements FirewallRulesCidrsDao {
+    private static final Logger s_logger = Logger.getLogger(FirewallRulesCidrsDaoImpl.class);
     protected final SearchBuilder<FirewallRulesCidrsVO> CidrsSearch;
 
     protected FirewallRulesCidrsDaoImpl() {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java b/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java
index 460630b..5122876 100644
--- a/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java
@@ -26,6 +26,7 @@ import javax.ejb.Local;
 import javax.inject.Inject;
 
 import org.apache.cloudstack.resourcedetail.dao.UserIpAddressDetailsDao;
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.dc.Vlan.VlanType;
@@ -49,6 +50,7 @@ import com.cloud.utils.net.Ip;
 @Local(value = {IPAddressDao.class})
 @DB
 public class IPAddressDaoImpl extends GenericDaoBase<IPAddressVO, Long> implements IPAddressDao {
+    private static final Logger s_logger = Logger.getLogger(IPAddressDaoImpl.class);
 
     protected SearchBuilder<IPAddressVO> AllFieldsSearch;
     protected SearchBuilder<IPAddressVO> VlanDbIdSearchUnallocated;
@@ -320,7 +322,7 @@ public class IPAddressDaoImpl extends GenericDaoBase<IPAddressVO, Long> implemen
                 ipCount = rs.getInt(1);
             }
         } catch (Exception e) {
-            logger.warn("Exception counting IP addresses", e);
+            s_logger.warn("Exception counting IP addresses", e);
         }
 
         return ipCount;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java b/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java
index 5a81947..211280d 100644
--- a/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/PortProfileDaoImpl.java
@@ -23,6 +23,7 @@ import java.util.List;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.utils.db.DB;
@@ -37,6 +38,7 @@ import com.cloud.utils.exception.CloudRuntimeException;
 @Local(value = PortProfileDao.class)
 @DB()
 public class PortProfileDaoImpl extends GenericDaoBase<PortProfileVO, Long> implements PortProfileDao {
+    protected static final Logger s_logger = Logger.getLogger(PortProfileDaoImpl.class);
 
     final SearchBuilder<PortProfileVO> nameSearch;
     final SearchBuilder<PortProfileVO> accessVlanSearch;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java b/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java
index 34d96b9..965d433 100644
--- a/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java
@@ -20,6 +20,7 @@ import java.util.List;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.network.RemoteAccessVpn;
@@ -30,6 +31,7 @@ import com.cloud.utils.db.SearchCriteria;
 @Component
 @Local(value = {RemoteAccessVpnDao.class})
 public class RemoteAccessVpnDaoImpl extends GenericDaoBase<RemoteAccessVpnVO, Long> implements RemoteAccessVpnDao {
+    private static final Logger s_logger = Logger.getLogger(RemoteAccessVpnDaoImpl.class);
 
     private final SearchBuilder<RemoteAccessVpnVO> AllFieldsSearch;
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java b/engine/schema/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java
index 8329f3d..e32533f 100644
--- a/engine/schema/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/Site2SiteVpnConnectionDaoImpl.java
@@ -22,6 +22,7 @@ import javax.annotation.PostConstruct;
 import javax.ejb.Local;
 import javax.inject.Inject;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.utils.db.GenericDaoBase;
@@ -32,6 +33,7 @@ import com.cloud.utils.db.SearchCriteria;
 @Component
 @Local(value = {Site2SiteVpnConnectionDao.class})
 public class Site2SiteVpnConnectionDaoImpl extends GenericDaoBase<Site2SiteVpnConnectionVO, Long> implements Site2SiteVpnConnectionDao {
+    private static final Logger s_logger = Logger.getLogger(Site2SiteVpnConnectionDaoImpl.class);
 
     @Inject
     protected IPAddressDao _addrDao;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java b/engine/schema/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java
index 7951b05..658ca0a 100644
--- a/engine/schema/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/Site2SiteVpnGatewayDaoImpl.java
@@ -19,6 +19,7 @@ package com.cloud.network.dao;
 import javax.ejb.Local;
 import javax.inject.Inject;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.utils.db.GenericDaoBase;
@@ -31,6 +32,7 @@ public class Site2SiteVpnGatewayDaoImpl extends GenericDaoBase<Site2SiteVpnGatew
     @Inject
     protected IPAddressDao _addrDao;
 
+    private static final Logger s_logger = Logger.getLogger(Site2SiteVpnGatewayDaoImpl.class);
 
     private final SearchBuilder<Site2SiteVpnGatewayVO> AllFieldsSearch;
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java b/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java
index c0ba455..f0ba199 100644
--- a/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java
@@ -20,6 +20,7 @@ import java.util.List;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.network.UserIpv6AddressVO;
@@ -33,6 +34,7 @@ import com.cloud.utils.db.SearchCriteria.Op;
 @Component
 @Local(value = UserIpv6AddressDao.class)
 public class UserIpv6AddressDaoImpl extends GenericDaoBase<UserIpv6AddressVO, Long> implements UserIpv6AddressDao {
+    private static final Logger s_logger = Logger.getLogger(UserIpv6AddressDaoImpl.class);
 
     protected final SearchBuilder<UserIpv6AddressVO> AllFieldsSearch;
     protected GenericSearchBuilder<UserIpv6AddressVO, Long> CountFreePublicIps;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java b/engine/schema/src/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java
index e430910..8fe9375 100644
--- a/engine/schema/src/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java
@@ -27,6 +27,7 @@ import java.util.Set;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.network.security.VmRulesetLogVO;
@@ -38,6 +39,7 @@ import com.cloud.utils.db.TransactionLegacy;
 @Component
 @Local(value = {VmRulesetLogDao.class})
 public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> implements VmRulesetLogDao {
+    protected static final Logger s_logger = Logger.getLogger(VmRulesetLogDaoImpl.class);
     private SearchBuilder<VmRulesetLogVO> VmIdSearch;
     private String InsertOrUpdateSQl = "INSERT INTO op_vm_ruleset_log (instance_id, created, logsequence) "
         + " VALUES(?, now(), 1) ON DUPLICATE KEY UPDATE logsequence=logsequence+1";
@@ -98,19 +100,19 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> im
             } catch (SQLTransactionRollbackException e1) {
                 if (i < maxTries - 1) {
                     int delayMs = (i + 1) * 1000;
-                    logger.debug("Caught a deadlock exception while inserting security group rule log, retrying in " + delayMs);
+                    s_logger.debug("Caught a deadlock exception while inserting security group rule log, retrying in " + delayMs);
                     try {
                         Thread.sleep(delayMs);
                     } catch (InterruptedException ie) {
-                        logger.debug("[ignored] interupted while inserting security group rule logger.");
+                        s_logger.debug("[ignored] interupted while inserting security group rule log.");
                     }
                 } else
-                    logger.warn("Caught another deadlock exception while retrying inserting security group rule log, giving up");
+                    s_logger.warn("Caught another deadlock exception while retrying inserting security group rule log, giving up");
 
             }
         }
-        if (logger.isTraceEnabled()) {
-            logger.trace("Inserted or updated " + numUpdated + " rows");
+        if (s_logger.isTraceEnabled()) {
+            s_logger.trace("Inserted or updated " + numUpdated + " rows");
         }
         return numUpdated;
     }
@@ -134,8 +136,8 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> im
                             vmIds.add(vmId);
                         }
                         int numUpdated = executeWithRetryOnDeadlock(txn, pstmt, vmIds);
-                        if (logger.isTraceEnabled()) {
-                            logger.trace("Inserted or updated " + numUpdated + " rows");
+                        if (s_logger.isTraceEnabled()) {
+                            s_logger.trace("Inserted or updated " + numUpdated + " rows");
                         }
                         if (numUpdated > 0)
                             count += stmtSize;
@@ -145,7 +147,7 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> im
 
             }
         } catch (SQLException sqe) {
-            logger.warn("Failed to execute multi insert ", sqe);
+            s_logger.warn("Failed to execute multi insert ", sqe);
         }
 
         return count;
@@ -173,10 +175,10 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> im
             queryResult = stmtInsert.executeBatch();
 
             txn.commit();
-            if (logger.isTraceEnabled())
-                logger.trace("Updated or inserted " + workItems.size() + " log items");
+            if (s_logger.isTraceEnabled())
+                s_logger.trace("Updated or inserted " + workItems.size() + " log items");
         } catch (SQLException e) {
-            logger.warn("Failed to execute batch update statement for ruleset log: ", e);
+            s_logger.warn("Failed to execute batch update statement for ruleset log: ", e);
             txn.rollback();
             success = false;
         }
@@ -185,7 +187,7 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase<VmRulesetLogVO, Long> im
             workItems.toArray(arrayItems);
             for (int i = 0; i < queryResult.length; i++) {
                 if (queryResult[i] < 0) {
-                    logger.debug("Batch query update failed for vm " + arrayItems[i]);
+                    s_logger.debug("Batch query update failed for vm " + arrayItems[i]);
                 }
             }
         }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java
index dd28fe3..4c2574e 100644
--- a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemCidrsDaoImpl.java
@@ -23,6 +23,7 @@ import java.util.List;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.network.vpc.NetworkACLItemCidrsDao;
@@ -39,6 +40,7 @@ import com.cloud.utils.db.TransactionLegacy;
 @Component
 @Local(value = NetworkACLItemCidrsDao.class)
 public class NetworkACLItemCidrsDaoImpl extends GenericDaoBase<NetworkACLItemCidrsVO, Long> implements NetworkACLItemCidrsDao {
+    private static final Logger s_logger = Logger.getLogger(NetworkACLItemCidrsDaoImpl.class);
     protected final SearchBuilder<NetworkACLItemCidrsVO> cidrsSearch;
 
     protected NetworkACLItemCidrsDaoImpl() {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java
index d600868..201197c 100644
--- a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java
@@ -21,6 +21,7 @@ import java.util.List;
 import javax.ejb.Local;
 import javax.inject.Inject;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.network.vpc.NetworkACLItem.State;
@@ -39,6 +40,7 @@ import com.cloud.utils.db.TransactionLegacy;
 @Local(value = NetworkACLItemDao.class)
 @DB()
 public class NetworkACLItemDaoImpl extends GenericDaoBase<NetworkACLItemVO, Long> implements NetworkACLItemDao {
+    private static final Logger s_logger = Logger.getLogger(NetworkACLItemDaoImpl.class);
 
     protected final SearchBuilder<NetworkACLItemVO> AllFieldsSearch;
     protected final SearchBuilder<NetworkACLItemVO> NotRevokedSearch;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java
index a276890..2178452 100644
--- a/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java
+++ b/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java
@@ -21,6 +21,7 @@ import java.util.List;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.network.vpc.PrivateIpVO;
@@ -37,6 +38,7 @@ import com.cloud.utils.db.TransactionLegacy;
 @Local(value = PrivateIpDao.class)
 @DB()
 public class PrivateIpDaoImpl extends GenericDaoBase<PrivateIpVO, Long> implements PrivateIpDao {
+    private static final Logger s_logger = Logger.getLogger(PrivateIpDaoImpl.class);
 
     private final SearchBuilder<PrivateIpVO> AllFieldsSearch;
     private final GenericSearchBuilder<PrivateIpVO, Integer> CountAllocatedByNetworkId;
@@ -90,8 +92,8 @@ public class PrivateIpDaoImpl extends GenericDaoBase<PrivateIpVO, Long> implemen
 
     @Override
     public void releaseIpAddress(String ipAddress, long networkId) {
-        if (logger.isDebugEnabled()) {
-            logger.debug("Releasing private ip address: " + ipAddress + " network id " + networkId);
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Releasing private ip address: " + ipAddress + " network id " + networkId);
         }
         SearchCriteria<PrivateIpVO> sc = AllFieldsSearch.create();
         sc.setParameters("ip", ipAddress);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/8bc02940/engine/schema/src/com/cloud/projects/dao/ProjectAccountDaoImpl.java
----------------------------------------------------------------------
diff --git a/engine/schema/src/com/cloud/projects/dao/ProjectAccountDaoImpl.java b/engine/schema/src/com/cloud/projects/dao/ProjectAccountDaoImpl.java
index 7565041..2ecf3fb 100644
--- a/engine/schema/src/com/cloud/projects/dao/ProjectAccountDaoImpl.java
+++ b/engine/schema/src/com/cloud/projects/dao/ProjectAccountDaoImpl.java
@@ -20,6 +20,7 @@ import java.util.List;
 
 import javax.ejb.Local;
 
+import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
 
 import com.cloud.projects.ProjectAccount;
@@ -38,6 +39,7 @@ public class ProjectAccountDaoImpl extends GenericDaoBase<ProjectAccountVO, Long
     final GenericSearchBuilder<ProjectAccountVO, Long> AdminSearch;
     final GenericSearchBuilder<ProjectAccountVO, Long> ProjectAccountSearch;
     final GenericSearchBuilder<ProjectAccountVO, Long> CountByRoleSearch;
+    public static final Logger s_logger = Logger.getLogger(ProjectAccountDaoImpl.class.getName());
 
     protected ProjectAccountDaoImpl() {
         AllFieldsSearch = createSearchBuilder();
@@ -148,7 +150,7 @@ public class ProjectAccountDaoImpl extends GenericDaoBase<ProjectAccountVO, Long
 
         int rowsRemoved = remove(sc);
         if (rowsRemoved > 0) {
-            logger.debug("Removed account id=" + accountId + " from " + rowsRemoved + " projects");
+            s_logger.debug("Removed account id=" + accountId + " from " + rowsRemoved + " projects");
         }
     }
 


Mime
View raw message