incubator-cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ahu...@apache.org
Subject [2/8] Introduced plugins directory. Moved ovm into plugins. Introduced build.xml for ovm.
Date Wed, 20 Jun 2012 01:48:00 GMT
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/ovm/scripts/vm/hypervisor/ovm/OvmSecurityGroupModule.py
----------------------------------------------------------------------
diff --git a/ovm/scripts/vm/hypervisor/ovm/OvmSecurityGroupModule.py b/ovm/scripts/vm/hypervisor/ovm/OvmSecurityGroupModule.py
deleted file mode 100644
index 9816314..0000000
--- a/ovm/scripts/vm/hypervisor/ovm/OvmSecurityGroupModule.py
+++ /dev/null
@@ -1,478 +0,0 @@
-# Copyright 2012 Citrix Systems, Inc. Licensed under the
-# Apache License, Version 2.0 (the "License"); you may not use this
-# file except in compliance with the License.  Citrix Systems, Inc.
-# reserves all rights not expressly granted by the License.
-# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# 
-# Automatically generated by addcopyright.py at 04/03/2012
-from OvmCommonModule import *
-from ConfigFileOps import *
-import os
-import logging
-
class OvmSecurityGroup(OvmObject):
    """Programs per-VM security-group rules (iptables + ebtables) on an OVM host.

    Chain naming convention: each VM gets an iptables chain named after the VM,
    a "<instance-prefix>-def" default chain, and ebtables chains "<vm>-in" /
    "<vm>-out".  The state last programmed for a VM is persisted under
    /var/run/cloud/<vm>.log so that reprogramming can be skipped when nothing
    has changed.
    """

    @staticmethod
    def can_bridge_firewall():
        """Check host prerequisites (iptables, ebtables, state dir) and purge stale rules.

        Returns True when the host is capable of bridge firewalling.
        """
        try:
            execute("which iptables")
        except:
            print("iptables was not found on the host")
            return False

        try:
            execute("which ebtables")
        except:
            print("ebtables was not found on the host")
            return False

        # Directory where the per-VM rule logs are kept.
        if not os.path.exists('/var/run/cloud'):
            os.makedirs('/var/run/cloud')

        return OvmSecurityGroup.cleanup_rules()

    @staticmethod
    def cleanup_rules():
        """Delete rules belonging to chains whose VM is no longer running.

        Scans iptables chain names that look like VM names (r-/i-/s-/v- prefix)
        and removes the rules of any chain that 'xm list' does not know about.
        Returns True on success, False otherwise.
        """
        try:
            chainscmd = "iptables-save | grep '^:' | grep -v '.*-def' | awk '{print $1}' | cut -d':' -f2"
            chains = execute(chainscmd).split('\n')
            cleanup = []
            for chain in chains:
                # Only chains named like VMs are candidates for cleanup.
                if not any(chain.startswith(prefix) for prefix in ('r-', 'i-', 's-', 'v-')):
                    continue
                vm_name = chain

                try:
                    result = execute("xm list | grep " + vm_name)
                except:
                    result = None

                if result is None or len(result) == 0:
                    logging.debug("chain " + chain + " does not correspond to a vm, cleaning up")
                    cleanup.append(vm_name)

            for vm_name in cleanup:
                OvmSecurityGroup.delete_all_network_rules_for_vm(vm_name)

            logging.debug("Cleaned up rules for " + str(len(cleanup)) + " chains")
            return True
        except:
            logging.debug("Failed to cleanup rules !")
            return False

    @staticmethod
    def add_fw_framework(bridge_name):
        """Set up the base iptables chains (BF-<bridge>, -IN, -OUT) for a bridge.

        Also enables bridge netfilter via sysctl.  Returns True on success.
        """
        try:
            cfo = ConfigFileOps("/etc/sysctl.conf")
            cfo.addEntry("net.bridge.bridge-nf-call-arptables", "1")
            cfo.addEntry("net.bridge.bridge-nf-call-iptables", "1")
            cfo.addEntry("net.bridge.bridge-nf-call-ip6tables", "1")
            cfo.save()

            execute("sysctl -p /etc/sysctl.conf")
        except:
            logging.debug("failed to turn on bridge netfilter")
            return False

        brfw = "BF-" + bridge_name
        brfwout = brfw + "-OUT"
        brfwin = brfw + "-IN"

        # Create each chain if it does not already exist ('-L' fails when missing).
        for chain in (brfw, brfwout, brfwin):
            try:
                execute("iptables -L " + chain)
            except:
                execute("iptables -N " + chain)

        try:
            # Only wire the chains into FORWARD once (when the reference count is 0).
            refs = execute("iptables -n -L  " + brfw + " |grep " + brfw + " | cut -d \( -f2 | awk '{print $1}'").strip()
            if refs == "0":
                execute("iptables -I FORWARD -i " + bridge_name + " -j DROP")
                execute("iptables -I FORWARD -o " + bridge_name + " -j DROP")
                execute("iptables -I FORWARD -i " + bridge_name + " -m physdev --physdev-is-bridged -j " + brfw)
                execute("iptables -I FORWARD -o " + bridge_name + " -m physdev --physdev-is-bridged -j " + brfw)
                phydev = execute("brctl show |grep " + bridge_name + " | awk '{print $4}'").strip()
                execute("iptables -A " + brfw + " -m physdev --physdev-is-bridged --physdev-out " + phydev + " -j ACCEPT")
                execute("iptables -A " + brfw + " -m state --state RELATED,ESTABLISHED -j ACCEPT")
                execute("iptables -A " + brfw + " -m physdev --physdev-is-bridged --physdev-is-out -j " + brfwout)
                execute("iptables -A " + brfw + " -m physdev --physdev-is-bridged --physdev-is-in -j " + brfwin)

            return True
        except:
            # Best-effort rollback: flush whatever was programmed into the chain.
            try:
                execute("iptables -F " + brfw)
            except:
                return False

            return False

    @staticmethod
    def default_network_rules_user_vm(vm_name, vm_id, vm_ip, vm_mac, vif, bridge_name):
        """Program the baseline rules for a user VM: DHCP, anti-IP-spoofing, default DROP.

        Returns True on success, False otherwise.
        """
        if not OvmSecurityGroup.add_fw_framework(bridge_name):
            return False

        OvmSecurityGroup.delete_iptables_rules_for_vm(vm_name)
        OvmSecurityGroup.delete_ebtables_rules_for_vm(vm_name)

        bridge_firewall_chain = "BF-" + bridge_name
        vm_chain = vm_name
        default_vm_chain = '-'.join(vm_chain.split('-')[:-1]) + "-def"
        dom_id = getDomId(vm_name)

        # Create the per-VM chains, or flush them if they already exist.
        try:
            execute("iptables -N " + vm_chain)
        except:
            execute("iptables -F " + vm_chain)

        try:
            execute("iptables -N " + default_vm_chain)
        except:
            execute("iptables -F " + default_vm_chain)

        try:
            execute("iptables -A " + bridge_firewall_chain + "-OUT" + " -m physdev --physdev-is-bridged --physdev-out " + vif + " -j " + default_vm_chain)
            execute("iptables -A " + bridge_firewall_chain + "-IN" + " -m physdev --physdev-is-bridged --physdev-in " + vif + " -j " + default_vm_chain)
            execute("iptables -A  " + default_vm_chain + " -m state --state RELATED,ESTABLISHED -j ACCEPT")

            # Allow DHCP
            execute("iptables -A " + default_vm_chain + " -m physdev --physdev-is-bridged --physdev-in " + vif + " -p udp --dport 67 --sport 68 -j ACCEPT")
            execute("iptables -A " + default_vm_chain + " -m physdev --physdev-is-bridged --physdev-out " + vif + " -p udp --dport 68 --sport 67  -j ACCEPT")

            # Don't let a VM spoof its ip address
            if vm_ip is not None:
                execute("iptables -A " + default_vm_chain + " -m physdev --physdev-is-bridged --physdev-in " + vif + " --source " + vm_ip + " -j ACCEPT")

            execute("iptables -A " + default_vm_chain + " -j " + vm_chain)
            execute("iptables -A " + vm_chain + " -j DROP")
        except:
            logging.debug("Failed to program default rules for vm " + vm_name)
            return False

        OvmSecurityGroup.default_ebtables_rules(vm_chain, vm_ip, vm_mac, vif)

        if vm_ip is not None:
            if not OvmSecurityGroup.write_rule_log_for_vm(vm_name, vm_id, vm_ip, dom_id, '_initial_', '-1'):
                logging.debug("Failed to log default network rules, ignoring")

        logging.debug("Programmed default rules for vm " + vm_name)
        return True

    @staticmethod
    def default_ebtables_rules(vm_name, vm_ip, vm_mac, vif):
        """Program layer-2 anti-spoofing (MAC / ARP) rules for one VM interface.

        Returns True on success, False otherwise.
        """
        vm_chain_in = vm_name + "-in"
        vm_chain_out = vm_name + "-out"

        # Create the per-VM ebtables chains, or flush them if they already exist.
        for chain in [vm_chain_in, vm_chain_out]:
            try:
                execute("ebtables -t nat -N " + chain)
            except:
                execute("ebtables -t nat -F " + chain)

        try:
            execute("ebtables -t nat -A PREROUTING -i " + vif + " -j " + vm_chain_in)
            execute("ebtables -t nat -A POSTROUTING -o " + vif + " -j " + vm_chain_out)
        except:
            logging.debug("Failed to program default rules")
            return False

        # Inbound (from the VM): drop frames not from its MAC, and ARP not
        # matching its MAC/IP; permit normal ARP request/reply, drop other ARP.
        try:
            execute("ebtables -t nat -A " + vm_chain_in + " -s ! " + vm_mac + " -j DROP")
            execute("ebtables -t nat -A " + vm_chain_in + " -p ARP -s ! " + vm_mac + " -j DROP")
            execute("ebtables -t nat -A " + vm_chain_in + " -p ARP --arp-mac-src ! " + vm_mac + " -j DROP")
            if vm_ip is not None:
                execute("ebtables -t nat -A " + vm_chain_in + " -p ARP --arp-ip-src ! " + vm_ip + " -j DROP")
            execute("ebtables -t nat -A " + vm_chain_in + " -p ARP --arp-op Request -j ACCEPT")
            execute("ebtables -t nat -A " + vm_chain_in + " -p ARP --arp-op Reply -j ACCEPT")
            execute("ebtables -t nat -A " + vm_chain_in + " -p ARP  -j DROP")
        except:
            logging.exception("Failed to program default ebtables IN rules")
            return False

        # Outbound (to the VM): drop ARP replies not destined for its MAC/IP.
        try:
            execute("ebtables -t nat -A " + vm_chain_out + " -p ARP --arp-op Reply --arp-mac-dst ! " + vm_mac + " -j DROP")
            if vm_ip is not None:
                execute("ebtables -t nat -A " + vm_chain_out + " -p ARP --arp-ip-dst ! " + vm_ip + " -j DROP")
            execute("ebtables -t nat -A " + vm_chain_out + " -p ARP --arp-op Request -j ACCEPT")
            execute("ebtables -t nat -A " + vm_chain_out + " -p ARP --arp-op Reply -j ACCEPT")
            execute("ebtables -t nat -A " + vm_chain_out + " -p ARP -j DROP")
        except:
            logging.debug("Failed to program default ebtables OUT rules")
            return False

        return True

    @staticmethod
    def add_network_rules(vm_name, vm_id, vm_ip, signature, seqno, vm_mac, rules, vif, bridge_name):
        """Apply an ingress rule set to a VM's chain.

        `rules` is a ';'-separated list of "protocol:start:end:cidr,cidr,"
        entries.  Work is skipped when the rule log shows nothing changed.
        Returns True on success, False otherwise.
        """
        try:
            vm_chain = vm_name
            dom_id = getDomId(vm_name)

            changes = OvmSecurityGroup.check_rule_log_for_vm(vm_name, vm_id, vm_ip, dom_id, signature, seqno)

            if True not in changes:
                logging.debug("Rules already programmed for vm " + vm_name)
                return True

            # Name/id/ip/domid changed -> reprogram the default rules first.
            # Bug fix: this called the non-existent OvmSecurityGroup.default_network_rules;
            # the method is default_network_rules_user_vm.
            if changes[0] or changes[1] or changes[2] or changes[3]:
                if not OvmSecurityGroup.default_network_rules_user_vm(vm_name, vm_id, vm_ip, vm_mac, vif, bridge_name):
                    return False

            if not rules:
                lines = []
            else:
                lines = rules.split(';')[:-1]

            logging.debug("Programming network rules for  IP: " + vm_ip + " vmname=" + vm_name)
            execute("iptables -F " + vm_chain)

            for line in lines:
                tokens = line.split(':')
                if len(tokens) != 4:
                    continue
                protocol = tokens[0]
                start = tokens[1]
                end = tokens[2]
                cidrs = tokens[3]
                ips = cidrs.split(",")
                ips.pop()
                allow_any = False
                if '0.0.0.0/0' in ips:
                    # "any source" is handled with a separate source-less rule below.
                    ips.remove('0.0.0.0/0')
                    allow_any = True

                port_range = start + ":" + end
                if ips:
                    if protocol == 'all':
                        for ip in ips:
                            execute("iptables -I " + vm_chain + " -m state --state NEW -s " + ip + " -j ACCEPT")
                    elif protocol != 'icmp':
                        for ip in ips:
                            execute("iptables -I " + vm_chain + " -p " + protocol + " -m " + protocol + " --dport " + port_range + " -m state --state NEW -s " + ip + " -j ACCEPT")
                    else:
                        port_range = start + "/" + end
                        if start == "-1":
                            port_range = "any"
                        # Bug fix: the per-ip icmp rules were only installed when
                        # the icmp type was -1 (the loop was nested under the if).
                        for ip in ips:
                            execute("iptables -I " + vm_chain + " -p icmp --icmp-type " + port_range + " -s " + ip + " -j ACCEPT")

                if allow_any and protocol != 'all':
                    if protocol != 'icmp':
                        execute("iptables -I " + vm_chain + " -p " + protocol + " -m " + protocol + " --dport " + port_range + " -m state --state NEW -j ACCEPT")
                    else:
                        port_range = start + "/" + end
                        if start == "-1":
                            port_range = "any"
                        # Bug fix: same mis-nesting as above -- the rule was only
                        # installed when the icmp type was -1.
                        execute("iptables -I " + vm_chain + " -p icmp --icmp-type " + port_range + " -j ACCEPT")

            execute("iptables -A " + vm_chain + " -j DROP")

            return OvmSecurityGroup.write_rule_log_for_vm(vm_name, vm_id, vm_ip, dom_id, signature, seqno)
        except:
            # Bug fix: the original concatenated sys.exc_type (a class, and
            # removed in py3) to a str, which itself raised.  Log the traceback.
            logging.exception("Failed to program network rules for vm " + vm_name)
            return False

    @staticmethod
    def delete_all_network_rules_for_vm(vm_name, vif=None):
        """Remove every iptables/ebtables rule, chain, and rule-log entry for a VM.

        If `vif` is given, matching DNAT rules in the nat table are removed too.
        Always returns True.
        """
        OvmSecurityGroup.delete_iptables_rules_for_vm(vm_name)
        OvmSecurityGroup.delete_ebtables_rules_for_vm(vm_name)

        vm_chain = vm_name
        default_vm_chain = None
        if vm_name.startswith('i-') or vm_name.startswith('r-'):
            default_vm_chain = '-'.join(vm_name.split('-')[:-1]) + "-def"

        try:
            if default_vm_chain is not None:
                execute("iptables -F " + default_vm_chain)
        except:
            logging.debug("Ignoring failure to delete chain " + default_vm_chain)

        try:
            if default_vm_chain is not None:
                # Bug fix: this referenced the undefined name 'vmchain_default'.
                execute("iptables -X " + default_vm_chain)
        except:
            logging.debug("Ignoring failure to delete chain " + default_vm_chain)

        try:
            execute("iptables -F " + vm_chain)
        except:
            logging.debug("Ignoring failure to delete  chain " + vm_chain)

        try:
            execute("iptables -X " + vm_chain)
        except:
            logging.debug("Ignoring failure to delete  chain " + vm_chain)

        if vif is not None:
            try:
                # Turn every saved '-A' nat rule mentioning the vif into a delete.
                dnats = execute("iptables-save -t nat | grep " + vif + " | sed 's/-A/-D/'").split("\n")
                for dnat in dnats:
                    try:
                        execute("iptables -t nat " + dnat)
                    except:
                        logging.debug("Ignoring failure to delete dnat: " + dnat)
            except:
                pass

        OvmSecurityGroup.remove_rule_log_for_vm(vm_name)

        # (The original branched on r-/s-/v- prefixes but returned True either way.)
        return True

    @staticmethod
    def delete_iptables_rules_for_vm(vm_name):
        """Delete the bridged iptables rules referencing this VM's chain."""
        vm_name = OvmSecurityGroup.truncate_vm_name(vm_name)
        vm_chain = vm_name
        query = "iptables-save | grep " + vm_chain + " | grep physdev-is-bridged | sed 's/-A/-D/'"
        delete_cmds = execute(query).split('\n')
        delete_cmds.pop()

        for cmd in delete_cmds:
            try:
                execute("iptables " + cmd)
            except:
                logging.exception("Ignoring failure to delete rules for vm " + vm_name)

    @staticmethod
    def delete_ebtables_rules_for_vm(vm_name):
        """Delete the ebtables PRE/POSTROUTING jumps and per-VM chains for this VM."""
        vm_name = OvmSecurityGroup.truncate_vm_name(vm_name)
        query = "ebtables -t nat -L --Lx | grep ROUTING | grep " + vm_name + " | sed 's/-A/-D/'"
        delete_cmds = execute(query).split('\n')
        delete_cmds.pop()

        for cmd in delete_cmds:
            try:
                execute(cmd)
            except:
                logging.debug("Ignoring failure to delete ebtables rules for vm " + vm_name)

        chains = [vm_name + "-in", vm_name + "-out"]

        for chain in chains:
            try:
                execute("ebtables -t nat -F " + chain)
                execute("ebtables -t nat -X " + chain)
            except:
                logging.debug("Ignoring failure to delete ebtables chain for vm " + vm_name)

    @staticmethod
    def truncate_vm_name(vm_name):
        """Strip the trailing '-<suffix>' from instance/router VM names (i-*/r-*)."""
        if vm_name.startswith('i-') or vm_name.startswith('r-'):
            truncated_vm_name = '-'.join(vm_name.split('-')[:-1])
        else:
            truncated_vm_name = vm_name
        return truncated_vm_name

    @staticmethod
    def write_rule_log_for_vm(vm_name, vm_id, vm_ip, dom_id, signature, seqno):
        """Persist the state compared by check_rule_log_for_vm.  Returns success."""
        log_file_name = "/var/run/cloud/" + vm_name + ".log"
        logging.debug("Writing log to " + log_file_name)
        output = ','.join([vm_name, vm_id, vm_ip, dom_id, signature, seqno])

        result = True
        logf = open(log_file_name, 'w')
        try:
            try:
                logf.write(output)
                logf.write('\n')
            except:
                logging.debug("Failed to write to rule log file " + log_file_name)
                result = False
        finally:
            logf.close()
        return result

    @staticmethod
    def remove_rule_log_for_vm(vm_name):
        """Delete the VM's rule log file.  Returns False if removal failed."""
        log_file_name = "/var/run/cloud/" + vm_name + ".log"

        result = True
        try:
            os.remove(log_file_name)
        except:
            logging.debug("Failed to delete rule log file " + log_file_name)
            result = False

        return result

    @staticmethod
    def check_rule_log_for_vm(vm_name, vm_id, vm_ip, dom_id, signature, seqno):
        """Compare the current VM state against the saved rule log.

        Returns six booleans: [name, id, ip, domid, signature, seqno] changed.
        All True when the log is missing or unreadable.
        """
        no_log = [True, True, True, True, True, True]
        log_file_name = "/var/run/cloud/" + vm_name + ".log"
        if not os.path.exists(log_file_name):
            return no_log

        # Bug fix: the original opened the file via a generator and never
        # closed it; open/close it explicitly.
        try:
            logf = open(log_file_name, 'r')
        except:
            logging.debug("failed to open " + log_file_name)
            return no_log

        [_vm_name, _vm_id, _vm_ip, _dom_id, _signature, _seqno] = ['_', '-1', '_', '-1', '_', '-1']
        try:
            try:
                line = logf.readline().rstrip()
                if line:
                    [_vm_name, _vm_id, _vm_ip, _dom_id, _signature, _seqno] = line.split(',')
            except:
                logging.debug("Failed to parse log file for vm " + vm_name)
                # Bug fix: this was an unqualified call to remove_rule_log_for_vm,
                # which is a NameError inside a staticmethod.
                OvmSecurityGroup.remove_rule_log_for_vm(vm_name)
                return no_log
        finally:
            logf.close()

        return [(vm_name != _vm_name), (vm_id != _vm_id), (vm_ip != _vm_ip), (dom_id != _dom_id), (signature != _signature), (seqno != _seqno)]
-
-    
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/ovm/scripts/vm/hypervisor/ovm/OvmStoragePoolModule.py
----------------------------------------------------------------------
diff --git a/ovm/scripts/vm/hypervisor/ovm/OvmStoragePoolModule.py b/ovm/scripts/vm/hypervisor/ovm/OvmStoragePoolModule.py
deleted file mode 100755
index 9d83037..0000000
--- a/ovm/scripts/vm/hypervisor/ovm/OvmStoragePoolModule.py
+++ /dev/null
@@ -1,428 +0,0 @@
-# Copyright 2012 Citrix Systems, Inc. Licensed under the
-# Apache License, Version 2.0 (the "License"); you may not use this
-# file except in compliance with the License.  Citrix Systems, Inc.
-# reserves all rights not expressly granted by the License.
-# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# 
-# Automatically generated by addcopyright.py at 04/03/2012
-from OvmCommonModule import *
-from OVSSiteSR import sp_create, sr_create, sr_do
-from OVSParser import parse_ocfs2_cluster_conf
-from OVSXCluster import clusterm_set_ocfs2_cluster_conf, clusterm_start_o2cb_service
-from OVSSiteRMServer import get_master_ip
-from OvmOCFS2Module import OvmOCFS2
-import re
-
class OvmStoragePoolDecoder(json.JSONDecoder):
    """Decodes a JSON string into an OvmStoragePool (uuid, type, path only)."""
    def decode(self, jStr):
        fields = asciiLoads(jStr)
        pool = OvmStoragePool()
        for attr in ('uuid', 'type', 'path'):
            setAttrFromDict(pool, attr, fields)
        return pool
-
class OvmStoragePoolEncoder(json.JSONEncoder):
    """Serializes an OvmStoragePool into a plain dict for JSON output."""
    def default(self, obj):
        if not isinstance(obj, OvmStoragePool):
            raise Exception("%s is not instance of OvmStoragePool"%type(obj))
        dct = {}
        for attr in ('uuid', 'type', 'path', 'mountPoint',
                     'totalSpace', 'freeSpace', 'usedSpace'):
            safeDictSet(obj, dct, attr)
        return dct
-
def fromOvmStoragePool(pool):
    """Serialize a pool with OvmStoragePoolEncoder and normalize to gson form."""
    jStr = json.dumps(pool, cls=OvmStoragePoolEncoder)
    return normalizeToGson(jStr)
-
def toOvmStoragePool(jStr):
    """Parse a JSON string into an OvmStoragePool via OvmStoragePoolDecoder."""
    decoded = json.loads(jStr, cls=OvmStoragePoolDecoder)
    return decoded
-
-logger = OvmLogger('OvmStoragePool')   
-class OvmStoragePool(OvmObject):
-    uuid = ''
-    type = ''
-    path = ''
-    mountPoint = ''
-    totalSpace = 0
-    freeSpace = 0
-    usedSpace = 0
-
-    def _getSrByNameLable(self, poolUuid):
-        d = db_dump('sr')
-        for uuid, sr in d.items():
-            if sr.name_label == poolUuid:
-                return sr
-    
-        raise Exception("No SR matching to %s" % poolUuid)
-    
-    def _getSpaceinfoOfDir(self, dir):
-        stat = os.statvfs(dir)
-        freeSpace = stat.f_frsize * stat.f_bavail;
-        totalSpace = stat.f_blocks * stat.f_frsize;
-        return (totalSpace, freeSpace)
-    
-    def _checkDirSizeForImage(self, dir, image):
-        (x, free_storage_size) = OvmStoragePool()._getSpaceinfoOfDir(dir)
-        image_size = os.path.getsize(image)
-        if image_size > (free_storage_size + 1024 * 1024 * 1024):
-            raise Exception("No space on dir %s (free storage:%s, vm size:%s)"%(dir, free_storage_size, image_size))
-         
-    def _getAllMountPoints(self):
-        mps = []
-        d = db_dump('sr')
-        for uuid, sr in d.items():
-            mps.append(sr.mountpoint)
-        return mps
-    
-    def _isMounted(self, path):
-        res = doCmd(['mount'])
-        return (path in res)
-    
-    def _mount(self, target, mountpoint, readonly=False):
-        if not exists(mountpoint):
-            os.makedirs(mountpoint)
-            
-        if not OvmStoragePool()._isMounted(mountpoint):
-            if readonly:
-                doCmd(['mount', target, mountpoint, '-r'])
-            else:
-                doCmd(['mount', target, mountpoint])
-    
-    def _umount(self, mountpoint):
-        umountCmd = ['umount', '-f', mountpoint]
-        doCmd(umountCmd)
-        ls = os.listdir(mountpoint)
-        if len(ls) == 0:
-            rmDirCmd = ['rm', '-r', mountpoint]
-            doCmd(rmDirCmd)
-        else:
-            logger.warning(OvmStoragePool._umount, "Something wrong when umount %s, there are still files in directory:%s", mountpoint, " ".join(ls))
-         
-    @staticmethod
-    def create(jStr):
-        try:
-            pool = toOvmStoragePool(jStr)
-            logger.debug(OvmStoragePool.create, fromOvmStoragePool(pool))
-            spUuid = jsonSuccessToMap(sp_create(pool.type, pool.path))['uuid']
-            srUuid = jsonSuccessToMap(sr_create(spUuid, name_label=pool.uuid))['uuid']
-            sr_do(srUuid, "initialize")
-            rs = SUCC()
-            return rs
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmStoragePool.create, errmsg)
-            raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.create), errmsg)
-    
    @staticmethod
    def getDetailsByUuid(uuid):
        """Return a gson string describing the pool whose SR name_label is uuid.

        Includes type, path, mount point, and total/free/used space figures.
        Raises XmlRpcFault on any error.
        """
        try:
            sr = OvmStoragePool()._getSrByNameLable(uuid)
            pool = OvmStoragePool()
            safeSetAttr(pool, 'uuid', uuid)
            #Note: the sr.sp.fs_type is not mapped to its class name which we use in mgmt server
            safeSetAttr(pool, 'type', sr.sp.__class__.__name__)
            safeSetAttr(pool, 'path', sr.sp.get_fs_spec())
            safeSetAttr(pool, 'mountPoint', sr.mountpoint)
            (totalSpace, freeSpace) = OvmStoragePool()._getSpaceinfoOfDir(sr.mountpoint)
            safeSetAttr(pool, 'totalSpace', totalSpace)
            safeSetAttr(pool, 'freeSpace', freeSpace)
            # used space is derived rather than measured separately
            safeSetAttr(pool, 'usedSpace', totalSpace - freeSpace)
            res = fromOvmStoragePool(pool)
            logger.debug(OvmStoragePool.getDetailsByUuid, res)
            return res
        except Exception, e:
            errmsg = fmt_err_msg(e)
            logger.error(OvmStoragePool.getDetailsByUuid, errmsg)
            raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.getDetailsByUuid), errmsg)
-    
-    @staticmethod
-    def downloadTemplate(uuid, secPath):
-        secMountPoint = None
-        try:
-            logger.debug(OvmStoragePool.downloadTemplate, "download %s to pool %s"%(secPath, uuid))
-            try:
-                tmpUuid = get_uuid()
-                secMountPoint = join("/var/cloud/", tmpUuid)
-                if not exists(secMountPoint):
-                    os.makedirs(secMountPoint)
-    
-                templateFile = None
-                if secPath.endswith("raw"):
-                    secPathDir = os.path.dirname(secPath)
-                    templateFile = os.path.basename(secPath)
-                else:
-                    secPathDir = secPath
-                    
-                # mount as read-only
-                mountCmd = ['mount.nfs', secPathDir, secMountPoint, '-r']
-                doCmd(mountCmd)
-    
-                if not templateFile:
-                    for f in os.listdir(secMountPoint):
-                        if isfile(join(secMountPoint, f)) and f.endswith('raw'):
-                            templateFile = f
-                            break    
-        
-                if not templateFile:
-                    raise Exception("Can not find raw template in secondary storage")
-                templateSecPath = join(secMountPoint, templateFile)
-    
-                sr = OvmStoragePool()._getSrByNameLable(uuid)
-                priStorageMountPoint = sr.mountpoint
-                # Although mgmt server will check the size, we check again for safety
-                OvmStoragePool()._checkDirSizeForImage(priStorageMountPoint, templateSecPath)
-                seedDir = join(priStorageMountPoint, 'seed_pool', tmpUuid)
-                if exists(seedDir):
-                    raise Exception("%s already here, cannot override existing template" % seedDir)
-                os.makedirs(seedDir)
-    
-                tgt = join(seedDir, templateFile)
-                cpTemplateCmd = ['cp', templateSecPath, tgt]
-                logger.info(OvmStoragePool.downloadTemplate, " ".join(cpTemplateCmd))
-                doCmd(cpTemplateCmd)
-                templateSize = os.path.getsize(tgt) 
-                logger.info(OvmStoragePool.downloadTemplate, "primary_storage_download success:installPath:%s, templateSize:%s"%(tgt,templateSize))
-                rs = toGson({"installPath":tgt, "templateSize":templateSize})
-                return rs
-            except Exception, e:
-                errmsg = fmt_err_msg(e)
-                logger.error(OvmStoragePool.downloadTemplate, errmsg)
-                raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.downloadTemplate), errmsg)
-        finally:
-            if exists(secMountPoint):
-                try:
-                    OvmStoragePool()._umount(secMountPoint)
-                except Exception, e:
-                    errmsg = fmt_err_msg(e)
-                    logger.error(OvmStoragePool.downloadTemplate, 'unmount secondary storage at %s failed, %s'%(secMountPoint, errmsg))
-
    @staticmethod
    def prepareOCFS2Nodes(clusterName, nodeString):        
        """Configure this host as a member of the named OCFS2 cluster.

        nodeString format: "number:ip:name;number:ip:name;..." (a trailing ';'
        is tolerated).  Rewrites /etc/hosts and the hostname file, registers
        every node with the cluster config, then starts the cluster.  Raises
        XmlRpcFault on any failure.  NOTE(review): ETC_HOSTS, HOSTNAME_FILE
        and OCFS2_CONF are presumably constants from OvmCommonModule -- confirm.
        """
        def configureEtcHosts(nodes):
            # Drop any stale lines mentioning these ips/names, then append fresh entries.
            if not exists(ETC_HOSTS):
                orignalConf = ""
            else:
                fd = open(ETC_HOSTS, "r")
                orignalConf = fd.read()
                fd.close()
            
            pattern = r"(.*%s.*)|(.*%s.*)"
            newlines = []
            for n in nodes:
                p = pattern % (n["ip_address"], n["name"])
                orignalConf = re.sub(p, "", orignalConf)
                newlines.append("%s\t%s\n"%(n["ip_address"], n["name"]))
            
            orignalConf = orignalConf + "".join(newlines)
            # remove extra empty lines
            orignalConf = re.sub(r"\n\s*\n*", "\n", orignalConf)
            logger.debug(OvmStoragePool.prepareOCFS2Nodes, "Configure /etc/hosts:%s\n"%orignalConf)
            fd = open(ETC_HOSTS, "w")
            fd.write(orignalConf)
            fd.close()
        
        def configureHostName(nodes):
            # Adopt the node name whose ip matches this host's master ip.
            myIp = successToMap(get_master_ip())['ip']
            nodeName = None
            for n in nodes:
                if myIp == n["ip_address"]:
                    nodeName = n["name"]
                    break
            
            if nodeName == None: raise Exception("Cannot find node equals to my ip address:%s"%myIp)
            if not exists(HOSTNAME_FILE):
                originalConf = ""
            else:
                fd = open(HOSTNAME_FILE, "r")
                originalConf = fd.read()
                fd.close()
            
            pattern = r"HOSTNAME=(.*)"
            # remove any old hostname
            originalConf = re.sub(pattern, "", originalConf)
            # remove extra empty lines
            originalConf = re.sub(r"\n\s*\n*", "\n", originalConf) + "\n" + "HOSTNAME=%s"%nodeName
            logger.debug(OvmStoragePool.prepareOCFS2Nodes, "Configure %s:%s\n"%(HOSTNAME_FILE,originalConf))
            fd = open(HOSTNAME_FILE, "w")
            fd.write(originalConf)
            fd.close()
            doCmd(['hostname', nodeName])
        
        def addNodes(nodes, clusterName):
            # Register each node with o2cb; write a fresh conf when the cluster is offline.
            ocfs2 = OvmOCFS2()
            ocfs2._load()
            isOnline = ocfs2._isClusterOnline(clusterName)
            if not isOnline:
                ocfs2._prepareConf(clusterName)
            
            for n in nodes:
                ocfs2._addNode(n['name'], n['number'], n['ip_address'], 7777, clusterName, isOnline)
            
        def checkStaleCluster(clusterName):
            # Refuse to proceed if a different cluster is already configured on this host.
            if exists('/sys/kernel/config/cluster/'):
                dirs = os.listdir('/sys/kernel/config/cluster/')
                for dir in dirs:
                    if dir != clusterName:
                        errMsg = '''CloudStack detected there is a stale cluster(%s) on host %s. Please manually clean up it first then add again by
1) remove the host from cloudstack 
2) umount all OCFS2 device on host
3) /etc/init.d/o2cb offline %s
4) /etc/init.d/o2cb restart
if this doesn't resolve the problem, please check oracle manual to see how to offline a cluster
    ''' % (dir, successToMap(get_master_ip())['ip'], dir)
                        raise Exception(errMsg)
            
        try:
            checkStaleCluster(clusterName)
            nodeString = nodeString.strip(";")
            nodes = []
            for n in nodeString.split(";"):
                params = n.split(":")
                if len(params) != 3: raise Exception("Wrong parameter(%s) in node string(%s)"%(n, nodeString))
                dict = {"number":params[0], "ip_address":params[1], "name":params[2]}
                nodes.append(dict)
            
            # OCFS2 node numbers are 8-bit; cap cluster size accordingly.
            if len(nodes) > 255:
                raise Exception("%s nodes beyond maximum 255 allowed by OCFS2"%len(nodes))
            
            configureHostName(nodes)
            configureEtcHosts(nodes)
            addNodes(nodes, clusterName)
            OvmOCFS2()._start(clusterName)
            fd = open(OCFS2_CONF, 'r')
            conf = fd.readlines()
            fd.close()
            logger.debug(OvmStoragePool.prepareOCFS2Nodes, "Configure cluster.conf to:\n%s"%' '.join(conf))
            rs = SUCC()
            return rs
        
        except Exception, e:
            errmsg = fmt_err_msg(e)
            logger.error(OvmStoragePool.prepareOCFS2Nodes, errmsg)
            raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.prepareOCFS2Nodes), errmsg)
-    
-    @staticmethod
-    def createTemplateFromVolume(secStorageMountPath, installPath, volumePath):
-        try:
-            secMountPoint = ""
-            if not isfile(volumePath): raise Exception("Cannot find %s"%volumePath)
-            vmCfg = join(dirname(volumePath), 'vm.cfg')
-            vmName = getVmNameFromConfigureFile(vmCfg)
-            if vmName in doCmd(['xm', 'list']):
-                raise Exception("%s is still running, please stop it first then create template again"%vmName)
-            
-            tmpUuid = get_uuid()
-            secMountPoint = join("/var/cloud/", tmpUuid)
-            OvmStoragePool()._mount(secStorageMountPath, secMountPoint)
-            installPath = installPath.lstrip('/')
-            destPath = join(secMountPoint, installPath)
-            #This prevents us from deleting the whole secondary storage in case we got a wrong installPath
-            if destPath == secMountPoint: raise Exception("Install path equals to root of secondary storage(%s)"%destPath)
-            if exists(destPath):
-                logger.warning(OvmStoragePool.createTemplateFromVolume, "%s is already here, delete it since it is most likely stale"%destPath)
-                doCmd(['rm', '-rf', destPath])
-            OvmStoragePool()._checkDirSizeForImage(secMountPoint, volumePath)
-            
-            os.makedirs(destPath)
-            newName = get_uuid() + ".raw"
-            destName = join(destPath, newName)
-            doCmd(['cp', volumePath, destName])
-            size = os.path.getsize(destName)
-            resInstallPath = join(installPath, newName)
-            OvmStoragePool()._umount(secMountPoint)
-            rs = toGson({"installPath":resInstallPath, "templateFileName":newName, "virtualSize":size, "physicalSize":size})
-            return rs
-        
-        except Exception, e:
-            try:
-                if exists(secMountPoint):
-                    OvmStoragePool()._umount(secMountPoint)
-            except Exception, e:
-                logger.warning(OvmStoragePool.createTemplateFromVolume, "umount %s failed"%secMountPoint)       
-                
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmStoragePool.createTemplateFromVolume, errmsg)
-            raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.createTemplateFromVolume), errmsg)
-    
-    @staticmethod
-    def delete(uuid):
-        try:
-            sr = OvmStoragePool()._getSrByNameLable(uuid)
-            primaryStoragePath = sr.mountpoint
-            OvmStoragePool()._umount(primaryStoragePath)
-            rs = SUCC()
-            return rs
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmStoragePool.delete, errmsg)
-            raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.delete), errmsg) 
-        
-    @staticmethod
-    def copyVolume(secStorageMountPath, volumeFolderOnSecStorage, volumePath, storagePoolUuid, toSec):
-        def copyToSecStorage(secMountPoint, volumeFolderOnSecStorage, volumePath):
-            if not isfile(volumePath): raise Exception("Cannot find volume at %s"%volumePath)
-            OvmStoragePool()._checkDirSizeForImage(secMountPoint, volumePath)
-            volumeFolderOnSecStorage = volumeFolderOnSecStorage.lstrip("/")
-            destPath = join(secMountPoint, volumeFolderOnSecStorage)
-            #This prevents us from deleting the whole secondary storage in case we got a wrong volumeFolderOnSecStorage
-            if destPath == secMountPoint: raise Exception("volume path equals to root of secondary storage(%s)"%destPath)
-            if exists(destPath):
-                logger.warning(OvmStoragePool.copyVolume, "%s already exists, delete it first"%destPath)
-                doCmd(['rm', '-rf', destPath])
-            os.makedirs(destPath)
-            newName = get_uuid() + ".raw"
-            destName = join(destPath, newName)
-            doCmd(['cp', volumePath, destName])
-            return destName
-        
-        def copyToPrimary(secMountPoint, volumeFolderOnSecStorage, volumePath, primaryMountPath):
-            srcPath = join(secMountPoint, volumeFolderOnSecStorage.lstrip("/"), volumePath.lstrip("/"))
-            if not srcPath.endswith(".raw"): srcPath = srcPath + ".raw"
-            if not isfile(srcPath): raise Exception("Cannot find volume at %s"%srcPath)
-            if not exists(primaryMountPath): raise Exception("Primary storage(%s) seems to have gone"%primaryMountPath)
-            OvmStoragePool()._checkDirSizeForImage(primaryMountPath, srcPath)
-            destPath = join(primaryMountPath, "sharedDisk")
-            newName = get_uuid() + ".raw"
-            destName = join(destPath, newName)
-            doCmd(['cp', srcPath, destName])
-            return destName
-                      
-        secMountPoint = ""
-        try:
-            tmpUuid = get_uuid()
-            secMountPoint = join("/var/cloud/", tmpUuid)
-            OvmStoragePool()._mount(secStorageMountPath, secMountPoint)
-            if toSec:
-                resultPath = copyToSecStorage(secMountPoint, volumeFolderOnSecStorage, volumePath)
-            else:
-                sr = OvmStoragePool()._getSrByNameLable(storagePoolUuid)
-                primaryStoragePath = sr.mountpoint
-                resultPath = copyToPrimary(secMountPoint, volumeFolderOnSecStorage, volumePath, primaryStoragePath)
-            OvmStoragePool()._umount(secMountPoint)
-            
-            # ingratiate bad mgmt server design, it asks 'installPath' but it only wants the volume name without suffix
-            volumeUuid = basename(resultPath).rstrip(".raw")
-            rs = toGson({"installPath":volumeUuid})
-            return rs
-        except Exception, e:
-            try:
-                if exists(secMountPoint):
-                    OvmStoragePool()._umount(secMountPoint)
-            except Exception, e:
-                logger.warning(OvmStoragePool.copyVolume, "umount %s failed"%secMountPoint)       
-                
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmStoragePool.copyVolume, errmsg)
-            raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.copyVolume), errmsg)
-                
-                
-            
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/ovm/scripts/vm/hypervisor/ovm/OvmVifModule.py
----------------------------------------------------------------------
diff --git a/ovm/scripts/vm/hypervisor/ovm/OvmVifModule.py b/ovm/scripts/vm/hypervisor/ovm/OvmVifModule.py
deleted file mode 100644
index 3405c5a..0000000
--- a/ovm/scripts/vm/hypervisor/ovm/OvmVifModule.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2012 Citrix Systems, Inc. Licensed under the
-# Apache License, Version 2.0 (the "License"); you may not use this
-# file except in compliance with the License.  Citrix Systems, Inc.
-# reserves all rights not expressly granted by the License.
-# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# 
-# Automatically generated by addcopyright.py at 04/03/2012
-'''
-Created on May 17, 2011
-
-@author: frank
-'''
-from OvmCommonModule import *
-
-class OvmVifDecoder(json.JSONDecoder):
-    def decode(self, jStr):
-        deDict = asciiLoads(jStr)
-        vif = OvmVif()
-        vif.mac = deDict['mac']
-        vif.bridge = deDict['bridge']
-        return vif
-    
-class OvmVifEncoder(json.JSONEncoder):
-    def default(self, obj):
-        if not isinstance(obj, OvmVif): raise Exception("%s is not instance of OvmVif"%type(obj))
-        dct = {}
-        safeDictSet(obj, dct, 'mac')
-        safeDictSet(obj, dct, 'bridge')
-        safeDictSet(obj, dct, 'type')
-        safeDictSet(obj, dct, 'name')
-        return dct    
-
-def fromOvmVif(vif):
-    return normalizeToGson(json.dumps(vif, cls=OvmVifEncoder))
-
-def fromOvmVifList(vifList):
-    return [fromOvmVif(v) for v in vifList]
-
-def toOvmVif(jStr):
-    return json.loads(jStr, cls=OvmVifDecoder)
-
-def toOvmVifList(jStr):
-    vifs = []
-    for i in jStr:
-        vif = toOvmVif(i)
-        vifs.append(vif)
-    return vifs
-
-class OvmVif(OvmObject):
-    name = ''
-    mac = ''
-    bridge = ''
-    type = ''
-    mode = ''
-    
-    def toXenString(self):
-        return "%s,%s,%s"%(self.mac, self.bridge, self.type)

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/ovm/scripts/vm/hypervisor/ovm/OvmVmModule.py
----------------------------------------------------------------------
diff --git a/ovm/scripts/vm/hypervisor/ovm/OvmVmModule.py b/ovm/scripts/vm/hypervisor/ovm/OvmVmModule.py
deleted file mode 100755
index 12c3206..0000000
--- a/ovm/scripts/vm/hypervisor/ovm/OvmVmModule.py
+++ /dev/null
@@ -1,538 +0,0 @@
-# Copyright 2012 Citrix Systems, Inc. Licensed under the
-# Apache License, Version 2.0 (the "License"); you may not use this
-# file except in compliance with the License.  Citrix Systems, Inc.
-# reserves all rights not expressly granted by the License.
-# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# 
-# Automatically generated by addcopyright.py at 04/03/2012
-'''
-Created on May 17, 2011
-
-@author: frank
-'''
-from OvmCommonModule import *
-from OvmDiskModule import *
-from OvmVifModule import *
-from OvmHostModule import OvmHost
-from string import Template
-from OVSXXenVMConfig import *
-from OVSSiteVM import start_vm, stop_vm, reset_vm
-from OVSSiteCluster import *
-from OvmStoragePoolModule import OvmStoragePool
-from OVSXXenStore import xen_get_vm_path, xen_get_vnc_port
-from OVSDB import db_get_vm
-from OVSXMonitor import xen_get_vm_perf_metrics, xen_get_xm_info
-from OVSXXenVM import xen_migrate_vm
-from OVSSiteRMVM import unregister_vm, register_vm, set_vm_status
-from OVSSiteVMInstall import install_vm_hvm
-from OVSSiteRMServer import get_master_ip
-from OVSXXenVMInstall import xen_change_vm_cdrom
-from OVSXAPIUtil import XenAPIObject, session_login, session_logout
-
-
-logger = OvmLogger("OvmVm")
-
-class OvmVmDecoder(json.JSONDecoder):
-    def decode(self, jStr):
-        deDict = asciiLoads(jStr)
-        vm = OvmVm()
-        setAttrFromDict(vm, 'cpuNum', deDict, int)
-        setAttrFromDict(vm, 'memory', deDict, long)
-        setattr(vm, 'rootDisk', toOvmDisk(deDict['rootDisk']))
-        setattr(vm, 'vifs', toOvmVifList(deDict['vifs']))
-        setattr(vm, 'disks', toOvmDiskList(deDict['disks']))
-        setAttrFromDict(vm, 'name', deDict)
-        setAttrFromDict(vm, 'uuid', deDict)
-        setAttrFromDict(vm, 'bootDev', deDict)
-        setAttrFromDict(vm, 'type', deDict)
-        return vm
-
-class OvmVmEncoder(json.JSONEncoder):
-    def default(self, obj):
-        if not isinstance(obj, OvmVm): raise Exception("%s is not instance of OvmVm"%type(obj))
-        dct = {}
-        safeDictSet(obj, dct, 'cpuNum')
-        safeDictSet(obj, dct, 'memory')
-        safeDictSet(obj, dct, 'powerState')
-        safeDictSet(obj, dct, 'name')
-        safeDictSet(obj, dct, 'type')
-        vifs = fromOvmVifList(obj.vifs)
-        dct['vifs'] = vifs
-        rootDisk = fromOvmDisk(obj.rootDisk)
-        dct['rootDisk'] = rootDisk
-        disks = fromOvmDiskList(obj.disks)
-        dct['disks'] = disks
-        return dct
-        
-def toOvmVm(jStr):
-    return json.loads(jStr, cls=OvmVmDecoder)
-
-def fromOvmVm(vm):
-    return normalizeToGson(json.dumps(vm, cls=OvmVmEncoder))
-
-class OvmVm(OvmObject):
-    cpuNum = 0
-    memory = 0
-    rootDisk = None
-    vifs = []
-    disks = []
-    powerState = ''
-    name = ''
-    bootDev = ''
-    type = ''
-        
-    def _getVifs(self, vmName):
-        vmPath = OvmHost()._vmNameToPath(vmName)
-        domId = OvmHost()._getDomainIdByName(vmName)
-        vifs = successToMap(xen_get_vifs(vmPath))
-        lst = []
-        for k in vifs:
-            v = vifs[k]
-            vifName = 'vif' + domId + '.' + k[len('vif'):]
-            vif = OvmVif()
-            (mac, bridge, type) = v.split(',')
-            safeSetAttr(vif, 'name', vifName)
-            safeSetAttr(vif, 'mac', mac)
-            safeSetAttr(vif, 'bridge', bridge)
-            safeSetAttr(vif, 'type', type)
-            lst.append(vif)
-            
-        return lst
-    
-    def _getVifsFromConfig(self, vmPath):
-        vifs = successToMap(xen_get_vifs(vmPath))
-        lst = []
-        for k in vifs:
-            v = vifs[k]
-            vif = OvmVif()
-            (mac, bridge, type) = v.split(',')
-            safeSetAttr(vif, 'name', k)
-            safeSetAttr(vif, 'mac', mac)
-            safeSetAttr(vif, 'bridge', bridge)
-            safeSetAttr(vif, 'type', type)
-            lst.append(vif)
-        return lst
-    
-    def _getIsoMountPath(self, vmPath):
-        vmName = basename(vmPath)
-        priStoragePath = vmPath.rstrip(join('running_pool', vmName))
-        return join(priStoragePath, 'iso_pool', vmName)
-    
-    def _getVmTypeFromConfigFile(self, vmPath):
-        vmType = successToMap(xen_get_vm_type(vmPath))['type']
-        return vmType.replace('hvm', 'HVM').replace('para', 'PV')
-    
-    def _tapAOwnerFile(self, vmPath):
-        # Create a file with name convention 'host_ip_address' in vmPath
-        # Because xm list doesn't return vm that has been stopped, we scan
-        # primary storage for stopped vm. This file tells us which host it belongs
-        # to. The file is used in OvmHost.getAllVms()
-        self._cleanUpOwnerFile(vmPath)
-        ownerFileName = makeOwnerFileName()
-        fd = open(join(vmPath, ownerFileName), 'w')
-        fd.write(ownerFileName)
-        fd.close()
-    
-    def _cleanUpOwnerFile(self, vmPath):
-        for f in os.listdir(vmPath):
-            fp = join(vmPath, f)
-            if isfile(fp) and f.startswith(OWNER_FILE_PREFIX):
-                os.remove(fp)
-    
-    @staticmethod
-    def create(jsonString):    
-        def dumpCfg(vmName, cfgPath):
-            cfgFd = open(cfgPath, 'r')
-            cfg = cfgFd.readlines()
-            cfgFd.close()
-            logger.info(OvmVm.create, "Start %s with configure:\n\n%s\n"%(vmName, "".join(cfg)))
-        
-        def setVifsType(vifs, type):
-            for vif in vifs:
-                vif.type = type
-                
-        def hddBoot(vm, vmPath):
-            vmType = vm.type
-            if vmType == "FROMCONFIGFILE":
-                vmType = OvmVm()._getVmTypeFromConfigFile(vmPath)
-                
-            cfgDict = {}
-            if vmType == "HVM":
-                cfgDict['builder'] = "'hvm'"
-                cfgDict['acpi'] = "1"
-                cfgDict['apic'] = "1"
-                cfgDict['device_model'] = "'/usr/lib/xen/bin/qemu-dm'"
-                cfgDict['kernel'] = "'/usr/lib/xen/boot/hvmloader'"
-                vifType = 'ioemu'
-            else:
-                cfgDict['bootloader'] = "'/usr/bin/pygrub'"
-                vifType = 'netfront'
-            
-            cfgDict['name'] = "'%s'"%vm.name
-            cfgDict['disk'] = "[]"
-            cfgDict['vcpus'] = "''"
-            cfgDict['memory'] = "''"
-            cfgDict['on_crash'] = "'destroy'"
-            cfgDict['on_reboot'] = "'restart'"
-            cfgDict['vif'] = "[]"
-            
-            items = []
-            for k in cfgDict.keys():
-                item = " = ".join([k, cfgDict[k]])
-                items.append(item)
-            vmSpec = "\n".join(items)
-                
-            vmCfg = open(join(vmPath, 'vm.cfg'), 'w')
-            vmCfg.write(vmSpec)
-            vmCfg.close()
-            
-            setVifsType(vm.vifs, vifType)
-            raiseExceptionIfFail(xen_set_vcpus(vmPath, vm.cpuNum))
-            raiseExceptionIfFail(xen_set_memory(vmPath, BytesToM(vm.memory)))
-            raiseExceptionIfFail(xen_add_disk(vmPath, vm.rootDisk.path, mode=vm.rootDisk.type))
-            vifs = [OvmVif.toXenString(v) for v in vm.vifs]
-            for vif in vifs:
-                raiseExceptionIfFail(xen_set_vifs(vmPath, vif))
-                
-            for disk in vm.disks:
-                raiseExceptionIfFail(xen_add_disk(vmPath, disk.path, mode=disk.type))
-
-            raiseExceptionIfFail(xen_set_vm_vnc_password(vmPath, ""))
-            cfgFile = join(vmPath, 'vm.cfg')
-            # only HVM supports attaching cdrom
-            if vmType == 'HVM':
-                # Add an empty "hdc:cdrom" entry in config. First we set boot order to 'd' that is cdrom boot,
-                # then 'hdc:cdrom' entry will be in disk list. Second, change boot order to 'c' which
-                # is harddisk boot. VM can not start with an empty 'hdc:cdrom' when boot order is 'd'.
-                # it's tricky !
-                raiseExceptionIfFail(xen_config_boot_sequence(vmPath, 'd'))
-                raiseExceptionIfFail(xen_config_boot_sequence(vmPath, 'c'))
-                
-            raiseExceptionIfFail(xen_correct_cfg(cfgFile, vmPath))
-            xen_correct_qos_cfg(cfgFile)
-            dumpCfg(vm.name, cfgFile)
-            server = successToMap(get_master_ip())['ip']
-            raiseExceptionIfFail(start_vm(vmPath, server))
-            rs = SUCC()
-            return rs
-        
-        def cdBoot(vm, vmPath):
-            isoMountPath = None
-            try:
-                cdrom = None
-                for disk in vm.disks:
-                    if disk.isIso == True:
-                        cdrom = disk
-                        break
-                if not cdrom: raise Exception("Cannot find Iso in disks")
-                
-                isoOnSecStorage = dirname(cdrom.path)
-                isoName = basename(cdrom.path)
-                isoMountPath = OvmVm()._getIsoMountPath(vmPath)
-                OvmStoragePool()._mount(isoOnSecStorage, isoMountPath)
-                isoPath = join(isoMountPath, isoName)
-                if not exists(isoPath):
-                    raise Exception("Cannot found iso %s at %s which mounts to %s"%(isoName, isoOnSecStorage, isoMountPath))
-                
-                stdout = run_cmd(args=['file', isoPath])
-                if not stdout.strip().endswith("(bootable)"): raise Exception("ISO %s is not bootable"%cdrom.path)
-                
-                #now alter cdrom to correct path
-                cdrom.path = isoPath
-                if len(vm.vifs) != 0:
-                    vif = vm.vifs[0]
-                    #ISO boot must be HVM
-                    vifCfg = ','.join([vif.mac, vif.bridge, 'ioemu'])
-                else:
-                    vifCfg = ''
-                
-                rootDiskSize = os.path.getsize(vm.rootDisk.path)
-                rooDiskCfg = ':'.join([join(vmPath, basename(vm.rootDisk.path)), str(BytesToG(rootDiskSize)), 'True'])
-                disks = [rooDiskCfg]
-                for d in vm.disks:
-                    if d.isIso: continue
-                    size = os.path.getsize(d.path)
-                    cfg = ':'.join([d.path, str(BytesToG(size)), 'True'])
-                    disks.append(cfg)
-                disksCfg = ','.join(disks)
-                server = successToMap(get_master_ip())['ip']
-                   
-                raiseExceptionIfFail(install_vm_hvm(vmPath, BytesToM(vm.memory), vm.cpuNum, vifCfg, disksCfg, cdrom.path, vncpassword='', dedicated_server=server))
-                rs = SUCC()
-                return rs
-            except Exception, e:
-                if isoMountPath and OvmStoragePool()._isMounted(isoMountPath):
-                    doCmd(['umount', '-f', isoMountPath])
-                errmsg = fmt_err_msg(e)
-                raise Exception(errmsg)
-        
-        try:
-            vm = toOvmVm(jsonString)
-            logger.debug(OvmVm.create, "creating vm, spec:%s"%jsonString)
-            rootDiskPath = vm.rootDisk.path
-            if not exists(rootDiskPath): raise Exception("Cannot find root disk %s"%rootDiskPath)
-    
-            rootDiskDir = dirname(rootDiskPath)
-            vmPath = join(dirname(rootDiskDir), vm.name)
-            if not exists(vmPath):
-                doCmd(['ln', '-s', rootDiskDir, vmPath])
-            vmNameFile = open(join(rootDiskDir, 'vmName'), 'w')
-            vmNameFile.write(vm.name)
-            vmNameFile.close()
-            
-            OvmVm()._tapAOwnerFile(rootDiskDir)
-            # set the VM to DOWN before starting, OVS agent will check this status
-            set_vm_status(vmPath, 'DOWN')
-            if vm.bootDev == "HDD":
-                return hddBoot(vm, vmPath)
-            elif vm.bootDev == "CD":
-                return cdBoot(vm, vmPath)
-            else:
-                raise Exception("Unkown bootdev %s for %s"%(vm.bootDev, vm.name))
-
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmVm.create, errmsg)
-            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.create), errmsg)
-    
-    @staticmethod
-    def stop(vmName):
-        try:
-            try:
-                OvmHost()._getDomainIdByName(vmName)
-            except NoVmFoundException, e:
-                logger.info(OvmVm.stop, "vm %s is already stopped"%vmName)
-                return SUCC()
-                
-            logger.info(OvmVm.stop, "Stop vm %s"%vmName)
-            try:
-                vmPath = OvmHost()._vmNameToPath(vmName)
-            except Exception, e:
-                errmsg = fmt_err_msg(e)
-                logger.info(OvmVm.stop, "Cannot find link for vm %s on primary storage, treating it as stopped\n %s"%(vmName, errmsg))
-                return SUCC()
-            # set the VM to RUNNING before stopping, OVS agent will check this status
-            set_vm_status(vmPath, 'RUNNING')
-            raiseExceptionIfFail(stop_vm(vmPath))
-            return SUCC()
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmVm.stop, errmsg)
-            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.stop), errmsg)
-    
-    @staticmethod
-    def reboot(vmName):
-        try:
-            #===================================================================
-            # Xend has a bug of reboot. If reboot vm too quick, xend return success
-            # but actually it refused reboot (seen from log)
-            # vmPath = successToMap(xen_get_vm_path(vmName))['path']
-            # raiseExceptionIfFail(reset_vm(vmPath))
-            #===================================================================
-            vmPath = OvmHost()._vmNameToPath(vmName)
-            OvmVm.stop(vmName)
-            raiseExceptionIfFail(start_vm(vmPath))
-            vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
-            logger.info(OvmVm.stop, "reboot vm %s, new vncPort is %s"%(vmName, vncPort))
-            return toGson({"vncPort":str(vncPort)})
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmVm.reboot, errmsg)
-            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.reboot), errmsg)
-    
-    @staticmethod
-    def getDetails(vmName):          
-        try:
-            vm = OvmVm()
-            
-            try:
-                OvmHost()._getDomainIdByName(vmName)
-                vmPath = OvmHost()._vmNameToPath(vmName)
-                vifsFromConfig = False
-            except NoVmFoundException, e:
-                vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
-                vifsFromConfig = True
-                
-            
-            if not isdir(vmPath):
-                # The case is, when vm starting was not completed at primaryStroageDownload or createVolume(e.g. mgmt server stop), the mgmt
-                # server will keep vm state in starting, then a stop command will be sent. The stop command will delete bridges that vm attaches,
-                # by retrieving bridge info by OvmVm.getDetails(). In this case, the vm doesn't exist, so return a fake object here.
-                fakeDisk = OvmDisk()
-                vm.rootDisk = fakeDisk
-            else:
-                if vifsFromConfig:
-                    vm.vifs.extend(vm._getVifsFromConfig(vmPath))
-                else:
-                    vm.vifs.extend(vm._getVifs(vmName))
-                    
-                safeSetAttr(vm, 'name', vmName)
-                disks = successToMap(xen_get_vdisks(vmPath))['vdisks'].split(',')
-                rootDisk = None
-                #BUG: there is no way to get type of disk, assume all are "w"
-                for d in disks:
-                    if vmName in d:
-                        rootDisk = OvmDisk()
-                        safeSetAttr(rootDisk, 'path', d)
-                        safeSetAttr(rootDisk, 'type', "w")
-                        continue
-                    disk = OvmDisk()
-                    safeSetAttr(disk, 'path', d)
-                    safeSetAttr(disk, 'type', "w")
-                    vm.disks.append(disk)
-                if not rootDisk: raise Exception("Cannot find root disk for vm %s"%vmName)
-                safeSetAttr(vm, 'rootDisk', rootDisk)
-                vcpus = int(successToMap(xen_get_vcpus(vmPath))['vcpus'])
-                safeSetAttr(vm, 'cpuNum', vcpus)
-                memory = MtoBytes(int(successToMap(xen_get_memory(vmPath))['memory']))
-                safeSetAttr(vm, 'memory', memory)
-                vmStatus = db_get_vm(vmPath)
-                safeSetAttr(vm, 'powerState',  vmStatus['status'])
-                vmType = successToMap(xen_get_vm_type(vmPath))['type'].replace('hvm', 'HVM').replace('para', 'PV')
-                safeSetAttr(vm, 'type', vmType)
-                
-            rs = fromOvmVm(vm)
-            logger.info(OvmVm.getDetails, rs)
-            return rs
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmVm.getDetails, errmsg)
-            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getDetails), errmsg)
-    
-    @staticmethod
-    def getVmStats(vmName):
-        def getVcpuNumAndUtils():
-            try:
-                session = session_login()
-                refs = session.xenapi.VM.get_by_name_label(vmName)
-                if len(refs) == 0:
-                    raise Exception("No ref for %s found in xenapi VM objects"%vmName)
-                vm = XenAPIObject('VM', session, refs[0])
-                VM_metrics = XenAPIObject("VM_metrics", session, vm.get_metrics())
-                items = VM_metrics.get_VCPUs_utilisation().items()
-                nvCpus = len(items)
-                if nvCpus == 0:
-                    raise Exception("vm %s has 0 vcpus !!!"%vmName)
-                
-                xmInfo = successToMap(xen_get_xm_info())
-                nCpus = int(xmInfo['nr_cpus'])
-                totalUtils = 0.0
-                # CPU utilization of VM = (total cpu utilization of each vcpu) / number of physical cpu
-                for num, util in items:
-                    totalUtils += float(util)
-                avgUtils = float(totalUtils/nCpus) * 100
-                return (nvCpus, avgUtils)
-            finally:
-                session_logout()
-                
-                
-        try:
-            try:
-                OvmHost()._getDomainIdByName(vmName)
-                vmPath = OvmHost()._vmNameToPath(vmName)
-                (nvcpus, avgUtils) = getVcpuNumAndUtils()
-                vifs = successToMap(xen_get_vifs(vmPath))
-                rxBytes = 0
-                txBytes = 0
-                vifs = OvmVm()._getVifs(vmName)
-                for vif in vifs:
-                    rxp = join('/sys/class/net', vif.name, 'statistics/rx_bytes')
-                    txp = join("/sys/class/net/", vif.name, "statistics/tx_bytes")
-                    if not exists(rxp): raise Exception('can not find %s'%rxp)
-                    if not exists(txp): raise Exception('can not find %s'%txp)
-                    rxBytes += long(doCmd(['cat', rxp])) / 1000
-                    txBytes += long(doCmd(['cat', txp])) / 1000
-            except NoVmFoundException, e:
-                vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
-                nvcpus = int(successToMap(xen_get_vcpus(vmPath))['vcpus'])
-                avgUtils = 0
-                rxBytes = 0
-                txBytes = 0
-            
-            rs = toGson({"cpuNum":nvcpus, "cpuUtil":avgUtils, "rxBytes":rxBytes, "txBytes":txBytes})
-            logger.debug(OvmVm.getVmStats, rs)
-            return rs           
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmVm.getVmStats, errmsg)
-            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getVmStats), errmsg)
-    
-    @staticmethod
-    def migrate(vmName, targetHost):
-        try:
-            vmPath = OvmHost()._vmNameToPath(vmName)
-            raiseExceptionIfFail(xen_migrate_vm(vmPath, targetHost))
-            unregister_vm(vmPath)
-            OvmVm()._cleanUpOwnerFile(vmPath)
-            return SUCC()
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmVm.migrate, errmsg)
-            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.migrate), errmsg)
-    
-    @staticmethod
-    def register(vmName):
-        try:
-            vmPath = OvmHost()._vmNameToPath(vmName)
-            raiseExceptionIfFail(register_vm(vmPath))
-            OvmVm()._tapAOwnerFile(vmPath)
-            vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
-            rs = toGson({"vncPort":str(vncPort)})
-            logger.debug(OvmVm.register, rs)
-            return rs
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmVm.register, errmsg)
-            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.register), errmsg)
-    
-    @staticmethod
-    def getVncPort(vmName):
-        try:
-            vncPort= successToMap(xen_get_vnc_port(vmName))['vnc_port']
-            rs = toGson({"vncPort":vncPort})
-            logger.debug(OvmVm.getVncPort, rs)
-            return rs
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmVm.getVncPort, errmsg)
-            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.getVncPort), errmsg)
-        
-    @staticmethod
-    def detachOrAttachIso(vmName, iso, isAttach):
-        try:
-            if vmName in OvmHost.getAllVms():
-                scope = 'both'
-                vmPath = OvmHost()._vmNameToPath(vmName)
-            else:
-                scope = 'cfg'
-                vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
-            
-            vmType = OvmVm()._getVmTypeFromConfigFile(vmPath)
-            if vmType != 'HVM':
-                raise Exception("Only HVM supports attaching/detaching ISO")
-            
-            if not isAttach:
-                iso = ''
-            else:
-                isoName = basename(iso)
-                isoMountPoint = OvmVm()._getIsoMountPath(vmPath)
-                isoOnSecStorage = dirname(iso)
-                OvmStoragePool()._mount(isoOnSecStorage, isoMountPoint)
-                iso = join(isoMountPoint, isoName)
-                       
-            exceptionIfNoSuccess(xen_change_vm_cdrom(vmPath, iso, scope))
-            return SUCC()
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmVm.detachOrAttachIso, errmsg)
-            raise XmlRpcFault(toErrCode(OvmVm, OvmVm.detachOrAttachIso), errmsg)
-        
-if __name__ == "__main__":
-    import sys
-    print OvmVm.getDetails(sys.argv[1])
-    #print OvmVm.getVmStats(sys.argv[1])
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/ovm/scripts/vm/hypervisor/ovm/OvmVolumeModule.py
----------------------------------------------------------------------
diff --git a/ovm/scripts/vm/hypervisor/ovm/OvmVolumeModule.py b/ovm/scripts/vm/hypervisor/ovm/OvmVolumeModule.py
deleted file mode 100644
index 8daa846..0000000
--- a/ovm/scripts/vm/hypervisor/ovm/OvmVolumeModule.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# Copyright 2012 Citrix Systems, Inc. Licensed under the
-# Apache License, Version 2.0 (the "License"); you may not use this
-# file except in compliance with the License.  Citrix Systems, Inc.
-# reserves all rights not expressly granted by the License.
-# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# 
-# Automatically generated by addcopyright.py at 04/03/2012
-'''
-Created on June 2, 2011
-
-@author: frank
-'''
-from OvmCommonModule import *
-from OvmStoragePoolModule import OvmStoragePool
-from OVSXUtility import xen_create_disk
-from OvmHostModule import OvmHost
-import os
-
-logger = OvmLogger("OvmVolume")
-
-class OvmVolumeDecoder(json.JSONDecoder):
-    def decode(self, jStr):
-        deDict = asciiLoads(jStr)
-        vol = OvmVolume()
-        setAttrFromDict(vol, 'uuid', deDict)
-        setAttrFromDict(vol, 'size', deDict, long)
-        setAttrFromDict(vol, 'poolUuid', deDict)
-        return vol
-    
-class OvmVolumeEncoder(json.JSONEncoder):
-    def default(self, obj):
-        if not isinstance(obj, OvmVolume): raise Exception("%s is not instance of OvmVolume"%type(obj))
-        dct = {}
-        safeDictSet(obj, dct, 'name')
-        safeDictSet(obj, dct, 'uuid')
-        safeDictSet(obj, dct, 'poolUuid')
-        safeDictSet(obj, dct, 'path')
-        safeDictSet(obj, dct, 'size')
-        return dct
-    
-def toOvmVolume(jStr):
-    return json.loads(jStr, cls=OvmVolumeDecoder)
-
-def fromOvmVolume(vol):
-    return normalizeToGson(json.dumps(vol, cls=OvmVolumeEncoder))
-
-class OvmVolume(OvmObject):
-    name = ''
-    uuid = ''
-    poolUuid = ''
-    path = ''
-    size = 0
-    
-    @staticmethod
-    def createDataDisk(poolUuid, size, isRoot):
-        try:
-            vol = OvmVolume()
-            vol.size = long(size)
-            vol.poolUuid = poolUuid
-            pool = OvmStoragePool()
-            sr = pool._getSrByNameLable(vol.poolUuid)
-            if isRoot:
-                path = join(sr.mountpoint, 'running_pool', get_uuid())
-            else:
-                path = join(sr.mountpoint, 'shareDisk')
-            if not exists(path): os.makedirs(path)
-            freeSpace = pool._getSpaceinfoOfDir(path)
-            if freeSpace < vol.size:
-                raise Exception("%s has not enough space (available:%s, required:%s)"%(path, freeSpace, vol.size))
-            
-            vol.uuid = get_uuid()
-            vol.name = vol.uuid + '.raw'
-            filePath = join(path, vol.name)
-            exceptionIfNoSuccess(xen_create_disk(filePath, BytesToM(vol.size)), "Create datadisk %s failed"%filePath)
-            vol.path = filePath
-            rs = fromOvmVolume(vol)
-            logger.debug(OvmVolume.createDataDisk, rs)
-            return rs
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmVolume.createDataDisk, errmsg)
-            raise XmlRpcFault(toErrCode(OvmVolume, OvmVolume.createDataDisk), errmsg)
-        
-    @staticmethod
-    def createFromTemplate(poolUuid, templateUrl):
-        try:
-            if not exists(templateUrl):
-                raise Exception("Cannot find template:%s"%templateUrl)
-            sr = OvmStoragePool()._getSrByNameLable(poolUuid)
-            volDirUuid = get_uuid()
-            volUuid = get_uuid()
-            priStorageMountPoint = sr.mountpoint
-            volDir = join(priStorageMountPoint, 'running_pool', volDirUuid)
-            if exists(volDir):
-                raise Exception("Volume dir %s already exists, cannot override"%volDir)
-            os.makedirs(volDir)
-            OvmStoragePool()._checkDirSizeForImage(volDir, templateUrl)
-            volName = volUuid + '.raw'
-            tgt = join(volDir, volName)
-            cpVolCmd = ['cp', templateUrl, tgt]
-            doCmd(cpVolCmd)
-            volSize = os.path.getsize(tgt)
-            vol = OvmVolume()
-            vol.name = volName
-            vol.path = tgt
-            vol.size = volSize
-            vol.uuid = volUuid
-            vol.poolUuid = poolUuid
-            rs = fromOvmVolume(vol)
-            logger.debug(OvmVolume.createFromTemplate, rs)
-            return rs
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmVolume.createFromTemplate, errmsg)
-            raise XmlRpcFault(toErrCode(OvmVolume, OvmVolume.createFromTemplate), errmsg)
-    
-    @staticmethod
-    def destroy(poolUuid, path):
-        try:
-            OvmStoragePool()._getSrByNameLable(poolUuid)
-            if not exists(path): raise Exception("Cannot find %s"%path)
-            dir = dirname(path)
-            if exists(join(dir, 'vm.cfg')):
-                # delete root disk
-                vmNamePath = join(dir, 'vmName')
-                if exists(vmNamePath):
-                    vmNameFd = open(vmNamePath, 'r')
-                    vmName = vmNameFd.readline()
-                    vmName = vmName.rstrip('\n')
-                    link = join(dirname(dir), vmName)
-                    doCmd(['rm', '-rf', link])
-                    vmNameFd.close()
-                else:
-                    logger.warning(OvmVolume.destroy, "Can not find vmName file in %s"%dir)
-                doCmd(['rm','-rf', dir])
-            else:
-                doCmd(['rm', path])
-            return SUCC()
-        except Exception, e:
-            errmsg = fmt_err_msg(e)
-            logger.error(OvmVolume.destroy, errmsg)
-            raise XmlRpcFault(toErrCode(OvmVolume, OvmVolume.destroy), errmsg)
-        
-        
-                              
-if __name__ == "__main__":
-    print OvmVolume.detachOrAttachIso(sys.argv[1], '', False)
-                              
-                              
-                              
-                              
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/ovm/scripts/vm/hypervisor/ovm/configureOvm.sh
----------------------------------------------------------------------
diff --git a/ovm/scripts/vm/hypervisor/ovm/configureOvm.sh b/ovm/scripts/vm/hypervisor/ovm/configureOvm.sh
deleted file mode 100755
index 1fffd5a..0000000
--- a/ovm/scripts/vm/hypervisor/ovm/configureOvm.sh
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/bin/sh
-# Copyright 2012 Citrix Systems, Inc. Licensed under the
-# Apache License, Version 2.0 (the "License"); you may not use this
-# file except in compliance with the License.  Citrix Systems, Inc.
-# reserves all rights not expressly granted by the License.
-# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# 
-# Automatically generated by addcopyright.py at 04/03/2012
-
-errExit() {
-    echo $@
-    exit 1
-}
-
-stopHeartbeat() {
-    pidFile="/var/run/ovs-agent/heartbeat.pid"
-    if [ -f $pidFile ]; then
-        pid=`cat $pidFile`
-        ps -p $pid &>/dev/null
-        if [ $? -eq 0 ]; then
-            kill $pid &>/dev/null
-        fi
-    fi
-}
-
-openPortOnIptables() {
-	port="$1"
-	protocol="$2"
-    chkconfig --list iptables | grep "on"
-	if [ $? -eq 0 ]; then
-	    iptables-save | grep "A INPUT -p $protocol -m $protocol --dport $port -j ACCEPT" >/dev/null
-	    if [ $? -ne 0 ]; then
-	        iptables -I INPUT 1 -p $protocol --dport $port -j ACCEPT
-	        if [ $? -ne 0 ]; then
-	            exit_with_error "iptables -I INPUT 1 -p $protocol --dport $port -j ACCEPT failed"
-	        fi
-	        echo "iptables:Open $protocol port $port for DHCP"
-	    fi
-	fi
-}
-
-applyPatch() {
-    patchFile="$1"
-    level="$2"
-
-    [ ! -f $patchFile ] && errExit "Can not find $patchFile"
-
-    if [ $? -ne 0 ]; then
-        pushd /opt/ovs-agent-latest &>/dev/null
-        test=`patch -p$level --dry-run -N < $patchFile`
-        if [ $? -ne 0 ]; then
-            tmp=`mktemp`
-            echo $test > $tmp
-            grep "Reversed (or previously applied) patch detected" $tmp &>/dev/null
-            if [ $? -eq 0 ]; then
-                # The file has been patched
-                rm $tmp -f
-                popd &>/dev/null
-                return
-            else
-                rm $tmp -f
-                popd &>/dev/null
-                errExit "Can not apply $patchFile because $test"
-            fi
-        fi
-        patch -p$level < $patchFile
-        [ $? -ne 0 ] && errExit "Patch to $target failed"
-        popd &>/dev/null
-    fi
-}
-
-postSetup() {
-    openPortOnIptables 7777 tcp # for OCFS2, maybe tcp only
-    openPortOnIptables 7777 udp
-    openPortOnIptables 3260 tcp # for ISCSI, maybe tcp only
-    openPortOnIptables 3260 udp
-    applyPatch "/opt/ovs-agent-latest/OvmPatch.patch" 2
-    applyPatch "/opt/ovs-agent-latest/OvmDontTouchOCFS2ClusterWhenAgentStart.patch" 1
-    applyPatch "/opt/ovs-agent-latest/Fixget_storage_reposExceptionDueToWrongReturnValueCheck.patch" 1
-
-    stopHeartbeat
-
-    /etc/init.d/ovs-agent restart --disable-nowayout
-    [ $? -ne 0 ] && errExit "Restart ovs agent failed"
-    exit 0
-}
-
-preSetup() {
-    agentConfig="/etc/ovs-agent/agent.ini"
-    agentInitScript="/etc/init.d/ovs-agent"
-
-    [ ! -f $agentConfig ] && errExit "Can not find $agentConfig"
-    [ ! -f $agentInitScript ] && errExit "Can not find $agentInitScript"
-
-    version=`grep "version="  $agentInitScript | cut -d "=" -f 2`
-    [ x"$version" != x"2.3" ] && errExit "The OVS agent version is $version, we only support 2.3 now"
-
-    # disable SSL
-    sed -i 's/ssl=enable/ssl=disable/g' $agentConfig
-    [ $? -ne 0 ] && errExit "configure ovs agent to non ssl failed"
-
-    if [ ! -L /opt/ovs-agent-latest ]; then
-        eval $agentInitScript status | grep 'down' && $agentInitScript start
-        [ $? -ne 0 ] && errExit "Start ovs agent failed"
-        [ ! -L /opt/ovs-agent-latest ] && errExit "No link at /opt/ovs-agent-latest"
-    fi
-    exit 0
-}
-
-[ $# -ne 1 ] && errExit "Usage: configureOvm.sh command"
-
-case "$1" in
-    preSetup)
-        preSetup
-        ;;
-    postSetup)
-        postSetup
-        ;;
-    *)
-        errExit "Valid commands: preSetup postSetup"
-esac
-

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java
----------------------------------------------------------------------
diff --git a/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java b/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java
deleted file mode 100755
index 3523562..0000000
--- a/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2012 Citrix Systems, Inc. Licensed under the
-// Apache License, Version 2.0 (the "License"); you may not use this
-// file except in compliance with the License.  Citrix Systems, Inc.
-// reserves all rights not expressly granted by the License.
-// You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-// 
-// Automatically generated by addcopyright.py at 04/03/2012
-package com.cloud.ovm.hypervisor;
-
-import java.net.InetAddress;
-import java.net.URI;
-import java.net.UnknownHostException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import javax.ejb.Local;
-import javax.naming.ConfigurationException;
-
-import org.apache.log4j.Logger;
-import org.apache.xmlrpc.XmlRpcException;
-
-import com.cloud.configuration.Config;
-import com.cloud.agent.api.StartupCommand;
-import com.cloud.agent.api.StartupRoutingCommand;
-import com.cloud.dc.ClusterVO;
-import com.cloud.dc.dao.ClusterDao;
-import com.cloud.exception.DiscoveryException;
-import com.cloud.host.HostInfo;
-import com.cloud.host.HostVO;
-import com.cloud.hypervisor.Hypervisor.HypervisorType;
-import com.cloud.ovm.object.Connection;
-import com.cloud.ovm.object.OvmHost;
-import com.cloud.resource.Discoverer;
-import com.cloud.resource.DiscovererBase;
-import com.cloud.resource.ResourceManager;
-import com.cloud.resource.ResourceStateAdapter;
-import com.cloud.resource.ServerResource;
-import com.cloud.resource.UnableDeleteHostException;
-import com.cloud.utils.component.Inject;
-import com.cloud.utils.db.SearchCriteria;
-import com.cloud.utils.db.SearchCriteria2;
-import com.cloud.utils.exception.CloudRuntimeException;
-import com.cloud.utils.ssh.SSHCmdHelper;
-
-@Local(value = Discoverer.class)
-public class OvmDiscoverer extends DiscovererBase implements Discoverer,
-		ResourceStateAdapter {
-	private static final Logger s_logger = Logger
-			.getLogger(OvmDiscoverer.class);
-	protected String _publicNetworkDevice;
-	protected String _privateNetworkDevice;
-	protected String _guestNetworkDevice;
-
-	@Inject
-	ClusterDao _clusterDao;
-	@Inject
-	ResourceManager _resourceMgr;
-
-	@Override
-	public boolean configure(String name, Map<String, Object> params)
-			throws ConfigurationException {
-		super.configure(name, params);
-		_publicNetworkDevice = _params.get(Config.OvmPublicNetwork.key());
-		_privateNetworkDevice = _params.get(Config.OvmPrivateNetwork.key());
-		_guestNetworkDevice = _params.get(Config.OvmGuestNetwork.key());
-		_resourceMgr.registerResourceStateAdapter(this.getClass()
-				.getSimpleName(), this);
-		return true;
-	}
-
-	protected OvmDiscoverer() {
-	}
-
-	@Override
-	public boolean stop() {
-		_resourceMgr.unregisterResourceStateAdapter(this.getClass()
-				.getSimpleName());
-		return super.stop();
-	}
-
-	private boolean checkIfExisted(String guid) {
-		SearchCriteria2<HostVO, HostVO> sc = SearchCriteria2.create(HostVO.class);
-		sc.addAnd(sc.getEntity().getGuid(), SearchCriteria.Op.EQ, guid);
-		sc.addAnd(sc.getEntity().getHypervisorType(), SearchCriteria.Op.EQ,
-				HypervisorType.Ovm);
-		List<HostVO> hosts = sc.list();
-		return !hosts.isEmpty();
-	}
-
-	@Override
-	public Map<? extends ServerResource, Map<String, String>> find(long dcId,
-			Long podId, Long clusterId, URI url, String username,
-			String password, List<String> hostTags) throws DiscoveryException {
-		Connection conn = null;
-
-		if (!url.getScheme().equals("http")) {
-			String msg = "urlString is not http so we're not taking care of the discovery for this: "
-					+ url;
-			s_logger.debug(msg);
-			return null;
-		}
-		if (clusterId == null) {
-			String msg = "must specify cluster Id when adding a host";
-			s_logger.debug(msg);
-			throw new CloudRuntimeException(msg);
-		}
-
-		if (podId == null) {
-			String msg = "must specify pod Id when adding a host";
-			s_logger.debug(msg);
-			throw new CloudRuntimeException(msg);
-		}
-
-		ClusterVO cluster = _clusterDao.findById(clusterId);
-		if (cluster == null
-				|| (cluster.getHypervisorType() != HypervisorType.Ovm)) {
-			if (s_logger.isInfoEnabled())
-				s_logger.info("invalid cluster id or cluster is not for Ovm hypervisors");
-			return null;
-		}
-
-		String agentUsername = _params.get("agentusername");
-		if (agentUsername == null) {
-			throw new CloudRuntimeException("Agent user name must be specified");
-		}
-
-		String agentPassword = _params.get("agentpassword");
-		if (agentPassword == null) {
-			throw new CloudRuntimeException("Agent password must be specified");
-		}
-
-		try {
-			String hostname = url.getHost();
-			InetAddress ia = InetAddress.getByName(hostname);
-			String hostIp = ia.getHostAddress();
-			String guid = UUID.nameUUIDFromBytes(hostIp.getBytes()).toString();
-
-			if (checkIfExisted(guid)) {
-				throw new CloudRuntimeException("The host " + hostIp
-						+ " has been added before");
-			}
-
-			s_logger.debug("Ovm discover is going to discover host having guid "
-					+ guid);
-
-			ClusterVO clu = _clusterDao.findById(clusterId);
-			if (clu.getGuid() == null) {
-				clu.setGuid(UUID.randomUUID().toString());
-				_clusterDao.update(clusterId, clu);
-			}
-
-			com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(
-					hostIp, 22);
-			sshConnection.connect(null, 60000, 60000);
-			sshConnection = SSHCmdHelper.acquireAuthorizedConnection(hostIp,
-					username, password);
-			if (sshConnection == null) {
-				throw new DiscoveryException(
-						String.format(
-								"Cannot connect to ovm host(IP=%1$s, username=%2$s, password=%3$s, discover failed",
-								hostIp, username, password));
-			}
-
-			if (!SSHCmdHelper.sshExecuteCmd(sshConnection,
-					"[ -f '/etc/ovs-agent/agent.ini' ]")) {
-				throw new DiscoveryException(
-						"Can not find /etc/ovs-agent/agent.ini " + hostIp);
-			}
-
-			Map<String, String> details = new HashMap<String, String>();
-			OvmResourceBase ovmResource = new OvmResourceBase();
-			details.put("ip", hostIp);
-			details.put("username", username);
-			details.put("password", password);
-			details.put("zone", Long.toString(dcId));
-			details.put("guid", guid);
-			details.put("pod", Long.toString(podId));
-			details.put("cluster", Long.toString(clusterId));
-			details.put("agentusername", agentUsername);
-			details.put("agentpassword", agentPassword);
-			if (_publicNetworkDevice != null) {
-				details.put("public.network.device", _publicNetworkDevice);
-			}
-			if (_privateNetworkDevice != null) {
-				details.put("private.network.device", _privateNetworkDevice);
-			}
-			if (_guestNetworkDevice != null) {
-				details.put("guest.network.device", _guestNetworkDevice);
-			}
-
-			Map<String, Object> params = new HashMap<String, Object>();
-			params.putAll(details);
-			ovmResource.configure("Ovm Server", params);
-			ovmResource.start();
-
-			conn = new Connection(hostIp, "oracle", agentPassword);
-			/* After resource start, we are able to execute our agent api */
-			OvmHost.Details d = OvmHost.getDetails(conn);
-			details.put("agentVersion", d.agentVersion);
-			details.put(HostInfo.HOST_OS_KERNEL_VERSION, d.dom0KernelVersion);
-			details.put(HostInfo.HYPERVISOR_VERSION, d.hypervisorVersion);
-
-			Map<OvmResourceBase, Map<String, String>> resources = new HashMap<OvmResourceBase, Map<String, String>>();
-			resources.put(ovmResource, details);
-			return resources;
-		} catch (XmlRpcException e) {
-			s_logger.debug("XmlRpc exception, Unable to discover OVM: " + url,
-					e);
-			return null;
-		} catch (UnknownHostException e) {
-			s_logger.debug(
-					"Host name resolve failed exception, Unable to discover OVM: "
-							+ url, e);
-			return null;
-		} catch (ConfigurationException e) {
-			s_logger.debug(
-					"Configure resource failed, Unable to discover OVM: " + url,
-					e);
-			return null;
-		} catch (Exception e) {
-			s_logger.debug("Unable to discover OVM: " + url, e);
-			return null;
-		}
-	}
-
-	@Override
-	public void postDiscovery(List<HostVO> hosts, long msId)
-			throws DiscoveryException {
-		// TODO Auto-generated method stub
-
-	}
-
-	@Override
-	public boolean matchHypervisor(String hypervisor) {
-		return HypervisorType.Ovm.toString().equalsIgnoreCase(hypervisor);
-	}
-
-	@Override
-	public HypervisorType getHypervisorType() {
-		return HypervisorType.Ovm;
-	}
-
-	@Override
-	public HostVO createHostVOForConnectedAgent(HostVO host,
-			StartupCommand[] cmd) {
-		// TODO Auto-generated method stub
-		return null;
-	}
-
-	@Override
-	public HostVO createHostVOForDirectConnectAgent(HostVO host,
-			StartupCommand[] startup, ServerResource resource,
-			Map<String, String> details, List<String> hostTags) {
-		StartupCommand firstCmd = startup[0];
-		if (!(firstCmd instanceof StartupRoutingCommand)) {
-			return null;
-		}
-
-		StartupRoutingCommand ssCmd = ((StartupRoutingCommand) firstCmd);
-		if (ssCmd.getHypervisorType() != HypervisorType.Ovm) {
-			return null;
-		}
-
-		return _resourceMgr.fillRoutingHostVO(host, ssCmd, HypervisorType.Ovm,
-				details, hostTags);
-	}
-
-	@Override
-	public DeleteHostAnswer deleteHost(HostVO host, boolean isForced,
-			boolean isForceDeleteStorage) throws UnableDeleteHostException {
-		if (host.getType() != com.cloud.host.Host.Type.Routing
-				|| host.getHypervisorType() != HypervisorType.Ovm) {
-			return null;
-		}
-
-		_resourceMgr.deleteRoutingHost(host, isForced, isForceDeleteStorage);
-		return new DeleteHostAnswer(true);
-	}
-
-}


Mime
View raw message