incubator-cloudstack-commits mailing list archives

From ahu...@apache.org
Subject [5/8] Introduced plugins directory. Moved ovm into plugins. Introduced build.xml for ovm.
Date Wed, 20 Jun 2012 01:48:00 GMT
http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmHaHeartBeatModule.py
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmHaHeartBeatModule.py b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmHaHeartBeatModule.py
new file mode 100755
index 0000000..eeee35a
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmHaHeartBeatModule.py
@@ -0,0 +1,102 @@
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+'''
+Created on Jun 6, 2011
+
+@author: frank
+'''
+from OvmCommonModule import *
+try:
+    from multiprocessing import Process, Manager
+except ImportError:
+    from processing import Process, Manager
+import signal
+
+logger = OvmLogger("OvmHaHeartBeat")
+
+class OvmHaHeartBeat(object):
+    '''
+    Periodically writes a timestamp file on shared primary storage so that peers can tell whether this host is still alive.
+    '''
+    def __init__(self, mountPoint, ip):
+        self.mountPoint = mountPoint
+        self.ip = ip
+    
+    def mark(self, file):
+        timestamp = HEARTBEAT_TIMESTAMP_FORMAT % time.time()
+        try:
+            fd = open(file, 'w')
+            fd.write(timestamp)
+            fd.close()
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmHaHeartBeat.mark, errmsg)
+        
+    def run(self):
+        '''
+        Write the heartbeat file for this host every 120 seconds.
+        '''
+        heartBeatDir = join(self.mountPoint, HEARTBEAT_DIR)
+        if not exists(heartBeatDir):
+            os.makedirs(heartBeatDir)
+        heartBeatFile = join(heartBeatDir, ipToHeartBeatFileName(self.ip))
+        while True:
+            self.mark(heartBeatFile)
+            time.sleep(120)
+    
+    @staticmethod
+    def start(poolPath, ip):
+        pidFile = join(PID_DIR, "heartbeat.pid")
+        
+        def isLive():
+            if exists(pidFile):
+                f = open(pidFile)
+                pid = f.read().strip()
+                f.close()
+                if isdir("/proc/%s" % pid):
+                    return long(pid)
+            return None
+        
+        def stopOldHeartBeat(pid):
+            os.kill(pid, signal.SIGTERM)
+            time.sleep(5)
+            pid = isLive()
+            if pid != None:
+                logger.debug(OvmHaHeartBeat.start, "SIGTERM cannot stop heartbeat process %s, will try SIGKILL"%pid)
+                os.kill(pid, signal.SIGKILL)
+                time.sleep(5)
+                pid = isLive()
+                if pid != None:
+                    raise Exception("Cannot stop old heartbeat process %s, setup heart beat failed"%pid)
+        
+        def heartBeat(hb):
+            hb.run()
+           
+        def setupHeartBeat():
+            hb = OvmHaHeartBeat(poolPath, ip)
+            p = Process(target=heartBeat, args=(hb,))
+            p.start()
+            pid = p.pid
+            if not isdir(PID_DIR):
+                os.makedirs(PID_DIR)
+            pidFd = open(pidFile, 'w')
+            pidFd.write(str(pid))
+            pidFd.close()
+            logger.info(OvmHaHeartBeat.start, "Set up heart beat successfully, pid is %s" % pid)
+             
+        pid = isLive()
+        if pid != None:
+            stopOldHeartBeat(pid)
+            
+        setupHeartBeat()
+            
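For reference, a minimal sketch of how this module is meant to be driven (not part of the commit; HEARTBEAT_DIR, PID_DIR and ipToHeartBeatFileName() come from OvmCommonModule and are assumed here):

    # Hypothetical caller-side usage; the pool path and IP are placeholders.
    from OvmHaHeartBeatModule import OvmHaHeartBeat

    poolPath = "/var/ovs/mount/POOL-UUID"   # mount point of the shared primary storage
    hostIp = "192.168.1.10"

    # start() kills any previous writer recorded in PID_DIR/heartbeat.pid, then
    # forks a child that rewrites the heartbeat file for hostIp under
    # <poolPath>/<HEARTBEAT_DIR>/ with a fresh timestamp every 120 seconds.
    OvmHaHeartBeat.start(poolPath, hostIp)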

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmHostModule.py
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmHostModule.py b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmHostModule.py
new file mode 100755
index 0000000..9cfbd46
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmHostModule.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+
+from OvmCommonModule import *
+from OVSSiteRMServer import get_master_ip, register_server
+from OVSCommons import *
+from OVSXMonitor import xen_get_xm_info
+from OVSXSysInfo import get_agent_version
+from OVSSiteRMServer import get_srv_agent_status
+from OVSXMonitor import sys_perf_info
+from OVSDB import db_get_vm
+from OvmStoragePoolModule import OvmStoragePool
+from OvmHaHeartBeatModule import OvmHaHeartBeat
+import re
+
+logger = OvmLogger('OvmHost')
+
+class OvmHostEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if not isinstance(obj, OvmHost): raise Exception("%s is not instance of OvmHost"%type(obj))
+        dct = {}
+        safeDictSet(obj, dct, 'masterIp')
+        safeDictSet(obj, dct, 'cpuNum')
+        safeDictSet(obj, dct, 'cpuSpeed')
+        safeDictSet(obj, dct, 'totalMemory')
+        safeDictSet(obj, dct, 'freeMemory')
+        safeDictSet(obj, dct, 'dom0Memory')
+        safeDictSet(obj, dct, 'agentVersion')
+        safeDictSet(obj, dct, 'name')
+        safeDictSet(obj, dct, 'dom0KernelVersion')
+        safeDictSet(obj, dct, 'hypervisorVersion')
+        return dct
+        
+
+def fromOvmHost(host):
+    return normalizeToGson(json.dumps(host, cls=OvmHostEncoder))
+    
+class OvmHost(OvmObject):
+    masterIp = ''
+    cpuNum = 0
+    cpuSpeed = 0
+    totalMemory = 0
+    freeMemory = 0
+    dom0Memory = 0
+    agentVersion = ''
+    name = ''
+    dom0KernelVersion = ''
+    hypervisorVersion = ''
+
+    def _getVmPathFromPrimaryStorage(self, vmName):
+        '''
+        We don't have a database to store VM states, so there is no way to retrieve
+        information about a VM once it has stopped. The trick is to look for the VM's
+        path on primary storage and read the information from its configuration file.
+        '''
+        mps = OvmStoragePool()._getAllMountPoints()
+        vmPath = None
+        for p in mps:
+            vmPath = join(p, 'running_pool', vmName)
+            if exists(vmPath): break
+        if not vmPath:
+            logger.error(self._getVmPathFromPrimaryStorage, "Cannot find link for %s in any primary storage, the vm was really gone!"%vmName)
+            raise Exception("Cannot find link for %s in any primary storage, the vm was really gone!"%vmName)
+        return vmPath
+    
+    def _vmNameToPath(self, vmName):
+        # xen_get_vm_path is unreliable, so resolve the path from primary storage instead
+        #return successToMap((vmName))['path']
+        return self._getVmPathFromPrimaryStorage(vmName)
+    
+    def _getAllDomains(self):
+        stdout = timeout_command(["xm", "list"])
+        l = [ line.split()[:2] for line in stdout.splitlines() ]
+        l = [ (name, id) for (name, id) in l if name not in ("Name", "Domain-0") ]
+        return l
+    
+    def _getDomainIdByName(self, vmName):
+        l = self._getAllDomains()
+        for name, id in l:
+            if vmName == name: return id
+        raise NoVmFoundException("No domain id for %s found"%vmName)
+
+    @staticmethod
+    def registerAsMaster(hostname, username="oracle", password="password", port=8899, isSsl=False):
+        try:
+            logger.debug(OvmHost.registerAsMaster, "ip=%s, username=%s, password=%s, port=%s, isSsl=%s"%(hostname, username, password, port, isSsl))
+            exceptionIfNoSuccess(register_server(hostname, 'site', False, username, password, port, isSsl),
+                             "Register %s as site failed"%hostname)
+            exceptionIfNoSuccess(register_server(hostname, 'utility', False, username, password, port, isSsl),
+                             "Register %s as utility failed"%hostname)
+            rs = SUCC()
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmHost.registerAsMaster, errmsg)
+            raise XmlRpcFault(toErrCode(OvmHost, OvmHost.registerAsMaster), errmsg)
+    
+    @staticmethod
+    def registerAsVmServer(hostname, username="oracle", password="password", port=8899, isSsl=False):
+        try:
+            logger.debug(OvmHost.registerAsVmServer, "ip=%s, username=%s, password=%s, port=%s, isSsl=%s"%(hostname, username, password, port, isSsl))
+            exceptionIfNoSuccess(register_server(hostname, 'xen', False, username, password, port, isSsl),
+                             "Register %s as vm server failed"%hostname)
+            rs = SUCC()
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmHost.registerAsVmServer, errmsg)
+            raise XmlRpcFault(toErrCode(OvmHost, OvmHost.registerAsVmServer), errmsg)
+    
+    @staticmethod
+    def ping(hostname):
+        try:
+            logger.debug(OvmHost.ping, "ping %s"%hostname)
+            exceptionIfNoSuccess(get_srv_agent_status(hostname), "Ovs agent is down")
+            rs = SUCC()
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmHost.ping, errmsg)
+            raise XmlRpcFault(toErrCode(OvmHost, OvmHost.ping), errmsg)
+        
+    @staticmethod
+    def getDetails():
+        try:
+            obj = OvmHost()
+            masterIp = successToMap(get_master_ip())
+            safeSetAttr(obj, 'masterIp', masterIp['ip'])
+            xmInfo = successToMap(xen_get_xm_info())
+            totalMemory = MtoBytes(long(xmInfo['total_memory']))
+            safeSetAttr(obj, 'totalMemory', totalMemory)
+            freeMemory = MtoBytes(long(xmInfo['free_memory']))
+            safeSetAttr(obj, 'freeMemory', freeMemory)
+            dom0Memory = totalMemory - freeMemory
+            safeSetAttr(obj, 'dom0Memory', dom0Memory)
+            cpuNum = int(xmInfo['nr_cpus'])
+            safeSetAttr(obj, 'cpuNum', cpuNum)
+            cpuSpeed = int(xmInfo['cpu_mhz'])
+            safeSetAttr(obj, 'cpuSpeed', cpuSpeed)
+            name = xmInfo['host']
+            safeSetAttr(obj, 'name', name)
+            dom0KernelVersion = xmInfo['release']
+            safeSetAttr(obj, 'dom0KernelVersion', dom0KernelVersion)
+            hypervisorVersion = xmInfo['xen_major'] + '.' + xmInfo['xen_minor'] + xmInfo['xen_extra']
+            safeSetAttr(obj, 'hypervisorVersion', hypervisorVersion)
+            agtVersion = successToMap(get_agent_version())
+            safeSetAttr(obj, 'agentVersion', agtVersion['agent_version'])
+            res = fromOvmHost(obj)
+            logger.debug(OvmHost.getDetails, res)
+            return res
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmHost.getDetails, errmsg)
+            raise XmlRpcFault(toErrCode(OvmHost, OvmHost.getDetails), errmsg)
+    
+    @staticmethod
+    def getPerformanceStats(bridgeName):
+        try:
+            rxBytesPath = join("/sys/class/net/", bridgeName, "statistics/rx_bytes")
+            txBytesPath = join("/sys/class/net/", bridgeName, "statistics/tx_bytes")
+            if not exists(rxBytesPath): raise Exception("Cannot find %s"%rxBytesPath)
+            if not exists(txBytesPath): raise Exception("Cannot find %s"%txBytesPath)
+            rxBytes = long(doCmd(['cat', rxBytesPath])) / 1000
+            txBytes = long(doCmd(['cat', txBytesPath])) / 1000
+            sysPerf = successToMap(sys_perf_info())
+            cpuUtil = float(100 - float(sysPerf['cpu_idle']) * 100)
+            freeMemory = MtoBytes(long(sysPerf['mem_free']))
+            xmInfo = successToMap(xen_get_xm_info())
+            totalMemory = MtoBytes(long(xmInfo['total_memory']))
+            rs = toGson({"cpuUtil":cpuUtil, "totalMemory":totalMemory, "freeMemory":freeMemory, "rxBytes":rxBytes, "txBytes":txBytes})
+            logger.info(OvmHost.getPerformanceStats, rs)
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmHost.getPerformanceStats, errmsg)
+            raise XmlRpcFault(toErrCode(OvmHost, OvmHost.getPerformanceStats), errmsg)
+    
+    @staticmethod
+    def getAllVms():
+        def scanStoppedVmOnPrimaryStorage(vms):
+            def isMyVmDirLink(path):
+                return (islink(path) and exists(join(path, 'vm.cfg')) and ('-' in basename(path)) and (exists(join(path, makeOwnerFileName()))))
+                    
+            mps = OvmStoragePool()._getAllMountPoints()
+            for mountPoint in mps:
+                runningPool = join(mountPoint, 'running_pool')
+                if not exists(runningPool):
+                    logger.debug(OvmHost.getAllVms, "Primary storage %s does not exist, skipping it. This is probably the first getAllVms() call, made during OVM resource configuration"%runningPool)
+                    continue
+                    
+                for dir in os.listdir(runningPool):
+                    vmDir = join(runningPool, dir)
+                    if not isMyVmDirLink(vmDir):
+                        logger.debug(OvmHost.getAllVms, "%s is not our vm directory, skip it"%vmDir)
+                        continue
+                    if vms.has_key(dir):
+                        logger.debug(OvmHost.getAllVms, "%s is already in running list, skip it"%dir)
+                        continue
+                    
+                    logger.debug(OvmHost.getAllVms, "Found a stopped vm %s on primary storage %s, report it to management server" % (dir, mountPoint))
+                    vms[dir] = "DOWN"
+                    
+                    
+        try:
+            l = OvmHost()._getAllDomains()
+            dct = {}
+            host = OvmHost()
+            for name, id in l:
+                try:
+                    vmPath = host._getVmPathFromPrimaryStorage(name)
+                    vmStatus = db_get_vm(vmPath)
+                    dct[name] = vmStatus['status']
+                except Exception, e:
+                    logger.debug(OvmHost.getAllVms, "Cannot find link for %s on primary storage, treat it as Error"%name)
+                    dct[name] = 'ERROR'
+                        
+            scanStoppedVmOnPrimaryStorage(dct)
+            rs = toGson(dct)
+            logger.info(OvmHost.getAllVms, rs)
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmHost.getAllVms, errmsg)
+            raise XmlRpcFault(toErrCode(OvmHost, OvmHost.getAllVms), errmsg)
+    
+    @staticmethod
+    def fence(ip):
+        # try up to 3 times to avoid a race where the heartbeat file is read while it is being written
+        def getTimeStamp(hbFile):
+            for i in range(1, 4):
+                f = open(hbFile, 'r') 
+                str = f.readline()
+                items = re.findall(HEARTBEAT_TIMESTAMP_PATTERN, str)
+                if len(items) == 0:
+                    logger.debug(OvmHost.fence, "Get an incorrect heartbeat data %s, will retry %s times" % (str, 3-i))
+                    f.close()
+                    time.sleep(5)
+                else:
+                    f.close()
+                    timestamp = items[0]
+                    return timestamp.lstrip('<timestamp>').rstrip('</timestamp>')
+            
+        # check for up to 6 minutes in total; the heartbeat file is updated every 2 minutes
+        def check(hbFile):
+            for i in range(1, 7):
+                ts = getTimeStamp(hbFile)
+                time.sleep(60)
+                nts = getTimeStamp(hbFile)
+                if ts != nts: return True
+                else: logger.debug(OvmHost.fence, '%s is not updated, old value=%s, will retry %s times'%(hbFile, ts, 6-i))
+            return False
+                
+        try:
+            mountpoints = OvmStoragePool()._getAllMountPoints()
+            hbFile = None
+            for m in mountpoints:
+                p = join(m, HEARTBEAT_DIR, ipToHeartBeatFileName(ip))
+                if exists(p):
+                    hbFile = p
+                    break
+                
+            if not hbFile: raise Exception('Cannot find heartbeat file for %s in pools %s'%(ip, mountpoints))
+            rs = toGson({"isLive":check(hbFile)})
+            logger.debug(OvmHost.fence, rs)
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmHost.fence, errmsg)
+            raise XmlRpcFault(toErrCode(OvmHost, OvmHost.fence), errmsg)
+    
+    @staticmethod
+    def setupHeartBeat(poolUuid, ip):
+        try:
+            sr = OvmStoragePool()._getSrByNameLable(poolUuid)
+            OvmHaHeartBeat.start(sr.mountpoint, ip)
+            return SUCC()
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmHost.setupHeartBeat, errmsg)
+            raise XmlRpcFault(toErrCode(OvmHost, OvmHost.setupHeartBeat), errmsg)
+    
+    @staticmethod
+    def pingAnotherHost(ip):
+        try:
+            doCmd(['ping', '-c', '1', '-n', '-q', ip])
+            return SUCC()
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmHost.pingAnotherHost, errmsg)
+            raise XmlRpcFault(toErrCode(OvmHost, OvmHost.pingAnotherHost), errmsg)
+        
+if __name__ == "__main__":
+    print OvmHost.getAllVms()
\ No newline at end of file
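
A rough illustration of the host-side calls above (a sketch, not part of the commit; in production these static methods are invoked by the management server over XML-RPC rather than locally):

    from OvmHostModule import OvmHost

    print OvmHost.getDetails()           # Gson string with masterIp, cpu, memory and version facts
    print OvmHost.getAllVms()            # map of vm name -> status ("DOWN" for stopped vms, "ERROR" if the path is missing)
    print OvmHost.fence("192.168.1.11")  # {"isLive":true} while the peer's heartbeat file keeps advancing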

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmLoggerModule.py
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmLoggerModule.py b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmLoggerModule.py
new file mode 100755
index 0000000..07fe78f
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmLoggerModule.py
@@ -0,0 +1,51 @@
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+'''
+Created on May 19, 2011
+
+@author: frank
+'''
+import logging
+
+class OvmLogger(object):
+    '''
+    Thin wrapper around the standard logging module that prefixes every message with [ClassName.methodName].
+    '''
+
+
+    def __init__(self, className):
+        '''
+        Constructor
+        '''
+        self.className = className
+        self.logger = logging.getLogger(className)
+    
+    def info(self, func, msg=None):
+        assert callable(func), "%s is not a function"%func
+        fmt = "[%s.%s]: "%(self.className, func.__name__)
+        self.logger.info("%s%s"%(fmt,msg))
+    
+    def debug(self, func, msg=None):
+        assert callable(func), "%s is not a function"%func
+        fmt = "[%s.%s]: "%(self.className, func.__name__)
+        self.logger.debug("%s%s"%(fmt,msg))
+    
+    def error(self, func, msg=None):
+        assert callable(func), "%s is not a function"%func
+        fmt = "[%s.%s]: "%(self.className, func.__name__)
+        self.logger.error("%s%s"%(fmt,msg))
+    
+    def warning(self, func, msg=None):
+        assert callable(func), "%s is not a function"%func
+        fmt = "[%s.%s]: "%(self.className, func.__name__)
+        self.logger.warning("%s%s"%(fmt,msg))
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmNetworkModule.py
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmNetworkModule.py b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmNetworkModule.py
new file mode 100755
index 0000000..5813fcd
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmNetworkModule.py
@@ -0,0 +1,429 @@
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+from OvmCommonModule import *
+import traceback
+import time
+import re
+ 
+logger = OvmLogger("OvmNetwork")
+
+class Filter:
+    class Network:
+        IFNAME_LO     = r'(lo)'
+        IFNAME_BRIDGE = r'(xenbr\d+|vlan\d+)'
+        IFNAME_PIF    = r'(eth\d+$|bond\d+$)'
+        IFNAME_VLAN   = r'(eth\d+\.\d+$|bond\d+\.\d+$)'
+
+
+class Parser(object):
+    '''
+    Helpers for matching device-name patterns in command output and /sys/class/net listings.
+    '''    
+    def findall(self, pattern, samples):
+        """
+        @param pattern: search pattern
+        @param samples: lines to search through
+        @return : list of all matches
+        Find every occurrence of the pattern across the given lines.
+        """
+        result = []
+        for line in samples:
+            items = re.findall(pattern, line)
+            for item in items:
+                result.append(item)
+        return result
+    
+    def checkPattern(self, pattern, cmd_result):
+        """
+        @param pattern: search pattern
+        @param cmd_result: command output lines to search
+        @return : True if the pattern occurs in any line
+        """
+        for line in cmd_result:
+            items = re.findall(pattern, line)
+            if len(items) > 0:
+                return True
+        return False
+    
+    def search(self, cmd_result, pattern):
+        return None
+    
+class OvmVlanDecoder(json.JSONDecoder):
+    def decode(self, jStr):
+        deDict = asciiLoads(jStr)
+        vlan = OvmVlan()
+        setAttrFromDict(vlan, 'vid', deDict, int)
+        setAttrFromDict(vlan, 'pif', deDict)
+        return vlan
+
+class OvmVlanEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if not isinstance(obj, OvmVlan): raise Exception("%s is not instance of OvmVlan"%type(obj))
+        dct = {}
+        safeDictSet(obj, dct, 'name')
+        safeDictSet(obj, dct, 'vid')
+        safeDictSet(obj, dct, 'pif')
+        return dct
+    
+def toOvmVlan(jStr):
+    return json.loads(jStr, cls=OvmVlanDecoder)
+
+def fromOvmVlan(vlan):
+    return normalizeToGson(json.dumps(vlan, cls=OvmVlanEncoder))
+
+class OvmBridgeDecoder(json.JSONDecoder):
+    def decode(self, jStr):
+        deDic = asciiLoads(jStr)
+        bridge = OvmBridge()
+        setAttrFromDict(bridge, 'name', deDic)
+        setAttrFromDict(bridge, 'attach', deDic)
+        return bridge
+
+class OvmBridgeEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if not isinstance(obj, OvmBridge): raise Exception("%s is not instance of OvmBridge"%type(obj))
+        dct = {}
+        safeDictSet(obj, dct, 'name')
+        safeDictSet(obj, dct, 'attach')
+        safeDictSet(obj, dct, 'interfaces')
+        return dct
+    
+def toOvmBridge(jStr):
+    return json.loads(jStr, cls=OvmBridgeDecoder)
+
+def fromOvmBridge(bridge):
+    return normalizeToGson(json.dumps(bridge, cls=OvmBridgeEncoder))
+
+class OvmInterface(OvmObject):
+    name = ''
+
+class OvmVlan(OvmInterface):
+    vid = 0
+    pif = ''
+
+class OvmBridge(OvmInterface):
+    attach = ''
+    interfaces = []
+
+        
+class OvmNetwork(OvmObject):
+    '''
+    Network
+    '''
+
+    @property
+    def pifs(self):
+        return self._getInterfaces("pif")
+
+    @property
+    def vlans(self):
+        return self._getInterfaces("vlan")
+
+    @property
+    def bridges(self):
+        return self._getInterfaces("bridge")
+    
+    def __init__(self):
+        self.Parser = Parser()
+
+    def _createVlan(self, vlan):
+        """
+        @param vlan : OvmVlan object describing the vlan to create
+        @return : the created (or already existing) vlan interface
+        ex. input => {vid:100, pif:eth0}
+        """
+        
+        #Pre-condition
+        #check Physical Interface Name
+        if vlan.pif not in self.pifs.keys():    
+            msg = "Physical Interface(%s) does not exist" % vlan.pif
+            logger.debug(self._createVlan, msg)
+            raise Exception(msg)
+
+        #Pre-condition    
+        #check Vlan Interface Name
+        ifName = "%s.%s" % (vlan.pif, vlan.vid)
+        if ifName in self.vlans.keys():
+            msg = "Vlan Interface(%s) already exist, return it" % ifName
+            logger.debug(self._createVlan, msg)
+            return self.vlans[ifName]
+            
+        doCmd(['vconfig', 'add', vlan.pif, str(vlan.vid)])
+        self.bringUP(ifName)
+        logger.debug(self._createVlan, "Create vlan %s successfully"%ifName)
+        return self.vlans[ifName]
+    
+    def _deleteVlan(self, name):
+        if name not in self.vlans.keys():
+            raise Exception("No vlan device %s found"%name)
+        
+        vlan = self.vlans[name]
+        self.bringDown(vlan.name)
+        doCmd(['vconfig', 'rem', vlan.name])
+        logger.debug(self._deleteVlan, "Delete vlan %s successfully"%vlan.name)
+        
+    
+    def _createBridge(self, bridge):
+        """
+        @return : the created (or already existing) bridge
+        ex. {name:xenbr100, attach:eth0.100}
+        create the bridge interface and attach it to the given device
+        cmd 1: brctl addbr bridge
+        cmd 2: brctl addif bridge attach
+        """
+        
+        if "xenbr" not in bridge.name and "vlan" not in bridge.name:
+            raise Exception("Invalid bridge name %s. Bridge name must be in partten xenbr/vlan, e.g. xenbr0"%bridge.name)
+        
+        #pre-condition
+        #check Bridge Interface Name
+        if bridge.name in self.bridges.keys():
+            msg = "Bridge(%s) already exist, return it" % bridge.name
+            logger.debug(self._createBridge, msg)
+            return self.bridges[bridge.name]
+
+        #pre-condition
+        #check attach must exist
+        #possible to attach in PIF or VLAN 
+        if bridge.attach not in self.vlans.keys() and bridge.attach not in self.pifs.keys():
+            msg = "%s is not either pif or vlan" % bridge.attach
+            logger.error(self._createBridge, msg)
+            raise Exception(msg)
+
+        doCmd(['brctl', 'addbr', bridge.name])
+        doCmd(['brctl', 'addif', bridge.name, bridge.attach])
+        self.bringUP(bridge.name)
+        logger.debug(self._createBridge, "Create bridge %s on %s successfully"%(bridge.name, bridge.attach))
+        return self.bridges[bridge.name]
+    
+    def _getBridges(self):
+        return self.bridges.keys()
+    
+    def _getVlans(self):
+        return self.vlans.keys()
+    
+    def _deleteBridge(self, name):
+        if name not in self.bridges.keys():
+            raise Exception("Can not find bridge %s"%name)
+        
+        bridge = self.bridges[name]
+        if bridge.attach in bridge.interfaces: bridge.interfaces.remove(bridge.attach)
+        if len(bridge.interfaces) != 0:
+            logger.debug(self._deleteBridge, "There are still some interfaces(%s) on bridge %s"%(bridge.interfaces, bridge.name))
+            return False
+        self.bringDown(bridge.name)
+        doCmd(['brctl', 'delbr', bridge.name])
+        logger.debug(self._deleteBridge, "Delete bridge %s successfully"%bridge.name)
+        return True
+        
+    def _getInterfaces(self, type):
+        """
+        @param type : one of "pif", "vlan", "bridge"
+        @return : dictionary of interface objects keyed by device name
+        Get all interfaces of the given type.
+        """
+        devices = os.listdir('/sys/class/net')
+        ifs = {}
+        if type == "pif":
+            devs = self.Parser.findall(Filter.Network.IFNAME_PIF, devices)
+            for dev in set(devs):
+                ifInst = OvmInterface()
+                ifInst.name = dev
+                ifs[dev] = ifInst
+                
+        elif type == "vlan":
+            devs = self.Parser.findall(Filter.Network.IFNAME_VLAN, devices)
+            for dev in set(devs):
+                ifInst = OvmVlan()
+                ifInst.name = dev
+                (pif, vid) = dev.split('.')
+                ifInst.pif = pif
+                ifInst.vid = vid
+                ifs[dev] = ifInst
+                 
+        elif type == "bridge":
+            devs = self.Parser.findall(Filter.Network.IFNAME_BRIDGE, devices)
+            for dev in set(devs):
+                ifInst = OvmBridge()
+                ifInst.name = dev
+                devs = os.listdir(join('/sys/class/net', dev, 'brif'))
+                ifInst.interfaces = devs
+                attches = self.Parser.findall(Filter.Network.IFNAME_PIF, devs) + self.Parser.findall(Filter.Network.IFNAME_VLAN, devs)
+                if len(attches) > 1: raise Exception("Multiple PIF on bridge %s (%s)"%(dev, attches))
+                elif len(attches) == 0: ifInst.attach = "null"
+                elif len(attches) == 1: ifInst.attach = attches[0]
+                ifs[dev] = ifInst
+
+        return ifs
+    
+    def bringUP(self, ifName):
+        doCmd(['ifconfig', ifName, 'up'])
+    
+    def bringDown(self, ifName):
+        doCmd(['ifconfig', ifName, 'down'])
+           
+    @staticmethod
+    def createBridge(jStr):
+        try:
+            network = OvmNetwork()
+            network._createBridge(toOvmBridge(jStr))
+            return SUCC()
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmNetwork.createBridge, errmsg)
+            raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.createBridge), errmsg)
+    
+    @staticmethod
+    def deleteBridge(name):
+        try:
+            network = OvmNetwork()
+            network._deleteBridge(name)
+            return SUCC()
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmNetwork.deleteBridge, errmsg)
+            raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.deleteBridge), errmsg)
+    
+    @staticmethod
+    def getAllBridges():
+        try:
+            network = OvmNetwork()
+            rs = toGson(network._getBridges())
+            logger.debug(OvmNetwork.getAllBridges, rs)
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmNetwork.getAllBridges, errmsg)
+            raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getAllBridges), errmsg)
+    
+    @staticmethod
+    def getBridgeByIp(ip):
+        try:
+            routes = doCmd(['ip', 'route']).split('\n')
+            brName = None
+            for r in routes:
+                if ip in r and ("xenbr" in r or "vlan" in r):
+                    brName = r.split(' ')[2]
+                    break
+            if not brName: raise Exception("Cannot find bridge with IP %s"%ip)
+            logger.debug(OvmNetwork.getBridgeByIp, "bridge:%s, ip:%s"%(brName, ip))
+            return toGson({"bridge":brName})
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmNetwork.getBridgeByIp, errmsg)
+            raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getBridgeByIp), errmsg)              
+    
+    @staticmethod
+    def getVlans():
+        try:
+            network = OvmNetwork()
+            rs = toGson(network._getVlans())
+            logger.debug(OvmNetwork.getVlans, rs)
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmNetwork.getVlans, errmsg)
+            raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getVlans), errmsg)                        
+    
+    @staticmethod
+    def createVlan(jStr):
+        try:
+            network = OvmNetwork()
+            vlan = network._createVlan(toOvmVlan(jStr))
+            rs = fromOvmVlan(vlan)
+            logger.debug(OvmNetwork.createVlan, rs)
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmNetwork.createVlan, errmsg)
+            raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.createVlan), errmsg)
+    
+    @staticmethod
+    def createVlanBridge(bridgeDetails, vlanDetails):
+        try:
+            network = OvmNetwork()
+            v = toOvmVlan(vlanDetails)
+            b = toOvmBridge(bridgeDetails)
+            vlan = network._createVlan(v)
+            b.attach = vlan.name
+            network._createBridge(b)
+            return SUCC()
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmNetwork.createVlanBridge, errmsg)
+            raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.createVlanBridge), errmsg)
+    
+    @staticmethod
+    def deleteVlanBridge(name):
+        try:
+            network = OvmNetwork()
+            if name not in network.bridges.keys():
+                logger.debug(OvmNetwork.deleteVlanBridge, "No bridge %s found"%name)
+                return SUCC()
+            
+            bridge = network.bridges[name]
+            vlanName = bridge.attach
+            if network._deleteBridge(name):
+                if vlanName != "null":
+                    network._deleteVlan(vlanName)
+                else:
+                    logger.warning(OvmNetwork.deleteVlanBridge, "Bridge %s has no vlan device"%name)
+            return SUCC()
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmNetwork.deleteVlanBridge, errmsg)
+            raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.deleteVlanBridge), errmsg)
+    
+    @staticmethod
+    def getBridgeDetails(name):
+        try:
+            network = OvmNetwork()
+            if name not in network.bridges.keys():
+                raise Exception("No bridge %s found"%name)
+            bridge = network.bridges[name]
+            rs = fromOvmBridge(bridge)
+            logger.debug(OvmNetwork.getBridgeDetails, rs)
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmNetwork.getBridgeDetails, errmsg)
+            raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.getBridgeDetails), errmsg)
+           
+    @staticmethod
+    def deleteVlan(name):
+        try:
+            network = OvmNetwork()
+            network._deleteVlan(name)
+            return SUCC()
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmNetwork.deleteVlan, errmsg)
+            raise XmlRpcFault(toErrCode(OvmNetwork, OvmNetwork.deleteVlan), errmsg)                      
+        
+if __name__ == "__main__":
+    try:
+        OvmNetwork.getBridgeDetails(sys.argv[1])
+        #=======================================================================
+        # txt = json.dumps({"vid":104, "pif":"eth0"})
+        # txt2 = json.dumps({"name":"xapi3", "attach":"eth0.104"})
+        # print nw.createVlan(txt)
+        # print nw.createBridge(txt2)
+        # 
+        # nw.deleteBridge("xapi3")
+        # nw.deleteVlan("eth0.104")
+        #=======================================================================
+        
+    except Exception, e:
+        print e
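
To make the Filter.Network patterns concrete, a small classification example inferred from the regular expressions above (not part of the commit):

    from OvmNetworkModule import Filter, Parser

    devices = ['lo', 'eth0', 'eth0.200', 'bond0', 'xenbr0', 'vlan200']
    p = Parser()
    print p.findall(Filter.Network.IFNAME_PIF, devices)     # ['eth0', 'bond0']
    print p.findall(Filter.Network.IFNAME_VLAN, devices)    # ['eth0.200']
    print p.findall(Filter.Network.IFNAME_BRIDGE, devices)  # ['xenbr0', 'vlan200']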

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmOCFS2Module.py
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmOCFS2Module.py b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmOCFS2Module.py
new file mode 100755
index 0000000..9a8f0eb
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmOCFS2Module.py
@@ -0,0 +1,76 @@
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+from OvmCommonModule import *
+
+logger = OvmLogger('OvmOCFS2')  
+class OvmOCFS2(OvmObject):
+    def _prepareConf(self, cluster):
+        conf = '''cluster:
+        node_count = 0
+        name = %s
+        '''%cluster
+        dir = dirname(OCFS2_CONF)
+        if not isdir(dir):
+            os.makedirs(dir)
+            
+        fd = open(OCFS2_CONF, 'w')
+        fd.write(conf)
+        fd.close()
+        
+    def _addNode(self, name, nodeNum, ip, port, cluster, isOnline=True):
+        nodePath = '/sys/kernel/config/cluster/%s/node/%s'%(cluster, name)
+        if exists(nodePath):
+            logger.debug(OvmOCFS2._addNode, "node %s already exists, skip it(%s)"%(name, nodePath))
+            return
+        
+        if not isOnline:
+            cmds = ['o2cb_ctl -C -n', name, '-t node', '-a number=%s'%nodeNum, '-a ip_address=%s'%ip, '-a ip_port=%s'%port, '-a cluster=%s'%cluster]
+        else:
+            cmds = ['o2cb_ctl -C -i -n', name, '-t node', '-a number=%s'%nodeNum, '-a ip_address=%s'%ip, '-a ip_port=%s'%port, '-a cluster=%s'%cluster]
+        
+        try:
+            doCmd(cmds)
+        except ShellExceutedFailedException, e:
+            if e.errCode == 239 or "already exists" in e.stderr:
+                logger.debug(OvmOCFS2._addNode, "node %s already exists, skip it(%s)"%(name, e.stderr))
+            else:
+                raise e
+    
+    def _isClusterOnline(self, cluster):
+        cmds = ['service o2cb status', cluster]
+        res = doCmd(cmds)
+        for line in res.split('\n'):
+            if not 'Checking O2CB cluster' in line: continue
+            return not 'Offline' in line
+    
+    def _load(self):
+        cmd = ['service o2cb load']
+        doCmd(cmd)
+        
+    def _start(self, cluster):
+        # blank lines answer the interactive prompts (equivalent to pressing Enter)
+        config='''
+y
+o2cb
+%s
+
+
+
+
+EOF
+'''%cluster
+        cmd = ['service o2cb configure', '<<EOF', config]
+        doCmd(cmd)
+        cmd = ['service o2cb start %s'%cluster]
+        doCmd(cmd)
+                
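A rough sketch of how these helpers would be chained to bring up an OCFS2 cluster (an assumed call sequence, not part of the commit; OCFS2_CONF is defined in OvmCommonModule):

    from OvmOCFS2Module import OvmOCFS2

    ocfs2 = OvmOCFS2()
    ocfs2._prepareConf('ocfs2')                                # write the cluster stanza to OCFS2_CONF
    ocfs2._load()                                              # "service o2cb load"
    ocfs2._start('ocfs2')                                      # configure and start the o2cb service
    ocfs2._addNode('node1', 0, '192.168.1.10', 7777, 'ocfs2')  # register this host in the cluster
    if not ocfs2._isClusterOnline('ocfs2'):
        raise Exception("o2cb cluster 'ocfs2' did not come online")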

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmObjectModule.py
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmObjectModule.py b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmObjectModule.py
new file mode 100755
index 0000000..0d21dae
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmObjectModule.py
@@ -0,0 +1,20 @@
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+'''
+Created on May 17, 2011
+
+@author: frank
+'''
+
+class OvmObject(object):
+   pass
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmPatch.patch
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmPatch.patch b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmPatch.patch
new file mode 100755
index 0000000..5f4c7a5
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmPatch.patch
@@ -0,0 +1,23 @@
+*** OVSServices.py	2011-06-06 12:31:23.279919998 -0700
+--- /tmp/OVSServices.py	2011-06-06 12:32:13.275919997 -0700
+***************
+*** 68,73 ****
+--- 68,75 ----
+  import OVSVMXParser                 #pylint:    disable-msg=W0611
+  import OVSVMDKParser                #pylint:    disable-msg=W0611
+  
++ import OvmDispatcher
++ 
+  from OVSWrappers import D
+  from OVSCommons import exposed
+  
+***************
+*** 113,118 ****
+--- 115,121 ----
+          self.sleep = sleep
+  
+          self._load_modules()
++         OvmDispatcher.InitOvmDispacther()
+  
+  #        #xenapi
+  #        import xen.xend.XendAPI as XendAPI

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmSecurityGroupModule.py
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmSecurityGroupModule.py b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmSecurityGroupModule.py
new file mode 100755
index 0000000..9816314
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmSecurityGroupModule.py
@@ -0,0 +1,478 @@
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+from OvmCommonModule import *
+from ConfigFileOps import *
+import os
+import logging
+
+class OvmSecurityGroup(OvmObject):
+
+    @staticmethod
+    def can_bridge_firewall():
+        try:
+            execute("which iptables")
+        except:
+            print "iptables was not found on the host"
+            return False
+
+        try:
+            execute("which ebtables")
+        except:
+            print "ebtables was not found on the host"
+            return False
+        
+        if not os.path.exists('/var/run/cloud'):
+            os.makedirs('/var/run/cloud')
+     
+        return OvmSecurityGroup.cleanup_rules()        
+
+    @staticmethod
+    def cleanup_rules():
+        try:
+            chainscmd = "iptables-save | grep '^:' | grep -v '.*-def' | awk '{print $1}' | cut -d':' -f2"
+            chains = execute(chainscmd).split('\n')
+            cleaned = 0
+            cleanup = []
+            for chain in chains:
+                if 1 in [ chain.startswith(c) for c in ['r-', 'i-', 's-', 'v-'] ]:
+                    vm_name = chain
+                else:
+                    continue
+                        
+                cmd = "xm list | grep " + vm_name 
+                try:
+                    result = execute(cmd)
+                except:
+                    result = None
+
+                if result == None or len(result) == 0:
+                    logging.debug("chain " + chain + " does not correspond to a vm, cleaning up")
+                    cleanup.append(vm_name)
+                    
+            for vm_name in cleanup:
+                OvmSecurityGroup.delete_all_network_rules_for_vm(vm_name)
+                        
+            logging.debug("Cleaned up rules for " + str(len(cleanup)) + " chains")
+            return True
+        except:
+            logging.debug("Failed to cleanup rules !")
+            return False
+
+    @staticmethod
+    def add_fw_framework(bridge_name):
+        try:
+            cfo = ConfigFileOps("/etc/sysctl.conf")
+            cfo.addEntry("net.bridge.bridge-nf-call-arptables", "1")
+            cfo.addEntry("net.bridge.bridge-nf-call-iptables", "1")
+            cfo.addEntry("net.bridge.bridge-nf-call-ip6tables", "1")
+            cfo.save()
+
+            execute("sysctl -p /etc/sysctl.conf")
+        except:
+            logging.debug("failed to turn on bridge netfilter")
+            return False
+
+        brfw = "BF-" + bridge_name
+        try:
+            execute("iptables -L " + brfw)
+        except:
+            execute("iptables -N " + brfw)
+
+        brfwout = brfw + "-OUT"
+        try:
+            execute("iptables -L " + brfwout)
+        except:
+            execute("iptables -N " + brfwout)
+
+        brfwin = brfw + "-IN"
+        try:
+            execute("iptables -L " + brfwin)
+        except:
+            execute("iptables -N " + brfwin)
+
+        try:
+            refs = execute("iptables -n -L  " + brfw + " |grep " + brfw + " | cut -d \( -f2 | awk '{print $1}'").strip()
+            if refs == "0":
+                execute("iptables -I FORWARD -i " + bridge_name + " -j DROP")
+                execute("iptables -I FORWARD -o " + bridge_name + " -j DROP")
+                execute("iptables -I FORWARD -i " + bridge_name + " -m physdev --physdev-is-bridged -j " + brfw)
+                execute("iptables -I FORWARD -o " + bridge_name + " -m physdev --physdev-is-bridged -j " + brfw)
+                phydev = execute("brctl show |grep " + bridge_name + " | awk '{print $4}'").strip()
+                execute("iptables -A " + brfw + " -m physdev --physdev-is-bridged --physdev-out " + phydev + " -j ACCEPT")
+                execute("iptables -A " + brfw + " -m state --state RELATED,ESTABLISHED -j ACCEPT")
+                execute("iptables -A " + brfw + " -m physdev --physdev-is-bridged --physdev-is-out -j " + brfwout)
+                execute("iptables -A " + brfw + " -m physdev --physdev-is-bridged --physdev-is-in -j " + brfwin)              
+        
+            return True
+        except:
+            try:
+                execute("iptables -F " + brfw)
+            except:
+                return False
+            
+            return False
+
+    @staticmethod
+    def default_network_rules_user_vm(vm_name, vm_id, vm_ip, vm_mac, vif, bridge_name):
+        if not OvmSecurityGroup.add_fw_framework(bridge_name):
+            return False 
+
+        OvmSecurityGroup.delete_iptables_rules_for_vm(vm_name)
+        OvmSecurityGroup.delete_ebtables_rules_for_vm(vm_name)
+        
+        bridge_firewall_chain = "BF-" + bridge_name    
+        vm_chain = vm_name
+        default_vm_chain = '-'.join(vm_chain.split('-')[:-1]) + "-def"
+        dom_id = getDomId(vm_name)
+
+        try:
+            execute("iptables -N " + vm_chain)
+        except:
+            execute("iptables -F " + vm_chain)
+            
+        try:
+            execute("iptables -N " + default_vm_chain)
+        except:
+            execute("iptables -F " + default_vm_chain)
+
+        try:
+            execute("iptables -A " + bridge_firewall_chain + "-OUT" + " -m physdev --physdev-is-bridged --physdev-out " + vif + " -j " +  default_vm_chain)
+            execute("iptables -A " + bridge_firewall_chain + "-IN" + " -m physdev --physdev-is-bridged --physdev-in " +  vif + " -j " + default_vm_chain)
+            execute("iptables -A  " + default_vm_chain + " -m state --state RELATED,ESTABLISHED -j ACCEPT")
+
+            # Allow DHCP
+            execute("iptables -A " + default_vm_chain + " -m physdev --physdev-is-bridged --physdev-in " + vif + " -p udp --dport 67 --sport 68 -j ACCEPT")
+            execute("iptables -A " + default_vm_chain + " -m physdev --physdev-is-bridged --physdev-out " + vif + " -p udp --dport 68 --sport 67  -j ACCEPT")
+
+            # Don't let a VM spoof its ip address
+            if vm_ip is not None:
+                execute("iptables -A " + default_vm_chain + " -m physdev --physdev-is-bridged --physdev-in " + vif  + " --source " +  vm_ip +  " -j ACCEPT")
+
+            execute("iptables -A " + default_vm_chain + " -j " +  vm_chain)
+            execute("iptables -A " + vm_chain + " -j DROP")
+        except:
+            logging.debug("Failed to program default rules for vm " + vm_name)
+            return False
+        
+        OvmSecurityGroup.default_ebtables_rules(vm_chain, vm_ip, vm_mac, vif)
+        
+        if vm_ip is not None:
+            if (OvmSecurityGroup.write_rule_log_for_vm(vm_name, vm_id, vm_ip, dom_id, '_initial_', '-1') == False):
+                logging.debug("Failed to log default network rules, ignoring")
+            
+        logging.debug("Programmed default rules for vm " + vm_name)
+        return True
+
+    @staticmethod
+    def default_ebtables_rules(vm_name, vm_ip, vm_mac, vif):
+        vm_chain_in = vm_name + "-in"
+        vm_chain_out = vm_name + "-out"
+        
+        for chain in [vm_chain_in, vm_chain_out]:
+            try:
+                execute("ebtables -t nat -N " + chain)
+            except:
+                execute("ebtables -t nat -F " + chain) 
+
+        try:
+            execute("ebtables -t nat -A PREROUTING -i " + vif + " -j " +  vm_chain_in)
+            execute("ebtables -t nat -A POSTROUTING -o " + vif + " -j " + vm_chain_out)
+        except:
+            logging.debug("Failed to program default rules")
+            return False
+        
+        try:
+            execute("ebtables -t nat -A " +  vm_chain_in + " -s ! " +  vm_mac + " -j DROP")
+            execute("ebtables -t nat -A " +  vm_chain_in  + " -p ARP -s ! " + vm_mac + " -j DROP")
+            execute("ebtables -t nat -A " +  vm_chain_in  + " -p ARP --arp-mac-src ! " + vm_mac + " -j DROP")
+            if vm_ip is not None:
+                execute("ebtables -t nat -A " + vm_chain_in  +  " -p ARP --arp-ip-src ! " + vm_ip + " -j DROP")
+            execute("ebtables -t nat -A " + vm_chain_in  + " -p ARP --arp-op Request -j ACCEPT")   
+            execute("ebtables -t nat -A " + vm_chain_in  + " -p ARP --arp-op Reply -j ACCEPT")    
+            execute("ebtables -t nat -A " + vm_chain_in  + " -p ARP  -j DROP")    
+        except:
+            logging.exception("Failed to program default ebtables IN rules")
+            return False
+       
+        try:
+            execute("ebtables -t nat -A " + vm_chain_out + " -p ARP --arp-op Reply --arp-mac-dst ! " +  vm_mac + " -j DROP")
+            if vm_ip is not None:
+                execute("ebtables -t nat -A " + vm_chain_out + " -p ARP --arp-ip-dst ! " + vm_ip + " -j DROP") 
+            execute("ebtables -t nat -A " + vm_chain_out + " -p ARP --arp-op Request -j ACCEPT")   
+            execute("ebtables -t nat -A " + vm_chain_out + " -p ARP --arp-op Reply -j ACCEPT")    
+            execute("ebtables -t nat -A " + vm_chain_out + " -p ARP -j DROP")    
+        except:
+            logging.debug("Failed to program default ebtables OUT rules")
+            return False
+
+        return True
+
+    @staticmethod
+    def add_network_rules(vm_name, vm_id, vm_ip, signature, seqno, vm_mac, rules, vif, bridge_name):
+        try:
+            vm_chain = vm_name
+            dom_id = getDomId(vm_name)
+            
+            changes = []
+            changes = OvmSecurityGroup.check_rule_log_for_vm(vm_name, vm_id, vm_ip, dom_id, signature, seqno)
+        
+            if not 1 in changes:
+                logging.debug("Rules already programmed for vm " + vm_name)
+                return True
+        
+            if changes[0] or changes[1] or changes[2] or changes[3]:
+                if not OvmSecurityGroup.default_network_rules(vm_name, vm_id, vm_ip, vm_mac, vif, bridge_name):
+                    return False
+
+            if rules == "" or rules == None:
+                lines = []
+            else:
+                lines = rules.split(';')[:-1]
+
+            logging.debug("Programming network rules for  IP: " + vm_ip + " vmname=" + vm_name)
+            execute("iptables -F " + vm_chain)
+        
+            for line in lines:            
+                tokens = line.split(':')
+                if len(tokens) != 4:
+                    continue
+                protocol = tokens[0]
+                start = tokens[1]
+                end = tokens[2]
+                cidrs = tokens.pop();
+                ips = cidrs.split(",")
+                ips.pop()
+                allow_any = False
+                if  '0.0.0.0/0' in ips:
+                    i = ips.index('0.0.0.0/0')
+                    del ips[i]
+                    allow_any = True
+                    
+                port_range = start + ":" + end
+                if ips:    
+                    if protocol == 'all':
+                        for ip in ips:
+                            execute("iptables -I " + vm_chain + " -m state --state NEW -s " + ip + " -j ACCEPT")
+                    elif protocol != 'icmp':
+                        for ip in ips:
+                            execute("iptables -I " + vm_chain + " -p " + protocol + " -m " + protocol + " --dport " + port_range + " -m state --state NEW -s " + ip + " -j ACCEPT")
+                    else:
+                        port_range = start + "/" + end
+                        if start == "-1":
+                            port_range = "any"
+                        for ip in ips:
+                            execute("iptables -I " + vm_chain + " -p icmp --icmp-type " + port_range + " -s " + ip + " -j ACCEPT")
+            
+                if allow_any and protocol != 'all':
+                    if protocol != 'icmp':
+                        execute("iptables -I " + vm_chain + " -p " + protocol + " -m " +  protocol + " --dport " + port_range + " -m state --state NEW -j ACCEPT")
+                    else:
+                        port_range = start + "/" + end
+                        if start == "-1":
+                            port_range = "any"
+                        execute("iptables -I " + vm_chain + " -p icmp --icmp-type " + port_range + " -j ACCEPT")
+        
+            iptables =  "iptables -A " + vm_chain + " -j DROP"       
+            execute(iptables)
+            
+            return OvmSecurityGroup.write_rule_log_for_vm(vm_name, vm_id, vm_ip, dom_id, signature, seqno)        
+        except:
+            logging.debug("Failed to network rule !: " + sys.exc_type)
+            return False
+
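+    # Tear down everything programmed for a vm: its iptables chain, its ebtables
+    # chains, any DNAT rules that reference its vif, and the on-disk rule log.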
+    @staticmethod
+    def delete_all_network_rules_for_vm(vm_name, vif = None):            
+        OvmSecurityGroup.delete_iptables_rules_for_vm(vm_name)
+        OvmSecurityGroup.delete_ebtables_rules_for_vm(vm_name)
+
+        vm_chain = vm_name
+        default_vm_chain = None
+        if vm_name.startswith('i-') or vm_name.startswith('r-'):
+            default_vm_chain =  '-'.join(vm_name.split('-')[:-1]) + "-def"
+        
+        try:
+            if default_vm_chain is not None:
+                execute("iptables -F " + default_vm_chain)
+        except:
+            logging.debug("Ignoring failure to flush chain " + default_vm_chain)
+        
+        try:
+            if default_vm_chain is not None:
+                execute("iptables -X " + default_vm_chain)
+        except:
+            logging.debug("Ignoring failure to delete chain " + default_vm_chain)
+
+        try:
+            execute("iptables -F " + vm_chain)
+        except:
+            logging.debug("Ignoring failure to flush chain " + vm_chain)
+        
+        try:
+            execute("iptables -X " + vm_chain)
+        except:
+            logging.debug("Ignoring failure to delete chain " + vm_chain)
+        
+        if vif is not None:
+            try:
+                dnats = execute("iptables-save -t nat | grep " + vif + " | sed 's/-A/-D/'").split("\n")
+                for dnat in dnats:
+                    try:
+                        execute("iptables -t nat " + dnat)
+                    except:
+                        logging.debug("Ignoring failure to delete dnat rule: " + dnat)
+            except:
+                pass
+            
+        OvmSecurityGroup.remove_rule_log_for_vm(vm_name)
+        
+        return True
+
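+    # Delete the physdev-is-bridged iptables rules that reference the vm's (truncated) chain name.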
+    @staticmethod
+    def delete_iptables_rules_for_vm(vm_name):
+        vm_name = OvmSecurityGroup.truncate_vm_name(vm_name)
+        vm_chain = vm_name
+        query = "iptables-save | grep " +  vm_chain + " | grep physdev-is-bridged | sed 's/-A/-D/'"
+        delete_cmds = execute(query).split('\n')
+        delete_cmds.pop()
+        
+        for cmd in delete_cmds:
+            try:
+                execute("iptables " + cmd)
+            except:
+                logging.exception("Ignoring failure to delete rules for vm " + vm_name)
+
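+    # Remove the vm's entries from the ebtables nat PREROUTING/POSTROUTING chains,
+    # then flush and delete its per-vm "-in"/"-out" chains.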
+    @staticmethod
+    def delete_ebtables_rules_for_vm(vm_name):
+        vm_name = OvmSecurityGroup.truncate_vm_name(vm_name)        
+        query = "ebtables -t nat -L --Lx | grep ROUTING | grep " +  vm_name + " | sed 's/-A/-D/'"
+        delete_cmds = execute(query).split('\n')
+        delete_cmds.pop()
+
+        for cmd in delete_cmds:
+            try:
+                execute(cmd)
+            except:
+                logging.debug("Ignoring failure to delete ebtables rules for vm " + vm_name)
+                
+        chains = [vm_name + "-in", vm_name + "-out"]
+        
+        for chain in chains:
+            try:
+                execute("ebtables -t nat -F " +  chain)
+                execute("ebtables -t nat -X " +  chain)
+            except:
+                logging.debug("Ignoring failure to delete ebtables chain for vm " + vm_name)
+
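+    # Drop the last '-'-separated component from instance ('i-') and router ('r-') vm names.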
+    @staticmethod
+    def truncate_vm_name(vm_name):
+        if vm_name.startswith('i-') or vm_name.startswith('r-'):
+            truncated_vm_name = '-'.join(vm_name.split('-')[:-1])
+        else:
+            truncated_vm_name = vm_name
+        return truncated_vm_name        
+
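+    # Persist the state that was just programmed to /var/run/cloud/<vm_name>.log so a
+    # later call can detect whether anything changed.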
+    @staticmethod
+    def write_rule_log_for_vm(vm_name, vm_id, vm_ip, dom_id, signature, seqno):
+        log_file_name = "/var/run/cloud/" + vm_name + ".log"
+        logging.debug("Writing log to " + log_file_name)
+        logf = open(log_file_name, 'w')
+        output = ','.join([vm_name, vm_id, vm_ip, dom_id, signature, seqno])
+
+        result = True        
+        try:
+            logf.write(output)
+            logf.write('\n')
+        except:
+            logging.debug("Failed to write to rule log file " + log_file_name)
+            result = False
+            
+        logf.close()        
+        return result
+
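+    # Remove the rule log written by write_rule_log_for_vm.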
+    @staticmethod
+    def remove_rule_log_for_vm(vm_name):
+        log_file_name = "/var/run/cloud/" + vm_name +".log"
+
+        result = True
+        try:
+            os.remove(log_file_name)
+        except:
+            logging.debug("Failed to delete rule log file " + log_file_name)
+            result = False
+        
+        return result
+
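+    # Compare the caller's values against the rule log. Returns six booleans
+    # [vm_name, vm_id, vm_ip, dom_id, signature, seqno]; True means that field changed
+    # (or the log is missing/unreadable, in which case everything is treated as changed).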
+    @staticmethod
+    def check_rule_log_for_vm(vm_name, vm_id, vm_ip, dom_id, signature, seqno):
+        log_file_name = "/var/run/cloud/" + vm_name + ".log"
+        if not os.path.exists(log_file_name):
+            return [True, True, True, True, True, True]
+            
+        try:
+            lines = (line.rstrip() for line in open(log_file_name))
+        except:
+            logging.debug("Failed to open " + log_file_name)
+            return [True, True, True, True, True, True]
+
+        [_vm_name, _vm_id, _vm_ip, _dom_id, _signature, _seqno] = ['_', '-1', '_', '-1', '_', '-1']
+        try:
+            for line in lines:
+                [_vm_name, _vm_id, _vm_ip, _dom_id, _signature, _seqno] = line.split(',')
+                break
+        except:
+            logging.debug("Failed to parse log file for vm " + vm_name)
+            OvmSecurityGroup.remove_rule_log_for_vm(vm_name)
+            return [True, True, True, True, True, True]
+        
+        return [(vm_name != _vm_name), (vm_id != _vm_id), (vm_ip != _vm_ip), (dom_id != _dom_id), (signature != _signature), (seqno != _seqno)]
+

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmStoragePoolModule.py
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmStoragePoolModule.py b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmStoragePoolModule.py
new file mode 100755
index 0000000..9d83037
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmStoragePoolModule.py
@@ -0,0 +1,428 @@
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+from OvmCommonModule import *
+from OVSSiteSR import sp_create, sr_create, sr_do
+from OVSParser import parse_ocfs2_cluster_conf
+from OVSXCluster import clusterm_set_ocfs2_cluster_conf, clusterm_start_o2cb_service
+from OVSSiteRMServer import get_master_ip
+from OvmOCFS2Module import OvmOCFS2
+import re
+
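+# json decoder/encoder pair for OvmStoragePool; fromOvmStoragePool/toOvmStoragePool
+# below convert between the object and the GSON-style strings exchanged with the caller.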
+class OvmStoragePoolDecoder(json.JSONDecoder):
+    def decode(self, jStr):
+        dct = asciiLoads(jStr)
+        pool = OvmStoragePool()
+        setAttrFromDict(pool, 'uuid', dct)
+        setAttrFromDict(pool, 'type', dct)
+        setAttrFromDict(pool, 'path', dct)
+        return pool
+
+class OvmStoragePoolEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if not isinstance(obj, OvmStoragePool): raise Exception("%s is not an instance of OvmStoragePool"%type(obj))
+        dct = {}
+        safeDictSet(obj, dct, 'uuid')
+        safeDictSet(obj, dct, 'type')
+        safeDictSet(obj, dct, 'path')
+        safeDictSet(obj, dct, 'mountPoint')
+        safeDictSet(obj, dct, 'totalSpace')
+        safeDictSet(obj, dct, 'freeSpace')
+        safeDictSet(obj, dct, 'usedSpace')
+        return dct
+
+def fromOvmStoragePool(pool):
+    return normalizeToGson(json.dumps(pool, cls=OvmStoragePoolEncoder))
+
+def toOvmStoragePool(jStr):
+    return json.loads(jStr, cls=OvmStoragePoolDecoder)
+
+logger = OvmLogger('OvmStoragePool')   
+class OvmStoragePool(OvmObject):
+    uuid = ''
+    type = ''
+    path = ''
+    mountPoint = ''
+    totalSpace = 0
+    freeSpace = 0
+    usedSpace = 0
+
+    def _getSrByNameLable(self, poolUuid):
+        d = db_dump('sr')
+        for uuid, sr in d.items():
+            if sr.name_label == poolUuid:
+                return sr
+    
+        raise Exception("No SR matching to %s" % poolUuid)
+    
+    def _getSpaceinfoOfDir(self, dir):
+        stat = os.statvfs(dir)
+        freeSpace = stat.f_frsize * stat.f_bavail
+        totalSpace = stat.f_blocks * stat.f_frsize
+        return (totalSpace, freeSpace)
+    
+    def _checkDirSizeForImage(self, dir, image):
+        (x, free_storage_size) = OvmStoragePool()._getSpaceinfoOfDir(dir)
+        image_size = os.path.getsize(image)
+        # require roughly 1 GB of headroom beyond the image size
+        if image_size > (free_storage_size - 1024 * 1024 * 1024):
+            raise Exception("Not enough space on dir %s (free storage:%s, image size:%s)"%(dir, free_storage_size, image_size))
+         
+    def _getAllMountPoints(self):
+        mps = []
+        d = db_dump('sr')
+        for uuid, sr in d.items():
+            mps.append(sr.mountpoint)
+        return mps
+    
+    def _isMounted(self, path):
+        res = doCmd(['mount'])
+        return (path in res)
+    
+    def _mount(self, target, mountpoint, readonly=False):
+        if not exists(mountpoint):
+            os.makedirs(mountpoint)
+            
+        if not OvmStoragePool()._isMounted(mountpoint):
+            if readonly:
+                doCmd(['mount', target, mountpoint, '-r'])
+            else:
+                doCmd(['mount', target, mountpoint])
+    
+    def _umount(self, mountpoint):
+        umountCmd = ['umount', '-f', mountpoint]
+        doCmd(umountCmd)
+        ls = os.listdir(mountpoint)
+        if len(ls) == 0:
+            rmDirCmd = ['rm', '-r', mountpoint]
+            doCmd(rmDirCmd)
+        else:
+            logger.warning(OvmStoragePool._umount, "Something went wrong while unmounting %s, there are still files in the directory: %s" % (mountpoint, " ".join(ls)))
+         
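+    # Create the storage plugin and an SR named after the pool uuid, then initialize the SR.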
+    @staticmethod
+    def create(jStr):
+        try:
+            pool = toOvmStoragePool(jStr)
+            logger.debug(OvmStoragePool.create, fromOvmStoragePool(pool))
+            spUuid = jsonSuccessToMap(sp_create(pool.type, pool.path))['uuid']
+            srUuid = jsonSuccessToMap(sr_create(spUuid, name_label=pool.uuid))['uuid']
+            sr_do(srUuid, "initialize")
+            rs = SUCC()
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmStoragePool.create, errmsg)
+            raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.create), errmsg)
+    
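+    # Look up the SR whose name_label equals the pool uuid and report its type, device
+    # path, mount point and space usage.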
+    @staticmethod
+    def getDetailsByUuid(uuid):
+        try:
+            sr = OvmStoragePool()._getSrByNameLable(uuid)
+            pool = OvmStoragePool()
+            safeSetAttr(pool, 'uuid', uuid)
+            #Note: the sr.sp.fs_type is not mapped to its class name which we use in mgmt server
+            safeSetAttr(pool, 'type', sr.sp.__class__.__name__)
+            safeSetAttr(pool, 'path', sr.sp.get_fs_spec())
+            safeSetAttr(pool, 'mountPoint', sr.mountpoint)
+            (totalSpace, freeSpace) = OvmStoragePool()._getSpaceinfoOfDir(sr.mountpoint)
+            safeSetAttr(pool, 'totalSpace', totalSpace)
+            safeSetAttr(pool, 'freeSpace', freeSpace)
+            safeSetAttr(pool, 'usedSpace', totalSpace - freeSpace)
+            res = fromOvmStoragePool(pool)
+            logger.debug(OvmStoragePool.getDetailsByUuid, res)
+            return res
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmStoragePool.getDetailsByUuid, errmsg)
+            raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.getDetailsByUuid), errmsg)
+    
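+    # Mount the NFS secondary storage read-only under a temporary /var/cloud/<uuid>
+    # directory, locate the raw template and copy it into <primary>/seed_pool/<uuid>.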
+    @staticmethod
+    def downloadTemplate(uuid, secPath):
+        secMountPoint = None
+        try:
+            logger.debug(OvmStoragePool.downloadTemplate, "download %s to pool %s"%(secPath, uuid))
+            try:
+                tmpUuid = get_uuid()
+                secMountPoint = join("/var/cloud/", tmpUuid)
+                if not exists(secMountPoint):
+                    os.makedirs(secMountPoint)
+    
+                templateFile = None
+                if secPath.endswith("raw"):
+                    secPathDir = os.path.dirname(secPath)
+                    templateFile = os.path.basename(secPath)
+                else:
+                    secPathDir = secPath
+                    
+                # mount as read-only
+                mountCmd = ['mount.nfs', secPathDir, secMountPoint, '-r']
+                doCmd(mountCmd)
+    
+                if not templateFile:
+                    for f in os.listdir(secMountPoint):
+                        if isfile(join(secMountPoint, f)) and f.endswith('raw'):
+                            templateFile = f
+                            break    
+        
+                if not templateFile:
+                    raise Exception("Cannot find a raw template on secondary storage")
+                templateSecPath = join(secMountPoint, templateFile)
+    
+                sr = OvmStoragePool()._getSrByNameLable(uuid)
+                priStorageMountPoint = sr.mountpoint
+                # Although mgmt server will check the size, we check again for safety
+                OvmStoragePool()._checkDirSizeForImage(priStorageMountPoint, templateSecPath)
+                seedDir = join(priStorageMountPoint, 'seed_pool', tmpUuid)
+                if exists(seedDir):
+                    raise Exception("%s already exists, cannot overwrite existing template" % seedDir)
+                os.makedirs(seedDir)
+    
+                tgt = join(seedDir, templateFile)
+                cpTemplateCmd = ['cp', templateSecPath, tgt]
+                logger.info(OvmStoragePool.downloadTemplate, " ".join(cpTemplateCmd))
+                doCmd(cpTemplateCmd)
+                templateSize = os.path.getsize(tgt) 
+                logger.info(OvmStoragePool.downloadTemplate, "primary_storage_download success:installPath:%s, templateSize:%s"%(tgt,templateSize))
+                rs = toGson({"installPath":tgt, "templateSize":templateSize})
+                return rs
+            except Exception, e:
+                errmsg = fmt_err_msg(e)
+                logger.error(OvmStoragePool.downloadTemplate, errmsg)
+                raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.downloadTemplate), errmsg)
+        finally:
+            if secMountPoint and exists(secMountPoint):
+                try:
+                    OvmStoragePool()._umount(secMountPoint)
+                except Exception, e:
+                    errmsg = fmt_err_msg(e)
+                    logger.error(OvmStoragePool.downloadTemplate, 'unmount secondary storage at %s failed, %s'%(secMountPoint, errmsg))
+
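+    # Prepare all nodes of an OCFS2 cluster. The parsing below expects 'nodeString' as
+    # "number:ip:name;" entries, e.g. "0:192.0.2.10:node1;1:192.0.2.11:node2;" (illustrative).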
+    @staticmethod
+    def prepareOCFS2Nodes(clusterName, nodeString):        
+        def configureEtcHosts(nodes):
+            if not exists(ETC_HOSTS):
+                originalConf = ""
+            else:
+                fd = open(ETC_HOSTS, "r")
+                originalConf = fd.read()
+                fd.close()
+            
+            # drop any stale lines mentioning a node's ip address or name before re-adding them
+            pattern = r"(.*%s.*)|(.*%s.*)"
+            newlines = []
+            for n in nodes:
+                p = pattern % (n["ip_address"], n["name"])
+                originalConf = re.sub(p, "", originalConf)
+                newlines.append("%s\t%s\n"%(n["ip_address"], n["name"]))
+            
+            originalConf = originalConf + "".join(newlines)
+            # remove extra empty lines
+            originalConf = re.sub(r"\n\s*\n*", "\n", originalConf)
+            logger.debug(OvmStoragePool.prepareOCFS2Nodes, "Configure /etc/hosts:%s\n"%originalConf)
+            fd = open(ETC_HOSTS, "w")
+            fd.write(originalConf)
+            fd.close()
+        
+        def configureHostName(nodes):
+            myIp = successToMap(get_master_ip())['ip']
+            nodeName = None
+            for n in nodes:
+                if myIp == n["ip_address"]:
+                    nodeName = n["name"]
+                    break
+            
+            if nodeName is None: raise Exception("Cannot find a node matching my ip address:%s"%myIp)
+            if not exists(HOSTNAME_FILE):
+                originalConf = ""
+            else:
+                fd = open(HOSTNAME_FILE, "r")
+                originalConf = fd.read()
+                fd.close()
+            
+            pattern = r"HOSTNAME=(.*)"
+            # remove any old hostname
+            originalConf = re.sub(pattern, "", originalConf)
+            # remove extra empty lines
+            originalConf = re.sub(r"\n\s*\n*", "\n", originalConf) + "\n" + "HOSTNAME=%s"%nodeName
+            logger.debug(OvmStoragePool.prepareOCFS2Nodes, "Configure %s:%s\n"%(HOSTNAME_FILE,originalConf))
+            fd = open(HOSTNAME_FILE, "w")
+            fd.write(originalConf)
+            fd.close()
+            doCmd(['hostname', nodeName])
+        
+        def addNodes(nodes, clusterName):
+            ocfs2 = OvmOCFS2()
+            ocfs2._load()
+            isOnline = ocfs2._isClusterOnline(clusterName)
+            if not isOnline:
+                ocfs2._prepareConf(clusterName)
+            
+            for n in nodes:
+                ocfs2._addNode(n['name'], n['number'], n['ip_address'], 7777, clusterName, isOnline)
+            
+        def checkStaleCluster(clusterName):
+            if exists('/sys/kernel/config/cluster/'):
+                dirs = os.listdir('/sys/kernel/config/cluster/')
+                for dir in dirs:
+                    if dir != clusterName:
+                        errMsg = '''CloudStack detected a stale cluster (%s) on host %s. Please clean it up manually first, then add the host again:
+1) remove the host from CloudStack
+2) umount all OCFS2 devices on the host
+3) /etc/init.d/o2cb offline %s
+4) /etc/init.d/o2cb restart
+If this doesn't resolve the problem, please check the Oracle manual to see how to take a cluster offline.
+    ''' % (dir, successToMap(get_master_ip())['ip'], dir)
+                        raise Exception(errMsg)
+            
+        try:
+            checkStaleCluster(clusterName)
+            nodeString = nodeString.strip(";")
+            nodes = []
+            for n in nodeString.split(";"):
+                params = n.split(":")
+                if len(params) != 3: raise Exception("Wrong parameter (%s) in node string (%s)"%(n, nodeString))
+                node = {"number":params[0], "ip_address":params[1], "name":params[2]}
+                nodes.append(node)
+            
+            if len(nodes) > 255:
+                raise Exception("%s nodes exceed the maximum of 255 allowed by OCFS2"%len(nodes))
+            
+            configureHostName(nodes)
+            configureEtcHosts(nodes)
+            addNodes(nodes, clusterName)
+            OvmOCFS2()._start(clusterName)
+            fd = open(OCFS2_CONF, 'r')
+            conf = fd.readlines()
+            fd.close()
+            logger.debug(OvmStoragePool.prepareOCFS2Nodes, "Configure cluster.conf to:\n%s"%' '.join(conf))
+            rs = SUCC()
+            return rs
+        
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmStoragePool.prepareOCFS2Nodes, errmsg)
+            raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.prepareOCFS2Nodes), errmsg)
+    
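+    # Create a template from an existing volume: refuse while the owning vm is still
+    # running, then copy the raw image onto secondary storage under installPath.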
+    @staticmethod
+    def createTemplateFromVolume(secStorageMountPath, installPath, volumePath):
+        try:
+            secMountPoint = ""
+            if not isfile(volumePath): raise Exception("Cannot find %s"%volumePath)
+            vmCfg = join(dirname(volumePath), 'vm.cfg')
+            vmName = getVmNameFromConfigureFile(vmCfg)
+            if vmName in doCmd(['xm', 'list']):
+                raise Exception("%s is still running, please stop it first then create template again"%vmName)
+            
+            tmpUuid = get_uuid()
+            secMountPoint = join("/var/cloud/", tmpUuid)
+            OvmStoragePool()._mount(secStorageMountPath, secMountPoint)
+            installPath = installPath.lstrip('/')
+            destPath = join(secMountPoint, installPath)
+            # this prevents us from deleting the whole secondary storage if installPath is wrong
+            if destPath == secMountPoint: raise Exception("Install path equals the root of secondary storage (%s)"%destPath)
+            if exists(destPath):
+                logger.warning(OvmStoragePool.createTemplateFromVolume, "%s already exists, deleting it since it is most likely stale"%destPath)
+                doCmd(['rm', '-rf', destPath])
+            OvmStoragePool()._checkDirSizeForImage(secMountPoint, volumePath)
+            
+            os.makedirs(destPath)
+            newName = get_uuid() + ".raw"
+            destName = join(destPath, newName)
+            doCmd(['cp', volumePath, destName])
+            size = os.path.getsize(destName)
+            resInstallPath = join(installPath, newName)
+            OvmStoragePool()._umount(secMountPoint)
+            rs = toGson({"installPath":resInstallPath, "templateFileName":newName, "virtualSize":size, "physicalSize":size})
+            return rs
+        
+        except Exception, e:
+            try:
+                if exists(secMountPoint):
+                    OvmStoragePool()._umount(secMountPoint)
+            except Exception, e:
+                logger.warning(OvmStoragePool.createTemplateFromVolume, "umount %s failed"%secMountPoint)       
+                
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmStoragePool.createTemplateFromVolume, errmsg)
+            raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.createTemplateFromVolume), errmsg)
+    
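+    # Deleting a pool on the host side only unmounts its primary storage mount point.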
+    @staticmethod
+    def delete(uuid):
+        try:
+            sr = OvmStoragePool()._getSrByNameLable(uuid)
+            primaryStoragePath = sr.mountpoint
+            OvmStoragePool()._umount(primaryStoragePath)
+            rs = SUCC()
+            return rs
+        except Exception, e:
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmStoragePool.delete, errmsg)
+            raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.delete), errmsg) 
+        
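+    # Copy a volume between primary and secondary storage; toSec selects the direction
+    # (True: primary -> secondary, False: secondary -> primary).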
+    @staticmethod
+    def copyVolume(secStorageMountPath, volumeFolderOnSecStorage, volumePath, storagePoolUuid, toSec):
+        def copyToSecStorage(secMountPoint, volumeFolderOnSecStorage, volumePath):
+            if not isfile(volumePath): raise Exception("Cannot find volume at %s"%volumePath)
+            OvmStoragePool()._checkDirSizeForImage(secMountPoint, volumePath)
+            volumeFolderOnSecStorage = volumeFolderOnSecStorage.lstrip("/")
+            destPath = join(secMountPoint, volumeFolderOnSecStorage)
+            # this prevents us from deleting the whole secondary storage if volumeFolderOnSecStorage is wrong
+            if destPath == secMountPoint: raise Exception("Volume path equals the root of secondary storage (%s)"%destPath)
+            if exists(destPath):
+                logger.warning(OvmStoragePool.copyVolume, "%s already exists, deleting it first"%destPath)
+                doCmd(['rm', '-rf', destPath])
+            os.makedirs(destPath)
+            newName = get_uuid() + ".raw"
+            destName = join(destPath, newName)
+            doCmd(['cp', volumePath, destName])
+            return destName
+        
+        def copyToPrimary(secMountPoint, volumeFolderOnSecStorage, volumePath, primaryMountPath):
+            srcPath = join(secMountPoint, volumeFolderOnSecStorage.lstrip("/"), volumePath.lstrip("/"))
+            if not srcPath.endswith(".raw"): srcPath = srcPath + ".raw"
+            if not isfile(srcPath): raise Exception("Cannot find volume at %s"%srcPath)
+            if not exists(primaryMountPath): raise Exception("Primary storage (%s) seems to be gone"%primaryMountPath)
+            OvmStoragePool()._checkDirSizeForImage(primaryMountPath, srcPath)
+            destPath = join(primaryMountPath, "sharedDisk")
+            newName = get_uuid() + ".raw"
+            destName = join(destPath, newName)
+            doCmd(['cp', srcPath, destName])
+            return destName
+                      
+        secMountPoint = ""
+        try:
+            tmpUuid = get_uuid()
+            secMountPoint = join("/var/cloud/", tmpUuid)
+            OvmStoragePool()._mount(secStorageMountPath, secMountPoint)
+            if toSec:
+                resultPath = copyToSecStorage(secMountPoint, volumeFolderOnSecStorage, volumePath)
+            else:
+                sr = OvmStoragePool()._getSrByNameLable(storagePoolUuid)
+                primaryStoragePath = sr.mountpoint
+                resultPath = copyToPrimary(secMountPoint, volumeFolderOnSecStorage, volumePath, primaryStoragePath)
+            OvmStoragePool()._umount(secMountPoint)
+            
+            # accommodate the mgmt server's interface: it asks for 'installPath' but only wants the volume name without the suffix
+            volumeUuid = basename(resultPath)
+            if volumeUuid.endswith(".raw"):
+                volumeUuid = volumeUuid[:-len(".raw")]
+            rs = toGson({"installPath":volumeUuid})
+            return rs
+        except Exception, e:
+            try:
+                if exists(secMountPoint):
+                    OvmStoragePool()._umount(secMountPoint)
+            except Exception, e:
+                logger.warning(OvmStoragePool.copyVolume, "umount %s failed"%secMountPoint)       
+                
+            errmsg = fmt_err_msg(e)
+            logger.error(OvmStoragePool.copyVolume, errmsg)
+            raise XmlRpcFault(toErrCode(OvmStoragePool, OvmStoragePool.copyVolume), errmsg)
+                
+                
+            
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-cloudstack/blob/e2a32ab4/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVifModule.py
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVifModule.py b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVifModule.py
new file mode 100755
index 0000000..3405c5a
--- /dev/null
+++ b/plugins/hypervisors/ovm/scripts/vm/hypervisor/ovm/OvmVifModule.py
@@ -0,0 +1,62 @@
+# Copyright 2012 Citrix Systems, Inc. Licensed under the
+# Apache License, Version 2.0 (the "License"); you may not use this
+# file except in compliance with the License.  Citrix Systems, Inc.
+# reserves all rights not expressly granted by the License.
+# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# 
+# Automatically generated by addcopyright.py at 04/03/2012
+'''
+Created on May 17, 2011
+
+@author: frank
+'''
+from OvmCommonModule import *
+
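+# json decoder/encoder pair for OvmVif; the helpers below convert between OvmVif
+# objects and the GSON-style strings used by the callers.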
+class OvmVifDecoder(json.JSONDecoder):
+    def decode(self, jStr):
+        deDict = asciiLoads(jStr)
+        vif = OvmVif()
+        vif.mac = deDict['mac']
+        vif.bridge = deDict['bridge']
+        return vif
+    
+class OvmVifEncoder(json.JSONEncoder):
+    def default(self, obj):
+        if not isinstance(obj, OvmVif): raise Exception("%s is not an instance of OvmVif"%type(obj))
+        dct = {}
+        safeDictSet(obj, dct, 'mac')
+        safeDictSet(obj, dct, 'bridge')
+        safeDictSet(obj, dct, 'type')
+        safeDictSet(obj, dct, 'name')
+        return dct    
+
+def fromOvmVif(vif):
+    return normalizeToGson(json.dumps(vif, cls=OvmVifEncoder))
+
+def fromOvmVifList(vifList):
+    return [fromOvmVif(v) for v in vifList]
+
+def toOvmVif(jStr):
+    return json.loads(jStr, cls=OvmVifDecoder)
+
+def toOvmVifList(jStr):
+    vifs = []
+    for i in jStr:
+        vif = toOvmVif(i)
+        vifs.append(vif)
+    return vifs
+
+class OvmVif(OvmObject):
+    name = ''
+    mac = ''
+    bridge = ''
+    type = ''
+    mode = ''
+    
+    def toXenString(self):
+        return "%s,%s,%s"%(self.mac, self.bridge, self.type)

