cloudstack-commits mailing list archives

From ahu...@apache.org
Subject [5/8] Merged vmops and vmopspremium. Rename all xapi plugins to start with cloud-plugin-. Rename vmops to cloud-plugin-generic.
Date Fri, 15 Nov 2013 18:24:41 GMT
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/cloud-plugin-s3xen
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/cloud-plugin-s3xen b/scripts/vm/hypervisor/xenserver/cloud-plugin-s3xen
new file mode 100644
index 0000000..9830897
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/cloud-plugin-s3xen
@@ -0,0 +1,428 @@
+#!/usr/bin/python
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Version @VERSION@
+#
+# A plugin for executing scripts needed by CloudStack
+from copy import copy
+from datetime import datetime
+from httplib import *
+from string import join
+from string import split
+
+import os
+import sys
+import time
+import md5 as md5mod
+import sha
+import base64
+import hmac
+import traceback
+import urllib2
+from xml.dom.minidom import parseString
+
+import XenAPIPlugin
+sys.path.extend(["/opt/xensource/sm/"])
+import util
+
+NULL = 'null'
+
+# Value conversion utility functions ...
+
+
+def to_none(value):
+
+    if value is None:
+        return None
+    if isinstance(value, basestring) and value.strip().lower() == NULL:
+        return None
+    return value
+
+
+def to_bool(value):
+
+    if to_none(value) is None:
+        return False
+    if isinstance(value, basestring) and value.strip().lower() == 'true':
+        return True
+    if isinstance(value, int) and value:
+        return True
+    return False
+
+
+def to_integer(value, default):
+
+    if to_none(value) is None:
+        return default
+    try:
+        return int(value)
+    except (TypeError, ValueError):
+        return default
+
+
+def optional_str_value(value, default):
+
+    if is_not_blank(value):
+        return value
+    return default
+
+
+def is_blank(value):
+
+    return not is_not_blank(value)
+
+
+def is_not_blank(value):
+
+    if to_none(value) is None or not isinstance(value, basestring):
+        return False
+    if value.strip() == '':
+        return False
+    return True
+
+
+def get_optional_key(map, key, default=''):
+
+    if key in map:
+        return map[key]
+    return default
+
+
+def log(message):
+
+    util.SMlog('#### CLOUD %s ####' % message)
+
+
+def echo(fn):
+    def wrapped(*v, **k):
+        name = fn.__name__
+        log("enter %s ####" % name)
+        res = fn(*v, **k)
+        log("exit %s with result %s" % (name, res))
+        return res
+    return wrapped
+
+
+def require_str_value(value, error_message):
+
+    if is_not_blank(value):
+        return value
+
+    raise ValueError(error_message)
+
+
+def retry(max_attempts, fn):
+
+    attempts = 1
+    while attempts <= max_attempts:
+        log("Attempting execution " + str(attempts) + "/" + str(
+            max_attempts) + " of " + fn.__name__)
+        try:
+            return fn()
+        except:
+            if (attempts >= max_attempts):
+                raise
+            attempts = attempts + 1
+
+
+def compute_md5(filename, buffer_size=8192):
+
+    hasher = md5mod.md5()
+
+    file = open(filename, 'rb')
+    try:
+
+        data = file.read(buffer_size)
+        while data != "":
+            hasher.update(data)
+            data = file.read(buffer_size)
+
+        return base64.encodestring(hasher.digest())[:-1]
+
+    finally:
+
+        file.close()
+
+
+class S3Client(object):
+
+    DEFAULT_END_POINT = 's3.amazonaws.com'
+    DEFAULT_CONNECTION_TIMEOUT = 50000
+    DEFAULT_SOCKET_TIMEOUT = 50000
+    DEFAULT_MAX_ERROR_RETRY = 3
+
+    HEADER_CONTENT_MD5 = 'Content-MD5'
+    HEADER_CONTENT_TYPE = 'Content-Type'
+    HEADER_CONTENT_LENGTH = 'Content-Length'
+
+    def __init__(self, access_key, secret_key, end_point=None,
+                 https_flag=None, connection_timeout=None, socket_timeout=None,
+                 max_error_retry=None):
+
+        self.access_key = require_str_value(
+            access_key, 'An access key must be specified.')
+        self.secret_key = require_str_value(
+            secret_key, 'A secret key must be specified.')
+        self.end_point = optional_str_value(end_point, self.DEFAULT_END_POINT)
+        self.https_flag = to_bool(https_flag)
+        self.connection_timeout = to_integer(
+            connection_timeout, self.DEFAULT_CONNECTION_TIMEOUT)
+        self.socket_timeout = to_integer(
+            socket_timeout, self.DEFAULT_SOCKET_TIMEOUT)
+        self.max_error_retry = to_integer(
+            max_error_retry, self.DEFAULT_MAX_ERROR_RETRY)
+
+    def build_canocialized_resource(self, bucket, key):
+        if not key.startswith("/"):
+            uri = bucket + "/" + key
+        else:
+            uri = bucket + key
+
+        return "/" + uri
+
+    def noop_send_body(connection):
+        pass
+
+    def noop_read(response):
+        return response.read()
+
+    def do_operation(
+        self, method, bucket, key, input_headers={},
+            fn_send_body=noop_send_body, fn_read=noop_read):
+
+        headers = copy(input_headers)
+        headers['Expect'] = '100-continue'
+
+        uri = self.build_canocialized_resource(bucket, key)
+        signature, request_date = self.sign_request(method, uri, headers)
+        headers['Authorization'] = "AWS " + self.access_key + ":" + signature
+        headers['Date'] = request_date
+
+        def perform_request():
+            connection = None
+            if self.https_flag:
+                connection = HTTPSConnection(self.end_point)
+            else:
+                connection = HTTPConnection(self.end_point)
+
+            try:
+                connection.timeout = self.socket_timeout
+                connection.putrequest(method, uri)
+
+                for k, v in headers.items():
+                    connection.putheader(k, v)
+                connection.endheaders()
+
+                fn_send_body(connection)
+
+                response = connection.getresponse()
+                log("Sent " + method + " request to " + self.end_point +
+                    uri + " with headers " + str(headers) +
+                    ".  Received response status " + str(response.status) +
+                    ": " + response.reason)
+
+                return fn_read(response)
+
+            finally:
+                connection.close()
+
+        return retry(self.max_error_retry, perform_request)
+
+    '''
+    See http://bit.ly/MMC5de for more information regarding the creation of
+    AWS authorization tokens and header signing
+    '''
+    def sign_request(self, operation, canocialized_resource, headers):
+
+        request_date = datetime.utcnow(
+        ).strftime('%a, %d %b %Y %H:%M:%S +0000')
+
+        content_hash = get_optional_key(headers, self.HEADER_CONTENT_MD5)
+        content_type = get_optional_key(headers, self.HEADER_CONTENT_TYPE)
+
+        string_to_sign = join(
+            [operation, content_hash, content_type, request_date,
+                canocialized_resource], '\n')
+
+        signature = base64.encodestring(
+            hmac.new(self.secret_key, string_to_sign.encode('utf8'),
+                     sha).digest())[:-1]
+
+        return signature, request_date
+        
+    def getText(self, nodelist):
+        rc = []
+        for node in nodelist:
+            if node.nodeType == node.TEXT_NODE:
+                rc.append(node.data)
+        return ''.join(rc)
+
+    def multiUpload(self, bucket, key, src_fileName, chunkSize=5 * 1024 * 1024):
+        uploadId = {}
+
+        def readInitialMultipart(response):
+            data = response.read()
+            xmlResult = parseString(data)
+            result = xmlResult.getElementsByTagName("InitiateMultipartUploadResult")[0]
+            upload = result.getElementsByTagName("UploadId")[0]
+            uploadId["0"] = upload.childNodes[0].data
+
+        self.do_operation('POST', bucket, key + "?uploads", fn_read=readInitialMultipart)
+
+        fileSize = os.path.getsize(src_fileName) 
+        parts = fileSize / chunkSize + ((fileSize % chunkSize) and 1)
+        part = 1
+        srcFile = open(src_fileName, 'rb')
+        etags = []
+        while part <= parts:
+            offset = part - 1
+            size = min(fileSize - offset * chunkSize, chunkSize)
+            headers = {
+                self.HEADER_CONTENT_LENGTH: size
+            }
+            def send_body(connection):
+                srcFile.seek(offset * chunkSize)
+                block = srcFile.read(size)
+                connection.send(block)
+
+            def read_multiPart(response):
+                etag = response.getheader('ETag')
+                etags.append((part, etag))
+            self.do_operation("PUT", bucket, "%s?partNumber=%s&uploadId=%s"%(key, part, uploadId["0"]), headers, send_body, read_multiPart)
+            part = part + 1
+        srcFile.close()
+
+        data = [] 
+        partXml = "<Part><PartNumber>%i</PartNumber><ETag>%s</ETag></Part>"
+        for etag in etags:
+            data.append(partXml%etag)
+        msg = "<CompleteMultipartUpload>%s</CompleteMultipartUpload>"%("".join(data))
+        size = len(msg)
+        headers = {
+            self.HEADER_CONTENT_LENGTH: size
+        }
+        def send_complete_multipart(connection):
+            connection.send(msg) 
+        self.do_operation("POST", bucket, "%s?uploadId=%s"%(key, uploadId["0"]), headers, send_complete_multipart)
+
+    def put(self, bucket, key, src_filename, maxSingleUpload):
+
+        if not os.path.isfile(src_filename):
+            raise Exception(
+                "Attempt to put " + src_filename + " that does not exist.")
+
+        size = os.path.getsize(src_filename)
+        if size > maxSingleUpload or maxSingleUpload == 0:
+            return self.multiUpload(bucket, key, src_filename)
+           
+        headers = {
+            self.HEADER_CONTENT_MD5: compute_md5(src_filename),
+        
+            self.HEADER_CONTENT_TYPE: 'application/octet-stream',
+            self.HEADER_CONTENT_LENGTH: str(os.stat(src_filename).st_size),
+        }
+
+        def send_body(connection):
+            src_file = open(src_filename, 'rb')
+            try:
+                while True:
+                    block = src_file.read(8192)
+                    if not block:
+                        break
+                    connection.send(block)
+
+            finally:
+                src_file.close()
+
+        self.do_operation('PUT', bucket, key, headers, send_body)
+
+    def get(self, bucket, key, target_filename):
+
+        def read(response):
+
+            file = open(target_filename, 'wb')
+
+            try:
+
+                while True:
+                    block = response.read(8192)
+                    if not block:
+                        break
+                    file.write(block)
+            finally:
+                file.close()
+
+        return self.do_operation('GET', bucket, key, fn_read=read)
+
+    def delete(self, bucket, key):
+
+        return self.do_operation('DELETE', bucket, key)
+
+
+def parseArguments(args):
+
+    # The keys in the args map will correspond to the properties defined on
+    # the com.cloud.utils.S3Utils#ClientOptions interface
+    client = S3Client(
+        args['accessKey'], args['secretKey'], args['endPoint'],
+        args['https'], args['connectionTimeout'], args['socketTimeout'])
+
+    operation = args['operation']
+    bucket = args['bucket']
+    key = args['key']
+    filename = args['filename']
+    maxSingleUploadBytes = int(args["maxSingleUploadSizeInBytes"])
+
+    if is_blank(operation):
+        raise ValueError('An operation must be specified.')
+
+    if is_blank(bucket):
+        raise ValueError('A bucket must be specified.')
+
+    if is_blank(key):
+        raise ValueError('A key must be specified.')
+
+    if is_blank(filename):
+        raise ValueError('A filename must be specified.')
+
+    return client, operation, bucket, key, filename, maxSingleUploadBytes
+
+
+@echo
+def s3(session, args):
+
+    client, operation, bucket, key, filename, maxSingleUploadBytes = parseArguments(args)
+
+    try:
+
+        if operation == 'put':
+            client.put(bucket, key, filename, maxSingleUploadBytes)
+        elif operation == 'get':
+            client.get(bucket, key, filename)
+        elif operation == 'delete':
+            client.delete(bucket, key)
+        else:
+            raise RuntimeError(
+                "S3 plugin does not support operation " + operation)
+
+        return 'true'
+
+    except:
+        log("Operation " + operation + " on file " + filename +
+            " from/in bucket " + bucket + " key " + key)
+        log(traceback.format_exc())
+        return 'false'
+
+if __name__ == "__main__":
+    XenAPIPlugin.dispatch({"s3": s3})
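
For context, functions exported by this plugin are invoked from dom0 through the
standard XenAPI host.call_plugin mechanism (the same pattern used by the
mockxcpplugin.py test helper removed further down in this commit). The sketch
below is not part of the commit: the session credentials and all argument values
are placeholders, and the plugin name is assumed to match the new file name.
Argument keys mirror parseArguments() above; XenAPI plugin arguments must all be
strings.

import XenAPI

session = XenAPI.Session("http://localhost")
session.xenapi.login_with_password("root", "password")
host = session.xenapi.host.get_all()[0]

args = {
    "accessKey": "ACCESS_KEY",          # placeholder credentials
    "secretKey": "SECRET_KEY",
    "endPoint": "s3.amazonaws.com",
    "https": "true",
    "connectionTimeout": "50000",
    "socketTimeout": "50000",
    "operation": "put",
    "bucket": "example-bucket",
    "key": "templates/example.vhd",
    "filename": "/tmp/example.vhd",
    "maxSingleUploadSizeInBytes": str(5 * 1024 * 1024 * 1024),
}
# s3() returns the string 'true' on success and 'false' on failure.
print session.xenapi.host.call_plugin(host, "cloud-plugin-s3xen", "s3", args)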

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/cloud-plugin-snapshot
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/cloud-plugin-snapshot b/scripts/vm/hypervisor/xenserver/cloud-plugin-snapshot
new file mode 100644
index 0000000..7ad892d
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/cloud-plugin-snapshot
@@ -0,0 +1,597 @@
+#!/usr/bin/python
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Version @VERSION@
+#
+# A plugin for executing scripts needed by CloudStack
+
+import os, sys, time
+import XenAPIPlugin
+sys.path.append("/opt/xensource/sm/")
+import SR, VDI, SRCommand, util, lvutil
+from util import CommandException
+import vhdutil
+import shutil
+import lvhdutil
+import errno
+import subprocess
+import xs_errors
+import cleanup
+import stat
+import random
+# "cloud-plugin-lib" contains a hyphen and cannot be imported with a plain
+# import statement; load it by path instead (the XenServer plugin directory
+# /etc/xapi.d/plugins/ is assumed here).
+import imp
+lib = imp.load_source("cloudplugin", "/etc/xapi.d/plugins/cloud-plugin-lib")
+import logging
+
+lib.setup_logging("/var/log/cloud/cloud-plugins.log")
+
+VHD_UTIL = '/usr/bin/vhd-util'
+VHD_PREFIX = 'VHD-'
+CLOUD_DIR = '/var/run/cloud_mount'
+
+def echo(fn):
+    def wrapped(*v, **k):
+        name = fn.__name__
+        logging.debug("#### CLOUD enter  %s ####" % name )
+        res = fn(*v, **k)
+        logging.debug("#### CLOUD exit  %s ####" % name )
+        return res
+    return wrapped
+
+
+@echo
+def create_secondary_storage_folder(session, args):
+    local_mount_path = None
+
+    logging.debug("create_secondary_storage_folder, args: " + str(args))
+
+    try:
+        try:
+            # Mount the remote resource folder locally
+            remote_mount_path = args["remoteMountPath"]
+            local_mount_path = os.path.join(CLOUD_DIR, util.gen_uuid())
+            mount(remote_mount_path, local_mount_path)
+
+            # Create the new folder
+            new_folder = local_mount_path + "/" + args["newFolder"]
+            if not os.path.isdir(new_folder):
+                current_umask = os.umask(0)
+                os.makedirs(new_folder)
+                os.umask(current_umask)
+        except OSError, (errno, strerror):
+            errMsg = "create_secondary_storage_folder failed: errno: " + str(errno) + ", strerr: " + strerror
+            logging.debug(errMsg)
+            raise xs_errors.XenError(errMsg)
+        except:
+            errMsg = "create_secondary_storage_folder failed."
+            logging.debug(errMsg)
+            raise xs_errors.XenError(errMsg)
+    finally:
+        if local_mount_path != None:
+            # Unmount the local folder
+            umount(local_mount_path)
+            # Remove the local folder
+            os.system("rmdir " + local_mount_path)
+        
+    return "1"
+
+@echo
+def delete_secondary_storage_folder(session, args):
+    local_mount_path = None
+
+    logging.debug("delete_secondary_storage_folder, args: " + str(args))
+
+    try:
+        try:
+            # Mount the remote resource folder locally
+            remote_mount_path = args["remoteMountPath"]
+            local_mount_path = os.path.join(CLOUD_DIR, util.gen_uuid())
+            mount(remote_mount_path, local_mount_path)
+
+            # Delete the specified folder
+            folder = local_mount_path + "/" + args["folder"]
+            if os.path.isdir(folder):
+                os.system("rm -f " + folder + "/*")
+                os.system("rmdir " + folder)
+        except OSError, (errno, strerror):
+            errMsg = "delete_secondary_storage_folder failed: errno: " + str(errno) + ", strerr: " + strerror
+            logging.debug(errMsg)
+            raise xs_errors.XenError(errMsg)
+        except:
+            errMsg = "delete_secondary_storage_folder failed."
+            logging.debug(errMsg)
+            raise xs_errors.XenError(errMsg)
+    finally:
+        if local_mount_path != None:
+            # Unmount the local folder
+            umount(local_mount_path)
+            # Remove the local folder
+            os.system("rmdir " + local_mount_path)
+        
+    return "1"
+     
+@echo
+def post_create_private_template(session, args):
+    local_mount_path = None
+    try:
+        try:
+            # get local template folder 
+            templatePath = args["templatePath"]
+            local_mount_path = os.path.join(CLOUD_DIR, util.gen_uuid())
+            mount(templatePath, local_mount_path)
+            # Retrieve args
+            filename = args["templateFilename"]
+            name = args["templateName"]
+            description = args["templateDescription"]
+            checksum = args["checksum"]
+            file_size = args["size"]
+            virtual_size = args["virtualSize"]
+            template_id = args["templateId"]
+           
+            # Create the template.properties file
+            template_properties_install_path = local_mount_path + "/template.properties"
+            f = open(template_properties_install_path, "w")
+            f.write("filename=" + filename + "\n")
+            f.write("vhd=true\n")
+            f.write("id=" + template_id + "\n")
+            f.write("vhd.filename=" + filename + "\n")
+            f.write("public=false\n")
+            f.write("uniquename=" + name + "\n")
+            f.write("vhd.virtualsize=" + virtual_size + "\n")
+            f.write("virtualsize=" + virtual_size + "\n")
+            f.write("checksum=" + checksum + "\n")
+            f.write("hvm=true\n")
+            f.write("description=" + description + "\n")
+            f.write("vhd.size=" + str(file_size) + "\n")
+            f.write("size=" + str(file_size) + "\n")
+            f.close()
+            logging.debug("Created template.properties file")
+           
+            # Set permissions
+            permissions = stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH
+            os.chmod(template_properties_install_path, permissions)
+            logging.debug("Set permissions on template and template.properties")
+
+        except:
+            errMsg = "post_create_private_template failed."
+            logging.debug(errMsg)
+            raise xs_errors.XenError(errMsg)
+
+    finally:
+        if local_mount_path != None:
+            # Unmount the local folder
+            umount(local_mount_path)
+            # Remove the local folder
+            os.system("rmdir " + local_mount_path)
+    return "1" 
+  
+def isfile(path, isISCSI):
+    errMsg = ''
+    exists = True
+    if isISCSI:
+        exists = checkVolumeAvailablility(path)
+    else:
+        exists = os.path.isfile(path)
+        
+    if not exists:
+        errMsg = "File " + path + " does not exist."
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    return errMsg
+
+def copyfile(fromFile, toFile, isISCSI):
+    logging.debug("Starting to copy " + fromFile + " to " + toFile)
+    errMsg = ''
+    try:
+        cmd = ['dd', 'if=' + fromFile, 'of=' + toFile, 'bs=4M']
+        txt = util.pread2(cmd)
+    except:
+        try:
+            os.system("rm -f " + toFile)
+        except:
+            txt = ''
+        txt = ''
+        errMsg = "Error while copying " + fromFile + " to " + toFile + " in secondary storage"
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+
+    logging.debug("Successfully copied " + fromFile + " to " + toFile)
+    return errMsg
+
+def chdir(path):
+    try:
+        os.chdir(path)
+    except OSError, (errno, strerror):
+        errMsg = "Unable to chdir to " + path + " because of OSError with errno: " + str(errno) + " and strerr: " + strerror
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    logging.debug("Chdired to " + path)
+    return
+
+def scanParent(path):
+    # Do a scan for the parent for ISCSI volumes
+    # Note that the parent need not be visible on the XenServer
+    parentUUID = ''
+    try:
+        lvName = os.path.basename(path)
+        dirname = os.path.dirname(path)
+        vgName = os.path.basename(dirname) 
+        vhdInfo = vhdutil.getVHDInfoLVM(lvName, lvhdutil.extractUuid, vgName)
+        parentUUID = vhdInfo.parentUuid
+    except:
+        errMsg = "Could not get vhd parent of " + path
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    return parentUUID
+
+def getParent(path, isISCSI):
+    parentUUID = ''
+    try :
+        if isISCSI:
+            parentUUID = vhdutil.getParent(path, lvhdutil.extractUuid)
+        else:
+            parentUUID = vhdutil.getParent(path, cleanup.FileVDI.extractUuid)
+    except:
+        errMsg = "Could not get vhd parent of " + path
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    return parentUUID
+
+def getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI):
+    snapshotVHD    = getVHD(snapshotUuid, isISCSI)
+    snapshotPath   = os.path.join(primarySRPath, snapshotVHD)
+
+    baseCopyUuid = ''
+    if isISCSI:
+        checkVolumeAvailablility(snapshotPath)
+        baseCopyUuid = scanParent(snapshotPath)
+    else:
+        baseCopyUuid = getParent(snapshotPath, isISCSI)
+    
+    logging.debug("Base copy of snapshotUuid: " + snapshotUuid + " is " + baseCopyUuid)
+    return baseCopyUuid
+
+def setParent(parent, child):
+    try:
+        cmd = [VHD_UTIL, "modify", "-p", parent, "-n", child]
+        txt = util.pread2(cmd)
+    except:
+        errMsg = "Unexpected error while trying to set parent of " + child + " to " + parent 
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    logging.debug("Successfully set parent of " + child + " to " + parent)
+    return
+
+def rename(originalVHD, newVHD):
+    try:
+        os.rename(originalVHD, newVHD)
+    except OSError, (errno, strerror):
+        errMsg = "OSError while renaming " + origiinalVHD + " to " + newVHD + "with errno: " + str(errno) + " and strerr: " + strerror
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    return
+
+def makedirs(path):
+    if not os.path.isdir(path):
+        try:
+            os.makedirs(path)
+        except OSError, (errno, strerror):
+            umount(path)
+            if os.path.isdir(path):
+                return
+            errMsg = "OSError while creating " + path + " with errno: " + str(errno) + " and strerr: " + strerror
+            logging.debug(errMsg)
+            raise xs_errors.XenError(errMsg)
+    return
+
+def mount(remoteDir, localDir):
+    makedirs(localDir)
+    options = "soft,tcp,timeo=133,retrans=1"
+    try: 
+        cmd = ['mount', '-o', options, remoteDir, localDir]
+        txt = util.pread2(cmd)
+    except:
+        txt = ''
+        errMsg = "Unexpected error while trying to mount " + remoteDir + " to " + localDir 
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    logging.debug("Successfully mounted " + remoteDir + " to " + localDir)
+
+    return
+
+def umount(localDir):
+    try: 
+        cmd = ['umount', localDir]
+        util.pread2(cmd)
+    except CommandException:
+        errMsg = "CommandException raised while trying to umount " + localDir 
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+
+    logging.debug("Successfully unmounted " + localDir)
+    return
+
+def mountSnapshotsDir(secondaryStorageMountPath, localMountPointPath, path):
+    # The aim is to mount secondaryStorageMountPath on 
+    # And create <accountId>/<instanceId> dir on it, if it doesn't exist already.
+    # Assuming that secondaryStorageMountPath  exists remotely
+
+    # Just mount secondaryStorageMountPath/<relativeDir>/SecondaryStorageHost/ everytime
+    # Never unmount.
+    # path is like "snapshots/account/volumeId", we mount secondary_storage:/snapshots
+    relativeDir = path.split("/")[0]
+    restDir = "/".join(path.split("/")[1:])
+    snapshotsDir = os.path.join(secondaryStorageMountPath, relativeDir)
+
+    makedirs(localMountPointPath)
+    # mount snapshotsDir on localMountPointPath unless something is
+    # already mounted there
+    if os.path.ismount(localMountPointPath):
+        # There is more than one secondary storage per zone.
+        # And we are mounting each sec storage under a zone-specific directory
+        # So two secondary storage snapshot dirs will never get mounted on the same point on the same XenServer.
+        logging.debug("The remote snapshots directory has already been mounted on " + localMountPointPath)
+    else:
+        mount(snapshotsDir, localMountPointPath)
+
+    # Create accountId/instanceId dir on localMountPointPath, if it doesn't exist
+    backupsDir = os.path.join(localMountPointPath, restDir)
+    makedirs(backupsDir)
+    return backupsDir
+
+def unmountAll(path):
+    try:
+        for dir in os.listdir(path):
+            if dir.isdigit():
+                logging.debug("Unmounting Sub-Directory: " + dir)
+                localMountPointPath = os.path.join(path, dir)
+                umount(localMountPointPath)
+    except:
+        logging.debug("Ignoring the error while trying to unmount the snapshots dir")
+
+@echo
+def unmountSnapshotsDir(session, args):
+    dcId = args['dcId']
+    localMountPointPath = os.path.join(CLOUD_DIR, dcId)
+    localMountPointPath = os.path.join(localMountPointPath, "snapshots")
+    unmountAll(localMountPointPath)
+    try:
+        umount(localMountPointPath)
+    except:
+        logging.debug("Ignoring the error while trying to unmount the snapshots dir.")
+
+    return "1"
+
+def getPrimarySRPath(primaryStorageSRUuid, isISCSI):
+    if isISCSI:
+        primarySRDir = lvhdutil.VG_PREFIX + primaryStorageSRUuid
+        return os.path.join(lvhdutil.VG_LOCATION, primarySRDir)
+    else:
+        return os.path.join(SR.MOUNT_BASE, primaryStorageSRUuid)
+
+def getBackupVHD(UUID):
+    return UUID + '.' + SR.DEFAULT_TAP
+
+def getVHD(UUID, isISCSI):
+    if isISCSI:
+        return VHD_PREFIX + UUID
+    else:
+        return UUID + '.' + SR.DEFAULT_TAP
+
+def getIsTrueString(stringValue):
+    booleanValue = False
+    if (stringValue and stringValue == 'true'):
+        booleanValue = True
+    return booleanValue 
+
+def makeUnavailable(uuid, primarySRPath, isISCSI):
+    if not isISCSI:
+        return
+    VHD = getVHD(uuid, isISCSI)
+    path = os.path.join(primarySRPath, VHD)
+    manageAvailability(path, '-an')
+    return
+
+def manageAvailability(path, value):
+    if path.__contains__("/var/run/sr-mount"):
+        return
+    logging.debug("Setting availability of " + path + " to " + value)
+    try:
+        cmd = ['/usr/sbin/lvchange', value, path]
+        util.pread2(cmd)
+    except: #CommandException, (rc, cmdListStr, stderr):
+        #errMsg = "CommandException thrown while executing: " + cmdListStr + " with return code: " + str(rc) + " and stderr: " + stderr
+        errMsg = "Unexpected exception thrown by lvchange"
+        logging.debug(errMsg)
+        if value == "-ay":
+            # Raise an error only if we are trying to make it available.
+            # Just warn if we are trying to make it unavailable after the 
+            # snapshot operation is done.
+            raise xs_errors.XenError(errMsg)
+    return
+
+
+def checkVolumeAvailablility(path):
+    try:
+        if not isVolumeAvailable(path):
+            # The VHD file is not available on XenSever. The volume is probably
+            # inactive or detached.
+            # Do lvchange -ay to make it available on XenServer
+            manageAvailability(path, '-ay')
+    except:
+        errMsg = "Could not determine status of ISCSI path: " + path
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    
+    success = False
+    i = 0
+    while i < 6:
+        i = i + 1
+        # Check if the vhd is actually visible by checking for the link
+        # set isISCSI to true
+        success = isVolumeAvailable(path)
+        if success:
+            logging.debug("Made vhd: " + path + " available and confirmed that it is visible")
+            break
+
+        # Sleep for 10 seconds before checking again.
+        time.sleep(10)
+
+    # If not visible within 1 min fail
+    if not success:
+        logging.debug("Could not make vhd: " +  path + " available despite waiting for 1 minute. Does it exist?")
+
+    return success
+
+def isVolumeAvailable(path):
+    # Check if iscsi volume is available on this XenServer.
+    status = "0"
+    try:
+        p = subprocess.Popen(["/bin/bash", "-c", "if [ -L " + path + " ]; then echo 1; else echo 0;fi"], stdout=subprocess.PIPE)
+        status = p.communicate()[0].strip("\n")
+    except:
+        errMsg = "Could not determine status of ISCSI path: " + path
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+
+    return (status == "1")  
+
+def getVhdParent(session, args):
+    logging.debug("getParent with " + str(args))
+    primaryStorageSRUuid      = args['primaryStorageSRUuid']
+    snapshotUuid              = args['snapshotUuid']
+    isISCSI                   = getIsTrueString(args['isISCSI']) 
+
+    primarySRPath = getPrimarySRPath(primaryStorageSRUuid, isISCSI)
+    logging.debug("primarySRPath: " + primarySRPath)
+
+    baseCopyUuid = getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI)
+
+    return  baseCopyUuid
+
+
+def backupSnapshot(session, args):
+    logging.debug("Called backupSnapshot with " + str(args))
+    primaryStorageSRUuid      = args['primaryStorageSRUuid']
+    secondaryStorageMountPath = args['secondaryStorageMountPath']
+    snapshotUuid              = args['snapshotUuid']
+    prevBackupUuid            = args['prevBackupUuid']
+    backupUuid                = args['backupUuid']
+    isISCSI                   = getIsTrueString(args['isISCSI'])
+    path = args['path']
+    localMountPoint = args['localMountPoint']
+    primarySRPath = getPrimarySRPath(primaryStorageSRUuid, isISCSI)
+    logging.debug("primarySRPath: " + primarySRPath)
+
+    baseCopyUuid = getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI)
+    baseCopyVHD  = getVHD(baseCopyUuid, isISCSI)
+    baseCopyPath = os.path.join(primarySRPath, baseCopyVHD)
+    logging.debug("Base copy path: " + baseCopyPath)
+
+
+    # Mount secondary storage mount path on XenServer along the path
+    # /var/run/sr-mount/<dcId>/snapshots/ and create <accountId>/<volumeId> dir
+    # on it.
+    backupsDir = mountSnapshotsDir(secondaryStorageMountPath, localMountPoint, path)
+    logging.debug("Backups dir " + backupsDir)
+    prevBackupUuid = prevBackupUuid.split("/")[-1]
+    # Check existence of snapshot on primary storage
+    isfile(baseCopyPath, isISCSI)
+    if prevBackupUuid:
+        # Check existence of prevBackupFile
+        prevBackupVHD = getBackupVHD(prevBackupUuid)
+        prevBackupFile = os.path.join(backupsDir, prevBackupVHD)
+        isfile(prevBackupFile, False)
+
+    # copy baseCopyPath to backupsDir with new uuid
+    backupVHD = getBackupVHD(backupUuid)  
+    backupFile = os.path.join(backupsDir, backupVHD)
+    logging.debug("Back up " + baseCopyUuid + " to Secondary Storage as " + backupUuid)
+    copyfile(baseCopyPath, backupFile, isISCSI)
+    vhdutil.setHidden(backupFile, False)
+
+    # Because the primary storage is always scanned, the parent of this base copy is always the first base copy.
+    # We don't want that, we want a chain of VHDs each of which is a delta from the previous.
+    # So set the parent of the current baseCopyVHD to prevBackupVHD 
+    if prevBackupUuid:
+        # If there was a previous snapshot
+        setParent(prevBackupFile, backupFile)
+
+    txt = "1#" + backupUuid
+    return txt
+
+@echo
+def deleteSnapshotBackup(session, args):
+    logging.debug("Calling deleteSnapshotBackup with " + str(args))
+    secondaryStorageMountPath = args['secondaryStorageMountPath']
+    backupUUID                = args['backupUUID']
+    path = args['path']
+    localMountPoint = args['localMountPoint']
+
+    backupsDir = mountSnapshotsDir(secondaryStorageMountPath, localMountPoint, path)
+    # chdir to the backupsDir for convenience
+    chdir(backupsDir)
+
+    backupVHD = getBackupVHD(backupUUID)
+    logging.debug("checking existence of " + backupVHD)
+
+    # The backupVHD is on secondary which is NFS and not ISCSI.
+    if not os.path.isfile(backupVHD):
+        logging.debug("backupVHD " + backupVHD + "does not exist. Not trying to delete it")
+        return "1"
+    logging.debug("backupVHD " + backupVHD + " exists.")
+        
+    # Just delete the backupVHD
+    try:
+        os.remove(backupVHD)
+    except OSError, (errno, strerror):
+        errMsg = "OSError while removing " + backupVHD + " with errno: " + str(errno) + " and strerr: " + strerror
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+
+    return "1"
+   
+@echo
+def revert_memory_snapshot(session, args):
+    logging.debug("Calling revert_memory_snapshot with " + str(args))
+    vmName = args['vmName']
+    snapshotUUID = args['snapshotUUID']
+    oldVmUuid = args['oldVmUuid']
+    snapshotMemory = args['snapshotMemory']
+    hostUUID = args['hostUUID']
+    try:
+        cmd = '''xe vbd-list vm-uuid=%s | grep 'vdi-uuid' | grep -v 'not in database' | sed -e 's/vdi-uuid ( RO)://g' ''' % oldVmUuid
+        vdiUuids = os.popen(cmd).read().split()
+        cmd2 = '''xe vm-param-get param-name=power-state uuid=''' + oldVmUuid
+        if os.popen(cmd2).read().split()[0] != 'halted':
+            os.system("xe vm-shutdown force=true vm=" + vmName)
+        os.system("xe vm-destroy uuid=" + oldVmUuid)
+        os.system("xe snapshot-revert snapshot-uuid=" + snapshotUUID)
+        if snapshotMemory == 'true':
+            os.system("xe vm-resume vm=" + vmName + " on=" + hostUUID)
+        for vdiUuid in vdiUuids:
+            os.system("xe vdi-destroy uuid=" + vdiUuid)
+    except OSError, (errno, strerror):
+        errMsg = "OSError while reverting vm " + vmName + " to snapshot " + snapshotUUID + " with errno: " + str(errno) + " and strerr: " + strerror
+        logging.debug(errMsg)
+        raise xs_errors.XenError(errMsg)
+    return "0"
+
+if __name__ == "__main__":
+    XenAPIPlugin.dispatch({"getVhdParent":getVhdParent,  "create_secondary_storage_folder":create_secondary_storage_folder, "delete_secondary_storage_folder":delete_secondary_storage_folder, "post_create_private_template":post_create_private_template, "backupSnapshot": backupSnapshot, "deleteSnapshotBackup": deleteSnapshotBackup, "unmountSnapshotsDir": unmountSnapshotsDir, "revert_memory_snapshot":revert_memory_snapshot})
+    
+
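
Like the s3 plugin, the snapshot functions above are dispatched via
host.call_plugin. A rough sketch of a backupSnapshot call follows (not part of
the commit; the UUIDs, NFS export and paths are placeholders, and the argument
keys are taken from backupSnapshot() above):

import XenAPI

session = XenAPI.Session("http://localhost")
session.xenapi.login_with_password("root", "password")
host = session.xenapi.host.get_all()[0]

args = {
    "primaryStorageSRUuid": "PRIMARY-SR-UUID",
    "secondaryStorageMountPath": "nfs-host:/export/secondary",
    "snapshotUuid": "SNAPSHOT-VDI-UUID",
    "prevBackupUuid": "",                # empty for the first (full) backup
    "backupUuid": "NEW-BACKUP-UUID",
    "isISCSI": "false",
    "path": "snapshots/ACCOUNT-ID/VOLUME-ID",
    "localMountPoint": "/var/run/cloud_mount/DC-ID/snapshots",
}
# On success the plugin returns "1#<backupUuid>".
print session.xenapi.host.call_plugin(host, "cloud-plugin-snapshot", "backupSnapshot", args)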

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/cloud-plugin-swiftxen
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/cloud-plugin-swiftxen b/scripts/vm/hypervisor/xenserver/cloud-plugin-swiftxen
new file mode 100644
index 0000000..f7ce3b7
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/cloud-plugin-swiftxen
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Version @VERSION@
+#
+# A plugin for executing scripts needed by CloudStack
+
+import os, sys, time
+import XenAPIPlugin
+sys.path.extend(["/opt/xensource/sm/"])
+import util
+
+def echo(fn):
+    def wrapped(*v, **k):
+        name = fn.__name__
+        util.SMlog("#### CLOUD enter  %s ####" % name )
+        res = fn(*v, **k)
+        util.SMlog("#### CLOUD exit  %s ####" % name )
+        return res
+    return wrapped
+
+SWIFT = "/opt/cloud/bin/swift"
+
+MAX_SEG_SIZE = 5 * 1024 * 1024 * 1024
+
+def upload(args):
+    url = args['url']
+    account = args['account']
+    username = args['username']
+    key = args['key']
+    container = args['container']
+    ldir = args['ldir']
+    lfilename = args['lfilename']
+    isISCSI = args['isISCSI']
+    segment = 0
+    util.SMlog("#### CLOUD upload %s to swift ####", lfilename)
+    savedpath = os.getcwd()
+    os.chdir(ldir)
+    try :
+        if isISCSI == 'true':
+            cmd1 = ["lvchange", "-ay", lfilename]
+            util.pread2(cmd1)
+            cmd2 = ["lvdisplay", "-c", lfilename]
+            lines = util.pread2(cmd2).split(':')
+            size = long(lines[6]) * 512
+            if size > MAX_SEG_SIZE :
+                segment = 1
+        else :
+            size = os.path.getsize(lfilename)
+            if size > MAX_SEG_SIZE :        
+                segment = 1
+        if segment :             
+            cmd = [SWIFT, "-A", url, "-U", account + ":" + username, "-K", key, "upload", "-S", MAX_SEG_SIZE, container, lfilename]
+        else :
+            cmd = [SWIFT, "-A", url ,"-U", account + ":" + username, "-K", key, "upload", container, lfilename]
+        util.pread2(cmd)
+        return 'true'
+    finally:
+        os.chdir(savedpath)
+    return 'false'
+
+
+@echo
+def swift(session, args):
+    op = args['op']
+    if op == 'upload':
+        return upload(args)
+    elif op == 'download':
+        return download(args)
+    elif op == 'delete':
+        # Build the delete command the same way upload() builds its command;
+        # the argument keys below (including 'rfilename', the remote object
+        # name) are assumed to be supplied by the caller.
+        url = args['url']
+        account = args['account']
+        username = args['username']
+        key = args['key']
+        container = args['container']
+        rfilename = args['rfilename']
+        cmd = [SWIFT, "-A", url, "-U", account + ":" + username, "-K", key, "delete", container, rfilename]
+    else :
+        util.SMlog("doesn't support swift operation %s " % op)
+        return 'false'
+    try:
+        util.pread2(cmd)
+        return 'true'
+    except:
+        return 'false'
+   
+if __name__ == "__main__":
+    XenAPIPlugin.dispatch({"swift": swift})
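
The swift plugin follows the same calling convention. A minimal sketch of an
upload call (not part of the commit; all values are placeholders and assume the
swift CLI is installed at /opt/cloud/bin/swift on the host):

import XenAPI

session = XenAPI.Session("http://localhost")
session.xenapi.login_with_password("root", "password")
host = session.xenapi.host.get_all()[0]

args = {
    "op": "upload",
    "url": "http://swift-proxy:8080/auth/v1.0",
    "account": "AUTH_example",
    "username": "user",
    "key": "SWIFT_KEY",
    "container": "templates",
    "ldir": "/var/run/cloud_mount/tmpl",
    "lfilename": "example.vhd",
    "isISCSI": "false",
}
print session.xenapi.host.call_plugin(host, "cloud-plugin-swiftxen", "swift", args)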

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/cloud-plugins.conf
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/cloud-plugins.conf b/scripts/vm/hypervisor/xenserver/cloud-plugins.conf
new file mode 100644
index 0000000..960a873
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/cloud-plugins.conf
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#   http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[LOGGING]
+# Logging options for CloudStack plugins
+debug = True
+verbose = True
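
A minimal sketch of how a plugin can consume this [LOGGING] section, mirroring
the setup_logging() helper in the pluginlib shown further down in this mail;
the installed path of the new config file is an assumption:

import ConfigParser
import logging

debug = verbose = False
config = ConfigParser.ConfigParser()
if config.read("/etc/xensource/cloud-plugins.conf"):   # assumed install path
    try:
        options = config.options("LOGGING")
        if "debug" in options:
            debug = config.getboolean("LOGGING", "debug")
        if "verbose" in options:
            verbose = config.getboolean("LOGGING", "verbose")
    except ConfigParser.NoSectionError:
        pass

level = logging.DEBUG if debug else (logging.INFO if verbose else logging.WARNING)
logging.basicConfig(filename="/var/log/cloud/cloud-plugins.log", level=level)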

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/cloud-prepare-upgrade.sh
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/cloud-prepare-upgrade.sh b/scripts/vm/hypervisor/xenserver/cloud-prepare-upgrade.sh
index 4f80f72..3db628d 100755
--- a/scripts/vm/hypervisor/xenserver/cloud-prepare-upgrade.sh
+++ b/scripts/vm/hypervisor/xenserver/cloud-prepare-upgrade.sh
@@ -83,7 +83,7 @@ fake_pv_driver() {
     return 0
   fi
   host=$(xe vm-param-get uuid=$vm param-name=resident-on)
-  xe host-call-plugin host-uuid=$host plugin=vmops fn=preparemigration args:uuid=$vm
+  xe host-call-plugin host-uuid=$host plugin=cloud-plugin-generic fn=preparemigration args:uuid=$vm
 }
 
 vms=$(xe vm-list is-control-domain=false| grep ^uuid | awk '{print $NF}')

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/cloudlog
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/cloudlog b/scripts/vm/hypervisor/xenserver/cloudlog
new file mode 100644
index 0000000..9202253
--- /dev/null
+++ b/scripts/vm/hypervisor/xenserver/cloudlog
@@ -0,0 +1,12 @@
+/var/log/cloud-plugins.log {
+    daily
+    size 1M
+    rotate 20
+}
+
+/var/log/cloud-ovstunnel.log /var/log/cloud-ovs-pvlan.log {
+    daily
+    size 1M
+    rotate 2
+}
+

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/cloudstack_pluginlib.py
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/cloudstack_pluginlib.py b/scripts/vm/hypervisor/xenserver/cloudstack_pluginlib.py
deleted file mode 100644
index 422111f..0000000
--- a/scripts/vm/hypervisor/xenserver/cloudstack_pluginlib.py
+++ /dev/null
@@ -1,221 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# Common function for Cloudstack's XenAPI plugins
-
-import ConfigParser
-import logging
-import os
-import subprocess
-
-from time import localtime, asctime
-
-DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
-DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-DEFAULT_LOG_FILE = "/var/log/cloudstack_plugins.log"
-
-PLUGIN_CONFIG_PATH = "/etc/xensource/cloudstack_plugins.conf"
-OVSDB_PID_PATH = "/var/run/openvswitch/ovsdb-server.pid"
-OVSDB_DAEMON_PATH = "ovsdb-server"
-OVS_PID_PATH = "/var/run/openvswitch/ovs-vswitchd.pid"
-OVS_DAEMON_PATH = "ovs-vswitchd"
-VSCTL_PATH = "/usr/bin/ovs-vsctl"
-OFCTL_PATH = "/usr/bin/ovs-ofctl"
-XE_PATH = "/opt/xensource/bin/xe"
-
-
-class PluginError(Exception):
-    """Base Exception class for all plugin errors."""
-    def __init__(self, *args):
-        Exception.__init__(self, *args)
-
-
-def setup_logging(log_file=None):
-    debug = False
-    verbose = False
-    log_format = DEFAULT_LOG_FORMAT
-    log_date_format = DEFAULT_LOG_DATE_FORMAT
-    # try to read plugin configuration file
-    if os.path.exists(PLUGIN_CONFIG_PATH):
-        config = ConfigParser.ConfigParser()
-        config.read(PLUGIN_CONFIG_PATH)
-        try:
-            options = config.options('LOGGING')
-            if 'debug' in options:
-                debug = config.getboolean('LOGGING', 'debug')
-            if 'verbose' in options:
-                verbose = config.getboolean('LOGGING', 'verbose')
-            if 'format' in options:
-                log_format = config.get('LOGGING', 'format')
-            if 'date_format' in options:
-                log_date_format = config.get('LOGGING', 'date_format')
-            if 'file' in options:
-                log_file_2 = config.get('LOGGING', 'file')
-        except ValueError:
-            # configuration file contained invalid attributes
-            # ignore them
-            pass
-        except ConfigParser.NoSectionError:
-            # Missing 'Logging' section in configuration file
-            pass
-
-    root_logger = logging.root
-    if debug:
-        root_logger.setLevel(logging.DEBUG)
-    elif verbose:
-        root_logger.setLevel(logging.INFO)
-    else:
-        root_logger.setLevel(logging.WARNING)
-    formatter = logging.Formatter(log_format, log_date_format)
-
-    log_filename = log_file or log_file_2 or DEFAULT_LOG_FILE
-
-    logfile_handler = logging.FileHandler(log_filename)
-    logfile_handler.setFormatter(formatter)
-    root_logger.addHandler(logfile_handler)
-
-
-def do_cmd(cmd):
-    """Abstracts out the basics of issuing system commands. If the command
-    returns anything in stderr, a PluginError is raised with that information.
-    Otherwise, the output from stdout is returned.
-    """
-
-    pipe = subprocess.PIPE
-    logging.debug("Executing:%s", cmd)
-    proc = subprocess.Popen(cmd, shell=False, stdin=pipe, stdout=pipe,
-                            stderr=pipe, close_fds=True)
-    ret_code = proc.wait()
-    err = proc.stderr.read()
-    if ret_code:
-        logging.debug("The command exited with the error code: " +
-                      "%s (stderr output:%s)" % (ret_code, err))
-        raise PluginError(err)
-    output = proc.stdout.read()
-    if output.endswith('\n'):
-        output = output[:-1]
-    return output
-
-
-def _is_process_run(pidFile, name):
-    try:
-        fpid = open(pidFile, "r")
-        pid = fpid.readline()
-        fpid.close()
-    except IOError, e:
-        return -1
-
-    pid = pid[:-1]
-    ps = os.popen("ps -ae")
-    for l in ps:
-        if pid in l and name in l:
-            ps.close()
-            return 0
-
-    ps.close()
-    return -2
-
-
-def _is_tool_exist(name):
-    if os.path.exists(name):
-        return 0
-    return -1
-
-
-def check_switch():
-    global result
-
-    ret = _is_process_run(OVSDB_PID_PATH, OVSDB_DAEMON_PATH)
-    if ret < 0:
-        if ret == -1:
-            return "NO_DB_PID_FILE"
-        if ret == -2:
-            return "DB_NOT_RUN"
-
-    ret = _is_process_run(OVS_PID_PATH, OVS_DAEMON_PATH)
-    if ret < 0:
-        if ret == -1:
-            return "NO_SWITCH_PID_FILE"
-        if ret == -2:
-            return "SWITCH_NOT_RUN"
-
-    if _is_tool_exist(VSCTL_PATH) < 0:
-        return "NO_VSCTL"
-
-    if _is_tool_exist(OFCTL_PATH) < 0:
-        return "NO_OFCTL"
-
-    return "SUCCESS"
-
-
-def _build_flow_expr(**kwargs):
-    is_delete_expr = kwargs.get('delete', False)
-    flow = ""
-    if not is_delete_expr:
-        flow = "hard_timeout=%s,idle_timeout=%s,priority=%s"\
-                % (kwargs.get('hard_timeout', '0'),
-                   kwargs.get('idle_timeout', '0'),
-                   kwargs.get('priority', '1'))
-    in_port = 'in_port' in kwargs and ",in_port=%s" % kwargs['in_port'] or ''
-    dl_type = 'dl_type' in kwargs and ",dl_type=%s" % kwargs['dl_type'] or ''
-    dl_src = 'dl_src' in kwargs and ",dl_src=%s" % kwargs['dl_src'] or ''
-    dl_dst = 'dl_dst' in kwargs and ",dl_dst=%s" % kwargs['dl_dst'] or ''
-    nw_src = 'nw_src' in kwargs and ",nw_src=%s" % kwargs['nw_src'] or ''
-    nw_dst = 'nw_dst' in kwargs and ",nw_dst=%s" % kwargs['nw_dst'] or ''
-    proto = 'proto' in kwargs and ",%s" % kwargs['proto'] or ''
-    ip = ('nw_src' in kwargs or 'nw_dst' in kwargs) and ',ip' or ''
-    flow = (flow + in_port + dl_type + dl_src + dl_dst +
-            (ip or proto) + nw_src + nw_dst)
-    return flow
-
-
-def add_flow(bridge, **kwargs):
-    """
-    Builds a flow expression for **kwargs and adds the flow entry
-    to an Open vSwitch instance
-    """
-    flow = _build_flow_expr(**kwargs)
-    actions = 'actions' in kwargs and ",actions=%s" % kwargs['actions'] or ''
-    flow = flow + actions
-    addflow = [OFCTL_PATH, "add-flow", bridge, flow]
-    do_cmd(addflow)
-
-
-def del_flows(bridge, **kwargs):
-    """
-    Removes flows according to criteria passed as keyword.
-    """
-    flow = _build_flow_expr(delete=True, **kwargs)
-    # out_port condition does not exist for all flow commands
-    out_port = ("out_port" in kwargs and
-                ",out_port=%s" % kwargs['out_port'] or '')
-    flow = flow + out_port
-    delFlow = [OFCTL_PATH, 'del-flows', bridge, flow]
-    do_cmd(delFlow)
-
-
-def del_all_flows(bridge):
-    delFlow = [OFCTL_PATH, "del-flows", bridge]
-    do_cmd(delFlow)
-
-    normalFlow = "priority=0 idle_timeout=0 hard_timeout=0 actions=normal"
-    add_flow(bridge, normalFlow)
-
-
-def del_port(bridge, port):
-    delPort = [VSCTL_PATH, "del-port", bridge, port]
-    do_cmd(delPort)
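
For reference, the flow helpers in the library deleted here (referenced as
cloud-plugin-lib by the new snapshot plugin above) are used by the OVS plugins
roughly as follows; a sketch only, with an illustrative bridge, subnet and port
name:

import cloudstack_pluginlib as lib

lib.setup_logging("/var/log/cloudstack_plugins.log")

if lib.check_switch() == "SUCCESS":
    # Install a drop rule for IPv4 traffic to an illustrative subnet on xapi0.
    lib.add_flow("xapi0", priority=200, nw_dst="10.1.1.0/24", actions="drop")
    # Remove an illustrative port from the same bridge.
    lib.del_port("xapi0", "vif1.0")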

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/cloudstack_plugins.conf
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/cloudstack_plugins.conf b/scripts/vm/hypervisor/xenserver/cloudstack_plugins.conf
deleted file mode 100644
index 8335893..0000000
--- a/scripts/vm/hypervisor/xenserver/cloudstack_plugins.conf
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-[LOGGING]
-# Logging options for Cloudstack plugins
-debug = True
-verbose = True

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/cloudstacklog
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/cloudstacklog b/scripts/vm/hypervisor/xenserver/cloudstacklog
deleted file mode 100644
index 2bae5ab..0000000
--- a/scripts/vm/hypervisor/xenserver/cloudstacklog
+++ /dev/null
@@ -1,12 +0,0 @@
-/var/log/vmops.log {
-    daily
-    size 1M
-    rotate 20
-}
-
-/var/log/ovstunnel.log /var/log/ovs-pvlan.log {
-    daily
-    size 1M
-    rotate 2
-}
-

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/create_privatetemplate_from_snapshot.sh
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/create_privatetemplate_from_snapshot.sh b/scripts/vm/hypervisor/xenserver/create_privatetemplate_from_snapshot.sh
index f170f69..d39fc6e 100755
--- a/scripts/vm/hypervisor/xenserver/create_privatetemplate_from_snapshot.sh
+++ b/scripts/vm/hypervisor/xenserver/create_privatetemplate_from_snapshot.sh
@@ -96,7 +96,7 @@ if [ $? -ne 0 ]; then
   exit 0
 fi
 
-VHDUTIL="/opt/cloudstack/bin/vhd-util"
+VHDUTIL="/opt/cloud/bin/vhd-util"
 
 copyvhd()
 {

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/launch_hb.sh
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/launch_hb.sh b/scripts/vm/hypervisor/xenserver/launch_hb.sh
index bde7fca..289eb5f 100755
--- a/scripts/vm/hypervisor/xenserver/launch_hb.sh
+++ b/scripts/vm/hypervisor/xenserver/launch_hb.sh
@@ -33,7 +33,7 @@ if [ -z $2 ]; then
   exit 3
 fi
 
-if [ ! -f /opt/cloudstack/bin/xenheartbeat.sh ]; then
+if [ ! -f /opt/cloud/bin/xenheartbeat.sh ]; then
   printf "Error: Unable to find xenheartbeat.sh to launch\n"
   exit 4
 fi
@@ -42,5 +42,5 @@ for psid in `ps -ef | grep xenheartbeat | grep -v grep | awk '{print $2}'`; do
   kill $psid
 done
 
-nohup /opt/cloudstack/bin/xenheartbeat.sh $1 $2 >/dev/null 2>/dev/null &
+nohup /opt/cloud/bin/xenheartbeat.sh $1 $2 >/dev/null 2>/dev/null &
 echo "======> DONE <======"

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/mockxcpplugin.py
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/mockxcpplugin.py b/scripts/vm/hypervisor/xenserver/mockxcpplugin.py
deleted file mode 100644
index 0de24ca..0000000
--- a/scripts/vm/hypervisor/xenserver/mockxcpplugin.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/python
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# This is for test purpose, to test xcp plugin
-
-import sys
-import XenAPI
-import os.path
-import traceback
-import socket
-def getHost():
-    hostname = socket.gethostname()
-    url = "http://localhost"
-    session = XenAPI.Session(url)
-    session.xenapi.login_with_password("root", "password")
-    host = session.xenapi.host
-    hosts = session.xenapi.host.get_by_name_label(hostname)
-    if len(hosts) != 1:
-        print "can't find host:" + hostname
-        sys.exit(1)
-    localhost = hosts[0]
-    return [host, localhost]
-
-def callPlugin(pluginName, func, params):
-    hostPair = getHost()
-    host = hostPair[0]
-    localhost = hostPair[1]
-    return host.call_plugin(localhost, pluginName, func, params)
-
-def main():
-    if len(sys.argv) < 3:
-        print "args: pluginName funcName params"
-        sys.exit(1)
-
-    pluginName = sys.argv[1]
-    funcName = sys.argv[2]
-
-    paramList = sys.argv[3:]
-    if (len(paramList) % 2) != 0:
-        print "params must be name/value pair"
-        sys.exit(2)
-    params = {}
-    pos = 0;
-    for i in range(len(paramList) / 2):
-        params[str(paramList[pos])] = str(paramList[pos+1])
-        pos = pos + 2
-    print "call: " + pluginName + " " + funcName + ", with params: " + str(params)
-    print "return: " +  callPlugin(pluginName, funcName, params)
-
-if __name__ == "__main__":
-    main()

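The deleted mockxcpplugin.py was a small test driver that invoked xapi plugins through host.call_plugin. An equivalent sketch against the renamed plugins introduced by this commit (the credentials are placeholders, and which plugin/function you call is up to the caller, exactly as in the removed script):

#!/usr/bin/python
# Illustrative sketch: call a renamed cloud-plugin-* xapi plugin the same way
# the removed mockxcpplugin.py did. Credentials below are placeholders.
import socket
import sys
import XenAPI

def call_cloud_plugin(plugin_name, func_name, params):
    session = XenAPI.Session("http://localhost")
    session.xenapi.login_with_password("root", "password")  # placeholder
    try:
        hosts = session.xenapi.host.get_by_name_label(socket.gethostname())
        if len(hosts) != 1:
            raise RuntimeError("cannot resolve local host record")
        return session.xenapi.host.call_plugin(hosts[0], plugin_name,
                                               func_name, params)
    finally:
        session.xenapi.session.logout()

if __name__ == "__main__":
    if len(sys.argv) < 3 or len(sys.argv[3:]) % 2 != 0:
        print "usage: pluginName funcName [name value ...]"
        sys.exit(1)
    args = sys.argv[3:]
    params = dict(zip(args[0::2], args[1::2]))
    print call_cloud_plugin(sys.argv[1], sys.argv[2], params)
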
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/ovs-pvlan
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/ovs-pvlan b/scripts/vm/hypervisor/xenserver/ovs-pvlan
deleted file mode 100755
index 31d60d0..0000000
--- a/scripts/vm/hypervisor/xenserver/ovs-pvlan
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/usr/bin/python
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-import cloudstack_pluginlib as lib
-import logging
-import os
-import sys
-import subprocess
-import time
-import XenAPIPlugin
-
-sys.path.append("/opt/xensource/sm/")
-import util
-
-from time import localtime as _localtime, asctime as _asctime
-
-CS_DIR = "/opt/cloudstack/bin/"
-
-xePath = "/opt/xensource/bin/xe"
-dhcpSetupPath = CS_DIR + "ovs-pvlan-dhcp-host.sh"
-vmSetupPath = CS_DIR + "ovs-pvlan-vm.sh"
-getDhcpIfacePath = CS_DIR + "ovs-get-dhcp-iface.sh"
-pvlanCleanupPath = CS_DIR + "ovs-pvlan-cleanup.sh"
-getBridgePath = CS_DIR + "ovs-get-bridge.sh"
-
-lib.setup_logging("/var/log/ovs-pvlan.log")
-
-def echo(fn):
-    def wrapped(*v, **k):
-        name = fn.__name__
-        logging.debug("#### VMOPS enter  %s ####" % name)
-        res = fn(*v, **k)
-        logging.debug("#### VMOPS exit  %s ####" % name)
-        return res
-    return wrapped
-
-@echo
-def setup_pvlan_dhcp(session, args):
-    op = args.pop("op")
-    nw_label = args.pop("nw-label")
-    primary = args.pop("primary-pvlan")
-    isolated = args.pop("isolated-pvlan")
-    dhcp_name = args.pop("dhcp-name")
-    dhcp_ip = args.pop("dhcp-ip")
-    dhcp_mac = args.pop("dhcp-mac")
-
-    res = lib.check_switch()
-    if res != "SUCCESS":
-        return "FAILURE:%s" % res
-
-    logging.debug("Network is:%s" % (nw_label))
-    bridge = lib.do_cmd([getBridgePath, nw_label])
-    logging.debug("Determine bridge/switch is :%s" % (bridge))
-
-    if op == "add":
-        logging.debug("Try to get dhcp vm %s port on the switch:%s" % (dhcp_name, bridge))
-        dhcp_iface = lib.do_cmd([getDhcpIfacePath, bridge, dhcp_name])
-        logging.debug("About to setup dhcp vm on the switch:%s" % bridge)
-        res = lib.do_cmd([dhcpSetupPath, "-A", "-b", bridge, "-p", primary,
-            "-i", isolated, "-n", dhcp_name, "-d", dhcp_ip, "-m", dhcp_mac,
-            "-I", dhcp_iface])
-	if res:
-	    result = "FAILURE:%s" % res
-	    return result;
-	logging.debug("Setup dhcp vm on switch program done")
-    elif op == "delete":
-        logging.debug("About to remove dhcp the switch:%s" % bridge)
-        res = lib.do_cmd([dhcpSetupPath, "-D", "-b", bridge, "-p", primary,
-            "-i", isolated, "-n", dhcp_name, "-d", dhcp_ip, "-m", dhcp_mac])
-	if res:
-	    result = "FAILURE:%s" % res
-	    return result;
-	logging.debug("Remove DHCP on switch program done")
-    
-    result = "true"
-    logging.debug("Setup_pvlan_dhcp completed with result:%s" % result)
-    return result
-
-@echo
-def setup_pvlan_vm(session, args):
-    op = args.pop("op")
-    nw_label = args.pop("nw-label")
-    primary = args.pop("primary-pvlan")
-    isolated = args.pop("isolated-pvlan")
-    vm_mac = args.pop("vm-mac")
-    trunk_port = 1
-
-    res = lib.check_switch()
-    if res != "SUCCESS":
-        return "FAILURE:%s" % res
-
-    bridge = lib.do_cmd([getBridgePath, nw_label])
-    logging.debug("Determine bridge/switch is :%s" % (bridge))
-
-    if op == "add":
-        logging.debug("About to setup vm on the switch:%s" % bridge)
-        res = lib.do_cmd([vmSetupPath, "-A", "-b", bridge, "-p", primary, "-i", isolated, "-v", vm_mac])
-	if res:
-	    result = "FAILURE:%s" % res
-	    return result;
-	logging.debug("Setup vm on switch program done")
-    elif op == "delete":
-        logging.debug("About to remove vm on the switch:%s" % bridge)
-        res = lib.do_cmd([vmSetupPath, "-D", "-b", bridge, "-p", primary, "-i", isolated, "-v", vm_mac])
-	if res:
-	    result = "FAILURE:%s" % res
-	    return result;
-	logging.debug("Remove vm on switch program done")
-
-    result = "true"
-    logging.debug("Setup_pvlan_vm_alone completed with result:%s" % result)
-    return result
-
-@echo
-def cleanup(session, args):
-    res = lib.check_switch()
-    if res != "SUCCESS":
-        return "FAILURE:%s" % res
-
-    res = lib.do_cmd([pvlanCleanUpPath])
-    if res:
-        result = "FAILURE:%s" % res
-        return result;
-
-    result = "true"
-    logging.debug("Setup_pvlan_vm_dhcp completed with result:%s" % result)
-    return result
-
-if __name__ == "__main__":
-    XenAPIPlugin.dispatch({"setup-pvlan-dhcp": setup_pvlan_dhcp,
-                           "setup-pvlan-vm": setup_pvlan_vm,
-                           "cleanup":cleanup})

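One detail worth noting in the removed ovs-pvlan plugin: its cleanup handler calls lib.do_cmd([pvlanCleanUpPath]) while the module defines pvlanCleanupPath (lower-case "u"), so dispatching "cleanup" would have raised a NameError. A sketch of that handler with the name made consistent (cloudstack_pluginlib is the helper library the removed file imported; the path constant mirrors the one defined near its top):

# Illustrative sketch of a consistent cleanup handler for the removed plugin.
import cloudstack_pluginlib as lib

PVLAN_CLEANUP_PATH = "/opt/cloudstack/bin/ovs-pvlan-cleanup.sh"

def cleanup(session, args):
    res = lib.check_switch()
    if res != "SUCCESS":
        return "FAILURE:%s" % res
    res = lib.do_cmd([PVLAN_CLEANUP_PATH])
    if res:
        return "FAILURE:%s" % res
    return "true"
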
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/ovs-vif-flows.py
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/ovs-vif-flows.py b/scripts/vm/hypervisor/xenserver/ovs-vif-flows.py
index 8452dae..58e0a3d 100644
--- a/scripts/vm/hypervisor/xenserver/ovs-vif-flows.py
+++ b/scripts/vm/hypervisor/xenserver/ovs-vif-flows.py
@@ -21,7 +21,7 @@
 import os
 import sys
 
-import cloudstack_pluginlib as pluginlib
+import cloud-plugin-lib as pluginlib
 
 
 def clear_flows(bridge, this_vif_ofport, vif_ofports):

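As committed, the replacement import above is not valid Python: a hyphen cannot appear in a module name in an import statement, so loading this file would raise a SyntaxError. A sketch of one way to load a helper whose file name contains hyphens (the /etc/xapi.d/plugins path is an assumption about where the xapi plugins are installed on the host):

# Illustrative sketch: load a hyphen-named helper file, which the plain
# "import" statement cannot express. The path below is an assumption.
import imp

pluginlib = imp.load_source("cloud_plugin_lib",
                            "/etc/xapi.d/plugins/cloud-plugin-lib")

# pluginlib.do_cmd(...), pluginlib.setup_logging(...) and the other helpers can
# then be used just as "import cloudstack_pluginlib as pluginlib" allowed before.
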
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/ovstunnel
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/ovstunnel b/scripts/vm/hypervisor/xenserver/ovstunnel
deleted file mode 100755
index 1ff7e82..0000000
--- a/scripts/vm/hypervisor/xenserver/ovstunnel
+++ /dev/null
@@ -1,261 +0,0 @@
-#!/usr/bin/python
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#   http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-# Creates a tunnel mesh across xenserver hosts
-# Enforces broadcast drop rules on ingress GRE tunnels
-
-import cloudstack_pluginlib as lib
-import logging
-import os
-import sys
-import subprocess
-import time
-import XenAPIPlugin
-
-sys.path.append("/opt/xensource/sm/")
-import util
-
-from time import localtime as _localtime, asctime as _asctime
-
-xePath = "/opt/xensource/bin/xe"
-lib.setup_logging("/var/log/ovstunnel.log")
-
-
-def block_ipv6_v5(bridge):
-    lib.add_flow(bridge, priority=65000, dl_type='0x86dd', actions='drop')
-
-
-def block_ipv6_v6(bridge):
-    lib.add_flow(bridge, priority=65000, proto='ipv6', actions='drop')
-
-
-block_ipv6_handlers = {
-        '5': block_ipv6_v5,
-        '6': block_ipv6_v6}
-
-
-def echo(fn):
-    def wrapped(*v, **k):
-        name = fn.__name__
-        logging.debug("#### VMOPS enter  %s ####" % name)
-        res = fn(*v, **k)
-        logging.debug("#### VMOPS exit  %s ####" % name)
-        return res
-    return wrapped
-
-
-@echo
-def setup_ovs_bridge(session, args):
-    bridge = args.pop("bridge")
-    key = args.pop("key")
-    xs_nw_uuid = args.pop("xs_nw_uuid")
-    cs_host_id = args.pop("cs_host_id")
-
-    res = lib.check_switch()
-    if res != "SUCCESS":
-        return "FAILURE:%s" % res
-
-    logging.debug("About to manually create the bridge:%s" % bridge)
-    # create a bridge with the same name as the xapi network
-    # also associate gre key in other config attribute
-    res = lib.do_cmd([lib.VSCTL_PATH, "--", "--may-exist", "add-br", bridge,
-                                     "--", "set", "bridge", bridge,
-                                     "other_config:gre_key=%s" % key])
-    logging.debug("Bridge has been manually created:%s" % res)
-    # TODO: Make sure xs-network-uuid is set into external_ids
-    lib.do_cmd([lib.VSCTL_PATH, "set", "Bridge", bridge,
-                            "external_ids:xs-network-uuid=%s" % xs_nw_uuid])
-    # Non empty result means something went wrong
-    if res:
-        result = "FAILURE:%s" % res
-    else:
-        # Verify the bridge actually exists, with the gre_key properly set
-        res = lib.do_cmd([lib.VSCTL_PATH, "get", "bridge",
-                                          bridge, "other_config:gre_key"])
-        if key in res:
-            result = "SUCCESS:%s" % bridge
-        else:
-            result = "FAILURE:%s" % res
-        # Finally note in the xenapi network object that the network has
-        # been configured
-        xs_nw_uuid = lib.do_cmd([lib.XE_PATH, "network-list",
-                                "bridge=%s" % bridge, "--minimal"])
-        lib.do_cmd([lib.XE_PATH, "network-param-set", "uuid=%s" % xs_nw_uuid,
-                   "other-config:is-ovs-tun-network=True"])
-        conf_hosts = lib.do_cmd([lib.XE_PATH, "network-param-get",
-                                "uuid=%s" % xs_nw_uuid,
-                                "param-name=other-config",
-                                "param-key=ovs-host-setup", "--minimal"])
-        conf_hosts = cs_host_id + (conf_hosts and ',%s' % conf_hosts or '')
-        lib.do_cmd([lib.XE_PATH, "network-param-set", "uuid=%s" % xs_nw_uuid,
-                   "other-config:ovs-host-setup=%s" % conf_hosts])
-
-        # BLOCK IPv6 - Flow spec changes with ovs version
-        host_list_cmd = [lib.XE_PATH, 'host-list', '--minimal']
-        host_list_str = lib.do_cmd(host_list_cmd)
-        host_uuid = host_list_str.split(',')[0].strip()
-        version_cmd = [lib.XE_PATH, 'host-param-get', 'uuid=%s' % host_uuid,
-                                   'param-name=software-version',
-                                   'param-key=product_version']
-        version = lib.do_cmd(version_cmd).split('.')[0]
-        block_ipv6_handlers[version](bridge)
-    logging.debug("Setup_ovs_bridge completed with result:%s" % result)
-    return result
-
-
-@echo
-def destroy_ovs_bridge(session, args):
-    bridge = args.pop("bridge")
-    res = lib.check_switch()
-    if res != "SUCCESS":
-        return res
-    res = lib.do_cmd([lib.VSCTL_PATH, "del-br", bridge])
-    logging.debug("Bridge has been manually removed:%s" % res)
-    if res:
-        result = "FAILURE:%s" % res
-    else:
-        # Note that the bridge has been removed on xapi network object
-        xs_nw_uuid = lib.do_cmd([xePath, "network-list",
-                                "bridge=%s" % bridge, "--minimal"])
-        #FIXME: WOW, this an error
-        #lib.do_cmd([xePath,"network-param-set", "uuid=%s" % xs_nw_uuid,
-        #                  "other-config:ovs-setup=False"])
-        result = "SUCCESS:%s" % bridge
-
-    logging.debug("Destroy_ovs_bridge completed with result:%s" % result)
-    return result
-
-
-@echo
-def create_tunnel(session, args):
-    bridge = args.pop("bridge")
-    remote_ip = args.pop("remote_ip")
-    gre_key = args.pop("key")
-    src_host = args.pop("from")
-    dst_host = args.pop("to")
-
-    logging.debug("Entering create_tunnel")
-
-    res = lib.check_switch()
-    if res != "SUCCESS":
-        logging.debug("Openvswitch running: NO")
-        return "FAILURE:%s" % res
-
-    # We need to keep the name below 14 characters
-    # src and target are enough - consider a fixed length hash
-    name = "t%s-%s-%s" % (gre_key, src_host, dst_host)
-
-    # Verify the xapi bridge to be created
-    # NOTE: Timeout should not be necessary anymore
-    wait = [lib.VSCTL_PATH, "--timeout=30", "wait-until", "bridge",
-                    bridge, "--", "get", "bridge", bridge, "name"]
-    res = lib.do_cmd(wait)
-    if bridge not in res:
-        logging.debug("WARNING:Can't find bridge %s for creating " +
-                                  "tunnel!" % bridge)
-        return "FAILURE:NO_BRIDGE"
-    logging.debug("bridge %s for creating tunnel - VERIFIED" % bridge)
-    tunnel_setup = False
-    drop_flow_setup = False
-    try:
-        # Create a port and configure the tunnel interface for it
-        add_tunnel = [lib.VSCTL_PATH, "add-port", bridge,
-                                  name, "--", "set", "interface",
-                                  name, "type=gre", "options:key=%s" % gre_key,
-                                  "options:remote_ip=%s" % remote_ip]
-        lib.do_cmd(add_tunnel)
-        tunnel_setup = True
-        # verify port
-        verify_port = [lib.VSCTL_PATH, "get", "port", name, "interfaces"]
-        res = lib.do_cmd(verify_port)
-        # Expecting python-style list as output
-        iface_list = []
-        if len(res) > 2:
-            iface_list = res.strip()[1:-1].split(',')
-        if len(iface_list) != 1:
-            logging.debug("WARNING: Unexpected output while verifying " +
-                                      "port %s on bridge %s" % (name, bridge))
-            return "FAILURE:VERIFY_PORT_FAILED"
-
-        # verify interface
-        iface_uuid = iface_list[0]
-        verify_interface_key = [lib.VSCTL_PATH, "get", "interface",
-                                iface_uuid, "options:key"]
-        verify_interface_ip = [lib.VSCTL_PATH, "get", "interface",
-                               iface_uuid, "options:remote_ip"]
-
-        key_validation = lib.do_cmd(verify_interface_key)
-        ip_validation = lib.do_cmd(verify_interface_ip)
-
-        if not gre_key in key_validation or not remote_ip in ip_validation:
-            logging.debug("WARNING: Unexpected output while verifying " +
-                          "interface %s on bridge %s" % (name, bridge))
-            return "FAILURE:VERIFY_INTERFACE_FAILED"
-        logging.debug("Tunnel interface validated:%s" % verify_interface_ip)
-        cmd_tun_ofport = [lib.VSCTL_PATH, "get", "interface",
-                                          iface_uuid, "ofport"]
-        tun_ofport = lib.do_cmd(cmd_tun_ofport)
-        # Ensure no trailing LF
-        if tun_ofport.endswith('\n'):
-            tun_ofport = tun_ofport[:-1]
-        # add flow entryies for dropping broadcast coming in from gre tunnel
-        lib.add_flow(bridge, priority=1000, in_port=tun_ofport,
-                         dl_dst='ff:ff:ff:ff:ff:ff', actions='drop')
-        lib.add_flow(bridge, priority=1000, in_port=tun_ofport,
-                     nw_dst='224.0.0.0/24', actions='drop')
-        drop_flow_setup = True
-        logging.debug("Broadcast drop rules added")
-        return "SUCCESS:%s" % name
-    except:
-        logging.debug("An unexpected error occured. Rolling back")
-        if tunnel_setup:
-            logging.debug("Deleting GRE interface")
-            # Destroy GRE port and interface
-            lib.del_port(bridge, name)
-        if drop_flow_setup:
-            # Delete flows
-            logging.debug("Deleting flow entries from GRE interface")
-            lib.del_flows(bridge, in_port=tun_ofport)
-        # This will not cancel the original exception
-        raise
-
-
-@echo
-def destroy_tunnel(session, args):
-    bridge = args.pop("bridge")
-    iface_name = args.pop("in_port")
-    logging.debug("Destroying tunnel at port %s for bridge %s"
-                            % (iface_name, bridge))
-    ofport = get_field_of_interface(iface_name, "ofport")
-    lib.del_flows(bridge, in_port=ofport)
-    lib.del_port(bridge, iface_name)
-    return "SUCCESS"
-
-
-def get_field_of_interface(iface_name, field):
-    get_iface_cmd = [lib.VSCTL_PATH, "get", "interface", iface_name, field]
-    res = lib.do_cmd(get_iface_cmd)
-    return res
-
-
-if __name__ == "__main__":
-    XenAPIPlugin.dispatch({"create_tunnel": create_tunnel,
-                           "destroy_tunnel": destroy_tunnel,
-                           "setup_ovs_bridge": setup_ovs_bridge,
-                           "destroy_ovs_bridge": destroy_ovs_bridge})

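For reference, the removed create_tunnel handler did two things per tunnel: add a GRE port/interface to the OVS bridge, then install flows that drop broadcast and multicast traffic arriving over that tunnel. A minimal sketch of those two steps, assuming ovs-vsctl and ovs-ofctl are on the PATH (bridge, port name, key and remote IP below are illustrative values):

# Illustrative sketch of the GRE-port-plus-drop-flows sequence performed by
# the removed create_tunnel handler. All argument values are examples.
import subprocess

def create_gre_tunnel(bridge, port, gre_key, remote_ip):
    # Add the port and configure its GRE interface.
    subprocess.check_call(["ovs-vsctl", "add-port", bridge, port,
                           "--", "set", "interface", port, "type=gre",
                           "options:key=%s" % gre_key,
                           "options:remote_ip=%s" % remote_ip])
    # Look up the OpenFlow port number of the new interface.
    proc = subprocess.Popen(["ovs-vsctl", "get", "interface", port, "ofport"],
                            stdout=subprocess.PIPE)
    ofport = proc.communicate()[0].strip()
    # Drop broadcast and multicast frames coming in from the tunnel.
    subprocess.check_call(["ovs-ofctl", "add-flow", bridge,
        "priority=1000,in_port=%s,dl_dst=ff:ff:ff:ff:ff:ff,actions=drop" % ofport])
    subprocess.check_call(["ovs-ofctl", "add-flow", bridge,
        "priority=1000,in_port=%s,ip,nw_dst=224.0.0.0/24,actions=drop" % ofport])

if __name__ == "__main__":
    create_gre_tunnel("xapi1", "t100-1-2", "100", "192.168.0.2")
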
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/s3xen
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/s3xen b/scripts/vm/hypervisor/xenserver/s3xen
deleted file mode 100644
index bf81bbd..0000000
--- a/scripts/vm/hypervisor/xenserver/s3xen
+++ /dev/null
@@ -1,428 +0,0 @@
-#!/usr/bin/python
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# Version @VERSION@
-#
-# A plugin for executing script needed by CloudStack
-from copy import copy
-from datetime import datetime
-from httplib import *
-from string import join
-from string import split
-
-import os
-import sys
-import time
-import md5 as md5mod
-import sha
-import base64
-import hmac
-import traceback
-import urllib2
-from xml.dom.minidom import parseString
-
-import XenAPIPlugin
-sys.path.extend(["/opt/xensource/sm/"])
-import util
-
-NULL = 'null'
-
-# Value conversion utility functions ...
-
-
-def to_none(value):
-
-    if value is None:
-        return None
-    if isinstance(value, basestring) and value.strip().lower() == NULL:
-        return None
-    return value
-
-
-def to_bool(value):
-
-    if to_none(value) is None:
-        return False
-    if isinstance(value, basestring) and value.strip().lower() == 'true':
-        return True
-    if isinstance(value, int) and value:
-        return True
-    return False
-
-
-def to_integer(value, default):
-
-    if to_none(value) is None or not isinstance(value, int):
-        return default
-    return int(value)
-
-
-def optional_str_value(value, default):
-
-    if is_not_blank(value):
-        return value
-    return default
-
-
-def is_blank(value):
-
-    return not is_not_blank(value)
-
-
-def is_not_blank(value):
-
-    if to_none(value) is None or not isinstance(value, basestring):
-        return True
-    if value.strip == '':
-        return False
-    return True
-
-
-def get_optional_key(map, key, default=''):
-
-    if key in map:
-        return map[key]
-    return default
-
-
-def log(message):
-
-    util.SMlog('#### VMOPS %s ####' % message)
-
-
-def echo(fn):
-    def wrapped(*v, **k):
-        name = fn.__name__
-        log("enter %s ####" % name)
-        res = fn(*v, **k)
-        log("exit %s with result %s" % (name, res))
-        return res
-    return wrapped
-
-
-def require_str_value(value, error_message):
-
-    if is_not_blank(value):
-        return value
-
-    raise ValueError(error_message)
-
-
-def retry(max_attempts, fn):
-
-    attempts = 1
-    while attempts <= max_attempts:
-        log("Attempting execution " + str(attempts) + "/" + str(
-            max_attempts) + " of " + fn.__name__)
-        try:
-            return fn()
-        except:
-            if (attempts >= max_attempts):
-                raise
-            attempts = attempts + 1
-
-
-def compute_md5(filename, buffer_size=8192):
-
-    hasher = md5mod.md5()
-
-    file = open(filename, 'rb')
-    try:
-
-        data = file.read(buffer_size)
-        while data != "":
-            hasher.update(data)
-            data = file.read(buffer_size)
-
-        return base64.encodestring(hasher.digest())[:-1]
-
-    finally:
-
-        file.close()
-
-
-class S3Client(object):
-
-    DEFAULT_END_POINT = 's3.amazonaws.com'
-    DEFAULT_CONNECTION_TIMEOUT = 50000
-    DEFAULT_SOCKET_TIMEOUT = 50000
-    DEFAULT_MAX_ERROR_RETRY = 3
-
-    HEADER_CONTENT_MD5 = 'Content-MD5'
-    HEADER_CONTENT_TYPE = 'Content-Type'
-    HEADER_CONTENT_LENGTH = 'Content-Length'
-
-    def __init__(self, access_key, secret_key, end_point=None,
-                 https_flag=None, connection_timeout=None, socket_timeout=None,
-                 max_error_retry=None):
-
-        self.access_key = require_str_value(
-            access_key, 'An access key must be specified.')
-        self.secret_key = require_str_value(
-            secret_key, 'A secret key must be specified.')
-        self.end_point = optional_str_value(end_point, self.DEFAULT_END_POINT)
-        self.https_flag = to_bool(https_flag)
-        self.connection_timeout = to_integer(
-            connection_timeout, self.DEFAULT_CONNECTION_TIMEOUT)
-        self.socket_timeout = to_integer(
-            socket_timeout, self.DEFAULT_SOCKET_TIMEOUT)
-        self.max_error_retry = to_integer(
-            max_error_retry, self.DEFAULT_MAX_ERROR_RETRY)
-
-    def build_canocialized_resource(self, bucket, key):
-        if not key.startswith("/"):
-            uri = bucket + "/" + key
-        else:
-            uri = bucket + key
-
-        return "/" + uri
-
-    def noop_send_body(connection):
-        pass
-
-    def noop_read(response):
-        return response.read()
-
-    def do_operation(
-        self, method, bucket, key, input_headers={},
-            fn_send_body=noop_send_body, fn_read=noop_read):
-
-        headers = copy(input_headers)
-        headers['Expect'] = '100-continue'
-
-        uri = self.build_canocialized_resource(bucket, key)
-        signature, request_date = self.sign_request(method, uri, headers)
-        headers['Authorization'] = "AWS " + self.access_key + ":" + signature
-        headers['Date'] = request_date
-
-        def perform_request():
-            connection = None
-            if self.https_flag:
-                connection = HTTPSConnection(self.end_point)
-            else:
-                connection = HTTPConnection(self.end_point)
-
-            try:
-                connection.timeout = self.socket_timeout
-                connection.putrequest(method, uri)
-
-                for k, v in headers.items():
-                    connection.putheader(k, v)
-                connection.endheaders()
-
-                fn_send_body(connection)
-
-                response = connection.getresponse()
-                log("Sent " + method + " request to " + self.end_point +
-                    uri + " with headers " + str(headers) +
-                    ".  Received response status " + str(response.status) +
-                    ": " + response.reason)
-
-                return fn_read(response)
-
-            finally:
-                connection.close()
-
-        return retry(self.max_error_retry, perform_request)
-
-    '''
-    See http://bit.ly/MMC5de for more information regarding the creation of
-    AWS authorization tokens and header signing
-    '''
-    def sign_request(self, operation, canocialized_resource, headers):
-
-        request_date = datetime.utcnow(
-        ).strftime('%a, %d %b %Y %H:%M:%S +0000')
-
-        content_hash = get_optional_key(headers, self.HEADER_CONTENT_MD5)
-        content_type = get_optional_key(headers, self.HEADER_CONTENT_TYPE)
-
-        string_to_sign = join(
-            [operation, content_hash, content_type, request_date,
-                canocialized_resource], '\n')
-
-        signature = base64.encodestring(
-            hmac.new(self.secret_key, string_to_sign.encode('utf8'),
-                     sha).digest())[:-1]
-
-        return signature, request_date
-        
-    def getText(self, nodelist):
-        rc = []
-        for node in nodelist:
-            if node.nodeType == node.TEXT_NODE:
-                rc.append(node.data)
-        return ''.join(rc)
-
-    def multiUpload(self, bucket, key, src_fileName, chunkSize=5 * 1024 * 1024):
-        uploadId={}
-        def readInitalMultipart(response):
-           data = response.read()
-           xmlResult = parseString(data) 
-           result = xmlResult.getElementsByTagName("InitiateMultipartUploadResult")[0]
-           upload = result.getElementsByTagName("UploadId")[0]
-           uploadId["0"] = upload.childNodes[0].data
-       
-        self.do_operation('POST', bucket, key + "?uploads", fn_read=readInitalMultipart) 
-
-        fileSize = os.path.getsize(src_fileName) 
-        parts = fileSize / chunkSize + ((fileSize % chunkSize) and 1)
-        part = 1
-        srcFile = open(src_fileName, 'rb')
-        etags = []
-        while part <= parts:
-            offset = part - 1
-            size = min(fileSize - offset * chunkSize, chunkSize)
-            headers = {
-                self.HEADER_CONTENT_LENGTH: size
-            }
-            def send_body(connection): 
-               srcFile.seek(offset * chunkSize)
-               block = srcFile.read(size)
-               connection.send(block)
-            def read_multiPart(response):
-               etag = response.getheader('ETag') 
-               etags.append((part, etag))
-            self.do_operation("PUT", bucket, "%s?partNumber=%s&uploadId=%s"%(key, part, uploadId["0"]), headers, send_body, read_multiPart)
-            part = part + 1
-        srcFile.close()
-
-        data = [] 
-        partXml = "<Part><PartNumber>%i</PartNumber><ETag>%s</ETag></Part>"
-        for etag in etags:
-            data.append(partXml%etag)
-        msg = "<CompleteMultipartUpload>%s</CompleteMultipartUpload>"%("".join(data))
-        size = len(msg)
-        headers = {
-            self.HEADER_CONTENT_LENGTH: size
-        }
-        def send_complete_multipart(connection):
-            connection.send(msg) 
-        self.do_operation("POST", bucket, "%s?uploadId=%s"%(key, uploadId["0"]), headers, send_complete_multipart)
-
-    def put(self, bucket, key, src_filename, maxSingleUpload):
-
-        if not os.path.isfile(src_filename):
-            raise Exception(
-                "Attempt to put " + src_filename + " that does not exist.")
-
-        size = os.path.getsize(src_filename)
-        if size > maxSingleUpload or maxSingleUpload == 0:
-            return self.multiUpload(bucket, key, src_filename)
-           
-        headers = {
-            self.HEADER_CONTENT_MD5: compute_md5(src_filename),
-        
-            self.HEADER_CONTENT_TYPE: 'application/octet-stream',
-            self.HEADER_CONTENT_LENGTH: str(os.stat(src_filename).st_size),
-        }
-
-        def send_body(connection):
-            src_file = open(src_filename, 'rb')
-            try:
-                while True:
-                    block = src_file.read(8192)
-                    if not block:
-                        break
-                    connection.send(block)
-
-            except:
-                src_file.close()
-
-        self.do_operation('PUT', bucket, key, headers, send_body)
-
-    def get(self, bucket, key, target_filename):
-
-        def read(response):
-
-            file = open(target_filename, 'wb')
-
-            try:
-
-                while True:
-                    block = response.read(8192)
-                    if not block:
-                        break
-                    file.write(block)
-            except:
-
-                file.close()
-
-        return self.do_operation('GET', bucket, key, fn_read=read)
-
-    def delete(self, bucket, key):
-
-        return self.do_operation('DELETE', bucket, key)
-
-
-def parseArguments(args):
-
-    # The keys in the args map will correspond to the properties defined on
-    # the com.cloud.utils.S3Utils#ClientOptions interface
-    client = S3Client(
-        args['accessKey'], args['secretKey'], args['endPoint'],
-        args['https'], args['connectionTimeout'], args['socketTimeout'])
-
-    operation = args['operation']
-    bucket = args['bucket']
-    key = args['key']
-    filename = args['filename']
-    maxSingleUploadBytes = int(args["maxSingleUploadSizeInBytes"])
-
-    if is_blank(operation):
-        raise ValueError('An operation must be specified.')
-
-    if is_blank(bucket):
-        raise ValueError('A bucket must be specified.')
-
-    if is_blank(key):
-        raise ValueError('A value must be specified.')
-
-    if is_blank(filename):
-        raise ValueError('A filename must be specified.')
-
-    return client, operation, bucket, key, filename, maxSingleUploadBytes
-
-
-@echo
-def s3(session, args):
-
-    client, operation, bucket, key, filename, maxSingleUploadBytes = parseArguments(args)
-
-    try:
-
-        if operation == 'put':
-            client.put(bucket, key, filename, maxSingleUploadBytes)
-        elif operation == 'get':
-            client.get(bucket, key, filename)
-        elif operation == 'delete':
-            client.delete(bucket, key, filename)
-        else:
-            raise RuntimeError(
-                "S3 plugin does not support operation " + operation)
-
-        return 'true'
-
-    except:
-        log("Operation " + operation + " on file " + filename +
-            " from/in bucket " + bucket + " key " + key)
-        log(traceback.format_exc())
-        return 'false'
-
-if __name__ == "__main__":
-    XenAPIPlugin.dispatch({"s3": s3})

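The S3 client in the file removed above signs each request with the AWS signature version 2 scheme: an HMAC-SHA1 over the newline-joined method, Content-MD5, Content-Type, date and canonicalized resource, base64-encoded and sent in the Authorization header. A standalone sketch of that signing step (the keys below are placeholders):

# Illustrative sketch of the AWS signature v2 computation used by this plugin.
# The access and secret keys are placeholders.
import base64
import hashlib
import hmac
from datetime import datetime

def sign_request(secret_key, method, resource, content_md5="", content_type=""):
    request_date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S +0000')
    string_to_sign = "\n".join(
        [method, content_md5, content_type, request_date, resource])
    digest = hmac.new(secret_key, string_to_sign, hashlib.sha1).digest()
    return base64.b64encode(digest), request_date

if __name__ == "__main__":
    signature, date = sign_request("SECRET_KEY_PLACEHOLDER", "GET",
                                   "/example-bucket/example/key")
    print "Date: %s" % date
    print "Authorization: AWS %s:%s" % ("ACCESS_KEY_PLACEHOLDER", signature)
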
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/setup_heartbeat_file.sh
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/setup_heartbeat_file.sh b/scripts/vm/hypervisor/xenserver/setup_heartbeat_file.sh
index fdb3099..e6425d8 100755
--- a/scripts/vm/hypervisor/xenserver/setup_heartbeat_file.sh
+++ b/scripts/vm/hypervisor/xenserver/setup_heartbeat_file.sh
@@ -58,7 +58,7 @@ if [ `xe pbd-list sr-uuid=$2 | grep -B 1 $1 | wc -l` -eq 0 ]; then
   exit 0
 fi
 
-hbfile=/opt/cloudstack/bin/heartbeat
+hbfile=/etc/cloud/heartbeat
 
 if [ "$3" = "true" ]; then
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/f2bb1ace/scripts/vm/hypervisor/xenserver/setupxenserver.sh
----------------------------------------------------------------------
diff --git a/scripts/vm/hypervisor/xenserver/setupxenserver.sh b/scripts/vm/hypervisor/xenserver/setupxenserver.sh
index 61ce90b..14a1d46 100755
--- a/scripts/vm/hypervisor/xenserver/setupxenserver.sh
+++ b/scripts/vm/hypervisor/xenserver/setupxenserver.sh
@@ -55,7 +55,7 @@ mv -n /etc/cron.daily/logrotate /etc/cron.hourly 2>&1
 echo 1048576 >/proc/sys/fs/aio-max-nr
 
 # empty heartbeat
-cat /dev/null > /opt/cloudstack/bin/heartbeat
+cat /dev/null > /etc/cloud/heartbeat
 # empty knownhost
 cat /dev/null > /root/.ssh/known_hosts
 

