cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From GitBox <...@apache.org>
Subject [GitHub] rhtyd closed pull request #2146: CLOUDSTACK-4757: Support OVA files with multiple disks for templates
Date Thu, 01 Jan 1970 00:00:00 GMT
rhtyd closed pull request #2146: CLOUDSTACK-4757: Support OVA files with multiple disks for templates
URL: https://github.com/apache/cloudstack/pull/2146
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):

diff --git a/api/src/com/cloud/agent/api/storage/OVFHelper.java b/api/src/com/cloud/agent/api/storage/OVFHelper.java
new file mode 100644
index 00000000000..762c8c79915
--- /dev/null
+++ b/api/src/com/cloud/agent/api/storage/OVFHelper.java
@@ -0,0 +1,355 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api.storage;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+
+import com.cloud.configuration.Resource.ResourceType;
+import com.cloud.exception.InternalErrorException;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.math.NumberUtils;
+import org.apache.log4j.Logger;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+import org.xml.sax.SAXException;
+
+import com.cloud.agent.api.to.DatadiskTO;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class OVFHelper {
+    private static final Logger s_logger = Logger.getLogger(OVFHelper.class);
+
+    /**
+     * Get disk virtual size given its values on fields: 'ovf:capacity' and 'ovf:capacityAllocationUnits'
+     * @param capacity capacity
+     * @param allocationUnits capacity allocation units
+     * @return disk virtual size
+     */
+    public static Long getDiskVirtualSize(Long capacity, String allocationUnits, String ovfFilePath) throws InternalErrorException {
+        if ((capacity != 0) && (allocationUnits != null)) {
+            long units = 1;
+            if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) {
+                units = ResourceType.bytesToKiB;
+            } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) {
+                units = ResourceType.bytesToMiB;
+            } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) {
+                units = ResourceType.bytesToGiB;
+            }
+            return capacity * units;
+        } else {
+            throw new InternalErrorException("Failed to read capacity and capacityAllocationUnits from the OVF file: " + ovfFilePath);
+        }
+    }
+
+    public List<DatadiskTO> getOVFVolumeInfo(final String ovfFilePath) {
+        if (StringUtils.isBlank(ovfFilePath)) {
+            return new ArrayList<DatadiskTO>();
+        }
+        ArrayList<OVFFile> vf = new ArrayList<OVFFile>();
+        ArrayList<OVFDisk> vd = new ArrayList<OVFDisk>();
+
+        File ovfFile = new File(ovfFilePath);
+        try {
+            final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(ovfFilePath));
+            NodeList disks = doc.getElementsByTagName("Disk");
+            NodeList files = doc.getElementsByTagName("File");
+            NodeList items = doc.getElementsByTagName("Item");
+            boolean toggle = true;
+            for (int j = 0; j < files.getLength(); j++) {
+                Element file = (Element)files.item(j);
+                OVFFile of = new OVFFile();
+                of._href = file.getAttribute("ovf:href");
+                if (of._href.endsWith("vmdk") || of._href.endsWith("iso")) {
+                    s_logger.info("MDOVA getOVFVolumeInfo File href = " + of._href);
+                    of._id = file.getAttribute("ovf:id");
+                    s_logger.info("MDOVA getOVFVolumeInfo File Id = " + of._id);
+                    String size = file.getAttribute("ovf:size");
+                    if (StringUtils.isNotBlank(size)) {
+                        of._size = Long.parseLong(size);
+                    } else {
+                        String dataDiskPath = ovfFile.getParent() + File.separator + of._href;
+                        File this_file = new File(dataDiskPath);
+                        of._size = this_file.length();
+                    }
+                    of.isIso = of._href.endsWith("iso");
+                    if (toggle && !of.isIso) {
+                        of._bootable = true;
+                        toggle = !toggle;
+                    }
+                    vf.add(of);
+                }
+            }
+            for (int i = 0; i < disks.getLength(); i++) {
+                Element disk = (Element)disks.item(i);
+                OVFDisk od = new OVFDisk();
+                String virtualSize = disk.getAttribute("ovf:capacity");
+                od._capacity = NumberUtils.toLong(virtualSize, 0L);
+                String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits");
+                od._diskId = disk.getAttribute("ovf:diskId");
+                s_logger.info("MDOVA getOVFVolumeInfo Disk ovf:diskId  = " + od._diskId);
+                od._fileRef = disk.getAttribute("ovf:fileRef");
+                s_logger.info("MDOVA getOVFVolumeInfo Disk ovf:fileRef  = " + od._fileRef);
+                od._populatedSize = Long.parseLong(disk.getAttribute("ovf:populatedSize") == null ? "0" : disk.getAttribute("ovf:populatedSize"));
+                s_logger.info("MDOVA getOVFVolumeInfo Disk _populatedSize  = " + od._populatedSize);
+
+                if ((od._capacity != 0) && (allocationUnits != null)) {
+
+                    long units = 1;
+                    if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) {
+                        units = ResourceType.bytesToKiB;
+                    } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) {
+                        units = ResourceType.bytesToMiB;
+                    } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) {
+                        units = ResourceType.bytesToGiB;
+                    }
+                    od._capacity = od._capacity * units;
+                    s_logger.info("MDOVA getOVFVolumeInfo Disk _capacity  = " + od._capacity);
+                }
+                od._controller = getControllerType(items, od._diskId);
+                vd.add(od);
+            }
+
+        } catch (SAXException | IOException | ParserConfigurationException e) {
+            s_logger.error("Unexpected exception caught while parsing ovf file:" + ovfFilePath, e);
+            throw new CloudRuntimeException(e);
+        }
+
+        List<DatadiskTO> disksTO = new ArrayList<DatadiskTO>();
+        for (OVFFile of : vf) {
+            if (StringUtils.isBlank(of._id)){
+                s_logger.error("The ovf file info has incomplete file info");
+                throw new CloudRuntimeException("The ovf file info has incomplete file info");
+            }
+            OVFDisk cdisk = getDisk(of._id, vd);
+            if (cdisk == null && !of.isIso){
+                s_logger.error("The ovf file info has incomplete disk info");
+                throw new CloudRuntimeException("The ovf file info has incomplete disk info");
+            }
+            Long capacity = cdisk == null ? of._size : cdisk._capacity;
+            String controller = cdisk == null ? "" : cdisk._controller._name;
+            String controllerSubType = cdisk == null ? "" : cdisk._controller._subType;
+            String dataDiskPath = ovfFile.getParent() + File.separator + of._href;
+            s_logger.info("MDOVA getOVFVolumeInfo diskName = " + of._href + ", dataDiskPath = " + dataDiskPath);
+            File f = new File(dataDiskPath);
+            if (!f.exists() || f.isDirectory()) {
+                s_logger.error("One of the attached disk or iso does not exist " + dataDiskPath);
+                throw new CloudRuntimeException("One of the attached disk or iso as stated on OVF does not exist " + dataDiskPath);
+            }
+            disksTO.add(new DatadiskTO(dataDiskPath, capacity, of._size, of._id, of.isIso, of._bootable, controller, controllerSubType));
+        }
+        //check if first disk is an iso move it to the end
+        DatadiskTO fd = disksTO.get(0);
+        if (fd.isIso()) {
+            disksTO.remove(0);
+            disksTO.add(fd);
+        }
+        return disksTO;
+    }
+
+    private OVFDiskController getControllerType(final NodeList itemList, final String diskId) {
+        for (int k = 0; k < itemList.getLength(); k++) {
+            Element item = (Element)itemList.item(k);
+            NodeList cn = item.getChildNodes();
+            for (int l = 0; l < cn.getLength(); l++) {
+                if (cn.item(l) instanceof Element) {
+                    Element el = (Element)cn.item(l);
+                    if ("rasd:HostResource".equals(el.getNodeName())
+                            && (el.getTextContent().contains("ovf:/file/" + diskId) || el.getTextContent().contains("ovf:/disk/" + diskId))) {
+                        Element oe = getParentNode(itemList, item);
+                        Element voe = oe;
+                        while (oe != null) {
+                            voe = oe;
+                            oe = getParentNode(itemList, voe);
+                        }
+                        return getController(voe);
+                    }
+                }
+            }
+        }
+        return null;
+    }
+
+    private Element getParentNode(final NodeList itemList, final Element childItem) {
+        NodeList cn = childItem.getChildNodes();
+        String parent_id = null;
+        for (int l = 0; l < cn.getLength(); l++) {
+            if (cn.item(l) instanceof Element) {
+                Element el = (Element)cn.item(l);
+                if ("rasd:Parent".equals(el.getNodeName())) {
+                    s_logger.info("MDOVA parent id " + el.getTextContent());
+                    parent_id = el.getTextContent();
+                }
+            }
+        }
+        if (parent_id != null) {
+            for (int k = 0; k < itemList.getLength(); k++) {
+                Element item = (Element)itemList.item(k);
+                NodeList child = item.getChildNodes();
+                for (int l = 0; l < child.getLength(); l++) {
+                    if (child.item(l) instanceof Element) {
+                        Element el = (Element)child.item(l);
+                        if ("rasd:InstanceID".equals(el.getNodeName()) && el.getTextContent().trim().equals(parent_id)) {
+                            s_logger.info("MDOVA matching parent entry " + el.getTextContent());
+                            return item;
+                        }
+                    }
+                }
+            }
+        }
+        return null;
+    }
+
+    private OVFDiskController getController(Element controllerItem) {
+        OVFDiskController dc = new OVFDiskController();
+        NodeList child = controllerItem.getChildNodes();
+        for (int l = 0; l < child.getLength(); l++) {
+            if (child.item(l) instanceof Element) {
+                Element el = (Element)child.item(l);
+                if ("rasd:ElementName".equals(el.getNodeName())) {
+                    s_logger.info("MDOVA controller name " + el.getTextContent());
+                    dc._name = el.getTextContent();
+                }
+                if ("rasd:ResourceSubType".equals(el.getNodeName())) {
+                    s_logger.info("MDOVA controller sub type " + el.getTextContent());
+                    dc._subType = el.getTextContent();
+                }
+            }
+        }
+        return dc;
+    }
+
+    public void rewriteOVFFile(final String origOvfFilePath, final String newOvfFilePath, final String diskName) {
+        try {
+            final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(origOvfFilePath));
+            NodeList disks = doc.getElementsByTagName("Disk");
+            NodeList files = doc.getElementsByTagName("File");
+            NodeList items = doc.getElementsByTagName("Item");
+            String keepfile = null;
+            List<Element> toremove = new ArrayList<Element>();
+            for (int j = 0; j < files.getLength(); j++) {
+                Element file = (Element)files.item(j);
+                String href = file.getAttribute("ovf:href");
+                s_logger.info("MDOVA rewriteOVFFile href= " + href);
+                if (diskName.equals(href)) {
+                    keepfile = file.getAttribute("ovf:id");
+                    s_logger.info("MDOVA rewriteOVFFile keeping file = " + file.getAttribute("ovf:id"));
+                } else {
+                    s_logger.info("MDOVA rewriteOVFFile removing file = " + file.getAttribute("ovf:id"));
+                    toremove.add(file);
+                }
+            }
+            String keepdisk = null;
+            for (int i = 0; i < disks.getLength(); i++) {
+                Element disk = (Element)disks.item(i);
+                String fileRef = disk.getAttribute("ovf:fileRef");
+                if (keepfile == null) {
+                    s_logger.info("FATAL: OVA format error");
+                } else if (keepfile.equals(fileRef)) {
+                    s_logger.info("MDOVA rewriteOVFFile keeping disk = " + fileRef);
+                    keepdisk = disk.getAttribute("ovf:diskId");
+                } else {
+                    s_logger.info("MDOVA rewriteOVFFile removing disk = " + fileRef);
+                    s_logger.info("MDOVA rewriteOVFFile id = " + disk.getAttribute("ovf:diskId"));
+                    toremove.add(disk);
+                }
+            }
+            for (int k = 0; k < items.getLength(); k++) {
+                Element item = (Element)items.item(k);
+                NodeList cn = item.getChildNodes();
+                for (int l = 0; l < cn.getLength(); l++) {
+                    if (cn.item(l) instanceof Element) {
+                        Element el = (Element)cn.item(l);
+                        if ("rasd:HostResource".equals(el.getNodeName())
+                                && !(el.getTextContent().contains("ovf:/file/" + keepdisk) || el.getTextContent().contains("ovf:/disk/" + keepdisk))) {
+                            s_logger.info("MDOVA to remove " + el.getTextContent());
+                            toremove.add(item);
+                            break;
+                        }
+                    }
+                }
+            }
+
+            for (Element rme : toremove) {
+                s_logger.info("MDOVA remove " + rme.getTagName());
+                if (rme.getParentNode() != null) {
+                    rme.getParentNode().removeChild(rme);
+                }
+            }
+
+            final StringWriter writer = new StringWriter();
+            final StreamResult result = new StreamResult(writer);
+            final TransformerFactory tf = TransformerFactory.newInstance();
+            final Transformer transformer = tf.newTransformer();
+            final DOMSource domSource = new DOMSource(doc);
+            transformer.transform(domSource, result);
+            PrintWriter outfile = new PrintWriter(newOvfFilePath);
+            outfile.write(writer.toString());
+            outfile.close();
+        } catch (SAXException | IOException | ParserConfigurationException | TransformerException e) {
+            s_logger.info("Unexpected exception caught while rewriting OVF file:" + e.getMessage(), e);
+            throw new CloudRuntimeException(e);
+        }
+    }
+
+    OVFDisk getDisk(String fileRef, List<OVFDisk> disks) {
+        for (OVFDisk disk : disks) {
+            if (disk._fileRef.equals(fileRef)) {
+                return disk;
+            }
+        }
+        return null;
+    }
+
+    class OVFFile {
+        // <File ovf:href="i-2-8-VM-disk2.vmdk" ovf:id="file1" ovf:size="69120" />
+        public String _href;
+        public String _id;
+        public Long _size;
+        public boolean _bootable;
+        public boolean isIso;
+    }
+
+    class OVFDisk {
+        //<Disk ovf:capacity="50" ovf:capacityAllocationUnits="byte * 2^20" ovf:diskId="vmdisk2" ovf:fileRef="file2"
+        //ovf:format="http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" ovf:populatedSize="43319296" />
+        public Long _capacity;
+        public String _capacityUnit;
+        public String _diskId;
+        public String _fileRef;
+        public Long _populatedSize;
+        public OVFDiskController _controller;
+    }
+
+    class OVFDiskController {
+        public String _name;
+        public String _subType;
+    }
+}
diff --git a/api/src/com/cloud/agent/api/to/DatadiskTO.java b/api/src/com/cloud/agent/api/to/DatadiskTO.java
new file mode 100644
index 00000000000..1d3f91e25db
--- /dev/null
+++ b/api/src/com/cloud/agent/api/to/DatadiskTO.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package com.cloud.agent.api.to;
+
+public class DatadiskTO {
+    private String path;
+    private long virtualSize;
+    private long fileSize;
+    boolean bootable;
+    private String diskId;
+    private boolean isIso;
+    private String diskController;
+    private String diskControllerSubType;
+
+    public DatadiskTO() {
+    }
+
+    public DatadiskTO(String path, long virtualSize, long fileSize, boolean bootable) {
+        this.path = path;
+        this.virtualSize = virtualSize;
+        this.fileSize = fileSize;
+        this.bootable = bootable;
+    }
+
+    public DatadiskTO(String path, long virtualSize, long fileSize, String diskId, boolean isIso, boolean bootable, String controller, String controllerSubType) {
+        this.path = path;
+        this.virtualSize = virtualSize;
+        this.fileSize = fileSize;
+        this.bootable = bootable;
+        this.diskId = diskId;
+        this.isIso = isIso;
+        this.diskController = controller;
+        this.diskControllerSubType = controllerSubType;
+    }
+
+    public String getPath() {
+        return path;
+    }
+
+    public void setPath(String path) {
+        this.path = path;
+    }
+
+    public Long getVirtualSize() {
+        return virtualSize;
+    }
+
+    public void setVirtualSize(Long virtualSize) {
+        this.virtualSize = virtualSize;
+    }
+
+    public Long getFileSize() {
+        return fileSize;
+    }
+
+    public boolean isBootable() {
+        return bootable;
+    }
+
+    public String getDiskId() {
+        return diskId;
+    }
+
+    public void setDiskId(String diskId) {
+        this.diskId = diskId;
+    }
+
+    public boolean isIso() {
+        return isIso;
+    }
+
+    public void setIso(boolean isIso) {
+        this.isIso = isIso;
+    }
+
+    public String getDiskController() {
+        return diskController;
+    }
+
+    public void setDiskController(String diskController) {
+        this.diskController = diskController;
+    }
+
+    public String getDiskControllerSubType() {
+        return diskControllerSubType;
+    }
+
+    public void setDiskControllerSubType(String diskControllerSubType) {
+        this.diskControllerSubType = diskControllerSubType;
+    }
+
+}
\ No newline at end of file
diff --git a/api/src/com/cloud/configuration/Resource.java b/api/src/com/cloud/configuration/Resource.java
index 7ef1b0bb90e..0fd51dc2f17 100644
--- a/api/src/com/cloud/configuration/Resource.java
+++ b/api/src/com/cloud/configuration/Resource.java
@@ -37,6 +37,8 @@
         private String name;
         private ResourceOwnerType[] supportedOwners;
         private int ordinal;
+        public static final long bytesToKiB = 1024;
+        public static final long bytesToMiB = 1024 * 1024;
         public static final long bytesToGiB = 1024 * 1024 * 1024;
 
         ResourceType(String name, int ordinal, ResourceOwnerType... supportedOwners) {
diff --git a/api/src/com/cloud/storage/Storage.java b/api/src/com/cloud/storage/Storage.java
index f588aeaf4b3..9093dc34f14 100644
--- a/api/src/com/cloud/storage/Storage.java
+++ b/api/src/com/cloud/storage/Storage.java
@@ -113,7 +113,9 @@ public static ProvisioningType getProvisioningType(String provisioningType){
         SYSTEM, /* routing, system vm template */
         BUILTIN, /* buildin template */
         PERHOST, /* every host has this template, don't need to install it in secondary storage */
-        USER /* User supplied template/iso */
+        USER, /* User supplied template/iso */
+        DATADISK, /* Template corresponding to a datadisk (non root disk) present in an OVA */
+        ISODISK /* Template corresponding to an iso (non root disk) present in an OVA */
     }
 
     public static enum StoragePoolType {
diff --git a/api/src/com/cloud/template/VirtualMachineTemplate.java b/api/src/com/cloud/template/VirtualMachineTemplate.java
index 54d61a4597b..564f3b987be 100644
--- a/api/src/com/cloud/template/VirtualMachineTemplate.java
+++ b/api/src/com/cloud/template/VirtualMachineTemplate.java
@@ -133,6 +133,8 @@
 
     boolean isDynamicallyScalable();
 
+    Long getParentTemplateId();
+
     long getUpdatedCount();
 
     void incrUpdatedCount();
diff --git a/api/src/com/cloud/vm/DiskProfile.java b/api/src/com/cloud/vm/DiskProfile.java
index a37f7aaf57b..d9097748363 100644
--- a/api/src/com/cloud/vm/DiskProfile.java
+++ b/api/src/com/cloud/vm/DiskProfile.java
@@ -139,6 +139,10 @@ public Long getTemplateId() {
         return templateId;
     }
 
+    public void setTemplateId(Long templateId) {
+        this.templateId = templateId;
+    }
+
     /**
      * @return disk offering id that the disk is based on.
      */
diff --git a/api/src/com/cloud/vm/UserVmService.java b/api/src/com/cloud/vm/UserVmService.java
index 178840bfe0b..74090ec40e6 100644
--- a/api/src/com/cloud/vm/UserVmService.java
+++ b/api/src/com/cloud/vm/UserVmService.java
@@ -50,6 +50,7 @@
 import com.cloud.host.Host;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.network.Network.IpAddresses;
+import com.cloud.offering.DiskOffering;
 import com.cloud.offering.ServiceOffering;
 import com.cloud.storage.StoragePool;
 import com.cloud.template.VirtualMachineTemplate;
@@ -197,6 +198,11 @@ UserVm startVirtualMachine(StartVMCmd cmd) throws StorageUnavailableException, E
      * @param dhcpOptionMap
      *           - Maps the dhcp option code and the dhcp value to the network uuid
      * @return UserVm object if successful.
+     * @param dataDiskTemplateToDiskOfferingMap
+     *            - Datadisk template to Disk offering Map
+     *             an optional parameter that creates additional data disks for the virtual machine
+     *             For each of the templates in the map, a data disk will be created from the corresponding
+     *             disk offering obtained from the map
      *
      * @throws InsufficientCapacityException
      *             if there is insufficient capacity to deploy the VM.
@@ -210,7 +216,8 @@ UserVm startVirtualMachine(StartVMCmd cmd) throws StorageUnavailableException, E
     UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Long> securityGroupIdList,
         Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod,
         String userData, String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIp, Boolean displayVm, String keyboard,
-        List<Long> affinityGroupIdList, Map<String, String> customParameter, String customId, Map<String, Map<Integer, String>> dhcpOptionMap) throws InsufficientCapacityException,
+        List<Long> affinityGroupIdList, Map<String, String> customParameter, String customId, Map<String, Map<Integer, String>> dhcpOptionMap,
+        Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException,
         ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException;
 
     /**
@@ -271,6 +278,11 @@ UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering s
      * @param customId
      * @param dhcpOptionMap
      *             - Maps the dhcp option code and the dhcp value to the network uuid
+     * @param dataDiskTemplateToDiskOfferingMap
+     *            - Datadisk template to Disk offering Map
+     *             an optional parameter that creates additional data disks for the virtual machine
+     *             For each of the templates in the map, a data disk will be created from the corresponding
+     *             disk offering obtained from the map
      * @return UserVm object if successful.
      *
      * @throws InsufficientCapacityException
@@ -285,7 +297,8 @@ UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering s
     UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Long> networkIdList,
         List<Long> securityGroupIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor,
         HTTPMethod httpmethod, String userData, String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard,
-        List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap) throws InsufficientCapacityException,
+        List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap,
+        Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException,
         ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException;
 
     /**
@@ -344,6 +357,11 @@ UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, ServiceOfferin
      * @param customId
      * @param dhcpOptionMap
      *             - Map that maps the DhcpOption code and their value on the Network uuid
+     * @param dataDiskTemplateToDiskOfferingMap
+     *            - Datadisk template to Disk offering Map
+     *             an optional parameter that creates additional data disks for the virtual machine
+     *             For each of the templates in the map, a data disk will be created from the corresponding
+     *             disk offering obtained from the map
      * @return UserVm object if successful.
      *
      * @throws InsufficientCapacityException
@@ -358,7 +376,7 @@ UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, ServiceOfferin
     UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Long> networkIdList, Account owner,
         String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData,
         String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List<Long> affinityGroupIdList,
-        Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap)
+        Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap, Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap)
 
         throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException;
 
diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java
index 0e275b5de15..283388ca337 100644
--- a/api/src/org/apache/cloudstack/api/ApiConstants.java
+++ b/api/src/org/apache/cloudstack/api/ApiConstants.java
@@ -79,6 +79,7 @@
     public static final String MIN_IOPS = "miniops";
     public static final String MAX_IOPS = "maxiops";
     public static final String HYPERVISOR_SNAPSHOT_RESERVE = "hypervisorsnapshotreserve";
+    public static final String DATADISK_OFFERING_LIST = "datadiskofferinglist";
     public static final String DESCRIPTION = "description";
     public static final String DESTINATION_ZONE_ID = "destzoneid";
     public static final String DETAILS = "details";
@@ -207,6 +208,7 @@
     public static final String PARAMS = "params";
     public static final String PARENT_ID = "parentid";
     public static final String PARENT_DOMAIN_ID = "parentdomainid";
+    public static final String PARENT_TEMPLATE_ID = "parenttemplateid";
     public static final String PASSWORD = "password";
     public static final String SHOULD_UPDATE_PASSWORD = "update_passwd_on_host";
     public static final String NEW_PASSWORD = "new_password";
diff --git a/api/src/org/apache/cloudstack/api/ResponseGenerator.java b/api/src/org/apache/cloudstack/api/ResponseGenerator.java
index 7b33ebbea7a..4fb248cd105 100644
--- a/api/src/org/apache/cloudstack/api/ResponseGenerator.java
+++ b/api/src/org/apache/cloudstack/api/ResponseGenerator.java
@@ -286,6 +286,8 @@
 
     Host findHostById(Long hostId);
 
+    DiskOffering findDiskOfferingById(Long diskOfferingId);
+
     VpnUsersResponse createVpnUserResponse(VpnUser user);
 
     RemoteAccessVpnResponse createRemoteAccessVpnResponse(RemoteAccessVpn vpn);
diff --git a/api/src/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java b/api/src/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java
index 772ca2749e7..e7d328495ed 100644
--- a/api/src/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java
@@ -72,9 +72,12 @@
     @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "list templates by zoneId")
     private Long zoneId;
 
-    @Parameter(name=ApiConstants.SHOW_REMOVED, type=CommandType.BOOLEAN, description="show removed templates as well")
+    @Parameter(name = ApiConstants.SHOW_REMOVED, type = CommandType.BOOLEAN, description = "show removed templates as well")
     private Boolean showRemoved;
 
+    @Parameter(name = ApiConstants.PARENT_TEMPLATE_ID, type = CommandType.UUID, entityType = TemplateResponse.class, description = "list datadisk templates by parent template id", since = "4.11")
+    private Long parentTemplateId;
+
     /////////////////////////////////////////////////////
     /////////////////// Accessors ///////////////////////
     /////////////////////////////////////////////////////
@@ -103,6 +106,10 @@ public Boolean getShowRemoved() {
         return (showRemoved != null ? showRemoved : false);
     }
 
+    public Long getParentTemplateId() {
+        return parentTemplateId;
+    }
+
     public boolean listInReadyState() {
 
         Account account = CallContext.current().getCallingAccount();
diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java
index 548a89d6240..bfe6b0d6c7f 100644
--- a/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java
+++ b/api/src/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java
@@ -46,6 +46,7 @@
 import org.apache.cloudstack.api.response.UserVmResponse;
 import org.apache.cloudstack.api.response.ZoneResponse;
 import org.apache.cloudstack.context.CallContext;
+import org.apache.commons.collections.MapUtils;
 import org.apache.log4j.Logger;
 
 import com.cloud.event.EventTypes;
@@ -58,6 +59,8 @@
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.network.Network;
 import com.cloud.network.Network.IpAddresses;
+import com.cloud.offering.DiskOffering;
+import com.cloud.template.VirtualMachineTemplate;
 import com.cloud.uservm.UserVm;
 import com.cloud.utils.net.Dhcp;
 import com.cloud.utils.net.NetUtils;
@@ -192,6 +195,10 @@
             + " Example: dhcpoptionsnetworklist[0].dhcp:114=url&dhcpoptionsetworklist[0].networkid=networkid&dhcpoptionsetworklist[0].dhcp:66=www.test.com")
     private Map dhcpOptionsNetworkList;
 
+    @Parameter(name = ApiConstants.DATADISK_OFFERING_LIST, type = CommandType.MAP, since = "4.11", description = "datadisk template to disk-offering mapping;" +
+            " an optional parameter used to create additional data disks from datadisk templates; can't be specified with diskOfferingId parameter")
+    private Map dataDiskTemplateToDiskOfferingList;
+
     /////////////////////////////////////////////////////
     /////////////////// Accessors ///////////////////////
     /////////////////////////////////////////////////////
@@ -417,10 +424,10 @@ public String getKeyboard() {
         if (dhcpOptionsNetworkList != null && !dhcpOptionsNetworkList.isEmpty()) {
 
             Collection<Map<String, String>> paramsCollection = this.dhcpOptionsNetworkList.values();
-            for(Map<String, String> dhcpNetworkOptions : paramsCollection) {
+            for (Map<String, String> dhcpNetworkOptions : paramsCollection) {
                 String networkId = dhcpNetworkOptions.get(ApiConstants.NETWORK_ID);
 
-                if(networkId == null) {
+                if (networkId == null) {
                     throw new IllegalArgumentException("No networkid specified when providing extra dhcp options.");
                 }
 
@@ -431,9 +438,9 @@ public String getKeyboard() {
                     if (key.startsWith(ApiConstants.DHCP_PREFIX)) {
                         int dhcpOptionValue = Integer.parseInt(key.replaceFirst(ApiConstants.DHCP_PREFIX, ""));
                         dhcpOptionsForNetwork.put(dhcpOptionValue, dhcpNetworkOptions.get(key));
-                    } else if (!key.equals(ApiConstants.NETWORK_ID)){
-                            Dhcp.DhcpOptionCode dhcpOptionEnum = Dhcp.DhcpOptionCode.valueOfString(key);
-                            dhcpOptionsForNetwork.put(dhcpOptionEnum.getCode(), dhcpNetworkOptions.get(key));
+                    } else if (!key.equals(ApiConstants.NETWORK_ID)) {
+                        Dhcp.DhcpOptionCode dhcpOptionEnum = Dhcp.DhcpOptionCode.valueOfString(key);
+                        dhcpOptionsForNetwork.put(dhcpOptionEnum.getCode(), dhcpNetworkOptions.get(key));
                     }
                 }
 
@@ -443,6 +450,37 @@ public String getKeyboard() {
         return dhcpOptionsMap;
     }
 
+    /**
+     * Resolves the "datadiskofferinglist" map parameter into a
+     * datadisk-template-id -> DiskOffering map. Each entry of the raw map must
+     * carry a "datadisktemplateid" and a "diskofferingid"; both are resolved
+     * first by UUID, then by internal id as a fallback.
+     *
+     * @return the resolved map; empty (never null) when the parameter was not supplied
+     * @throws InvalidParameterValueException if diskofferingid was also supplied
+     *         (the two parameters are mutually exclusive), or if either id
+     *         cannot be resolved to an entity
+     */
+    public Map<Long, DiskOffering> getDataDiskTemplateToDiskOfferingMap() {
+        if (diskOfferingId != null && dataDiskTemplateToDiskOfferingList != null) {
+            throw new InvalidParameterValueException("diskofferingid parameter can't be specified along with datadiskofferinglist parameter");
+        }
+        if (MapUtils.isEmpty(dataDiskTemplateToDiskOfferingList)) {
+            return new HashMap<Long, DiskOffering>();
+        }
+
+        HashMap<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap = new HashMap<Long, DiskOffering>();
+        for (Object objDataDiskTemplates : dataDiskTemplateToDiskOfferingList.values()) {
+            @SuppressWarnings("unchecked")
+            Map<String, String> dataDiskTemplates = (Map<String, String>) objDataDiskTemplates;
+            String dataDiskTemplateUuid = dataDiskTemplates.get("datadisktemplateid");
+            String diskOfferingUuid = dataDiskTemplates.get("diskofferingid");
+            // Prefer UUID lookup; fall back to internal id for backward compatibility.
+            VirtualMachineTemplate dataDiskTemplate = _entityMgr.findByUuid(VirtualMachineTemplate.class, dataDiskTemplateUuid);
+            if (dataDiskTemplate == null) {
+                dataDiskTemplate = _entityMgr.findById(VirtualMachineTemplate.class, dataDiskTemplateUuid);
+                if (dataDiskTemplate == null) {
+                    throw new InvalidParameterValueException("Unable to translate and find entity with datadisktemplateid " + dataDiskTemplateUuid);
+                }
+            }
+            DiskOffering dataDiskOffering = _entityMgr.findByUuid(DiskOffering.class, diskOfferingUuid);
+            if (dataDiskOffering == null) {
+                dataDiskOffering = _entityMgr.findById(DiskOffering.class, diskOfferingUuid);
+                if (dataDiskOffering == null) {
+                    throw new InvalidParameterValueException("Unable to translate and find entity with diskofferingId " + diskOfferingUuid);
+                }
+            }
+            dataDiskTemplateToDiskOfferingMap.put(dataDiskTemplate.getId(), dataDiskOffering);
+        }
+        return dataDiskTemplateToDiskOfferingMap;
+    }
+
     /////////////////////////////////////////////////////
     /////////////// API Implementation///////////////////
     /////////////////////////////////////////////////////
diff --git a/api/src/org/apache/cloudstack/api/response/ChildTemplateResponse.java b/api/src/org/apache/cloudstack/api/response/ChildTemplateResponse.java
new file mode 100644
index 00000000000..b036cd48e87
--- /dev/null
+++ b/api/src/org/apache/cloudstack/api/response/ChildTemplateResponse.java
@@ -0,0 +1,66 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.api.response;
+
+import org.apache.cloudstack.api.ApiConstants;
+import org.apache.cloudstack.api.BaseResponse;
+import org.apache.cloudstack.api.EntityReference;
+
+import com.cloud.serializer.Param;
+import com.cloud.template.VirtualMachineTemplate;
+import com.google.gson.annotations.SerializedName;
+
+/**
+ * Response object describing a data-disk ("child") template created from an
+ * additional disk of a multi-disk OVA template. Instances are embedded in
+ * TemplateResponse under the "childtemplates" field.
+ */
+@EntityReference(value = VirtualMachineTemplate.class)
+@SuppressWarnings("unused")
+public class ChildTemplateResponse extends BaseResponse {
+    @SerializedName(ApiConstants.ID)
+    @Param(description = "the template ID")
+    private String id;
+
+    @SerializedName(ApiConstants.NAME)
+    @Param(description = "the template name")
+    private String name;
+
+    @SerializedName(ApiConstants.SIZE)
+    @Param(description = "the size of the template")
+    // NOTE(review): declared Integer while template sizes elsewhere are long;
+    // if this is a byte count it overflows above ~2GB — confirm the unit/type.
+    private Integer size;
+
+    @SerializedName("templatetype")
+    @Param(description = "the type of the template")
+    private String templateType;
+
+    public String getId() {
+        return id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public void setName(String name) {
+        this.name = name;
+    }
+
+    public void setSize(Integer size) {
+        this.size = size;
+    }
+
+    public void setTemplateType(String templateType) {
+        this.templateType = templateType;
+    }
+
+}
diff --git a/api/src/org/apache/cloudstack/api/response/TemplateResponse.java b/api/src/org/apache/cloudstack/api/response/TemplateResponse.java
index 7cbcd1dc9ff..b3293ce9c81 100644
--- a/api/src/org/apache/cloudstack/api/response/TemplateResponse.java
+++ b/api/src/org/apache/cloudstack/api/response/TemplateResponse.java
@@ -174,7 +174,7 @@
     private Map details;
 
     @SerializedName(ApiConstants.BITS)
-    @Param(description="the processor bit size", since = "4.10")
+    @Param(description = "the processor bit size", since = "4.10")
     private int bits;
 
     @SerializedName(ApiConstants.SSHKEY_ENABLED)
@@ -185,6 +185,14 @@
     @Param(description = "true if template contains XS/VMWare tools inorder to support dynamic scaling of VM cpu/memory")
     private Boolean isDynamicallyScalable;
 
+    @SerializedName("parenttemplateid")
+    @Param(description = "if Datadisk template, then id of the root disk template this template belongs to")
+    private String parentTemplateId;
+
+    @SerializedName("childtemplates")
+    @Param(description = "if root disk template, then ids of the data disk templates this template owns")
+    private Set<ChildTemplateResponse> childTemplates;
+
     public TemplateResponse() {
         tags = new LinkedHashSet<ResourceTagResponse>();
     }
@@ -362,4 +370,13 @@ public String getZoneId() {
     public void setBits(int bits) {
         this.bits = bits;
     }
+
+    public void setParentTemplateId(String parentTemplateId) {
+        this.parentTemplateId = parentTemplateId;
+    }
+
+    public void setChildTemplates(Set<ChildTemplateResponse> childTemplateIds) {
+        this.childTemplates = childTemplateIds;
+    }
+
 }
diff --git a/core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateAnswer.java b/core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateAnswer.java
new file mode 100644
index 00000000000..0f009f3d6ce
--- /dev/null
+++ b/core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateAnswer.java
@@ -0,0 +1,38 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api.storage;
+
+import org.apache.cloudstack.storage.to.TemplateObjectTO;
+
+import com.cloud.agent.api.Answer;
+
+/**
+ * Answer to a {@link CreateDatadiskTemplateCommand}: carries the template
+ * object created for one data disk of a multi-disk (OVA) template.
+ */
+public class CreateDatadiskTemplateAnswer extends Answer {
+    private TemplateObjectTO dataDiskTemplate = null;
+
+    // Success answer: wraps the created datadisk template (no originating
+    // command is attached — super(null)).
+    public CreateDatadiskTemplateAnswer(TemplateObjectTO dataDiskTemplate) {
+        super(null);
+        this.dataDiskTemplate = dataDiskTemplate;
+    }
+
+    public TemplateObjectTO getDataDiskTemplate() {
+        return dataDiskTemplate;
+    }
+
+    // Failure answer: result=false with the given error message; dataDiskTemplate stays null.
+    public CreateDatadiskTemplateAnswer(String errMsg) {
+        super(null, false, errMsg);
+    }
+}
\ No newline at end of file
diff --git a/core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateCommand.java b/core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateCommand.java
new file mode 100644
index 00000000000..b87d170fe81
--- /dev/null
+++ b/core/src/com/cloud/agent/api/storage/CreateDatadiskTemplateCommand.java
@@ -0,0 +1,71 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api.storage;
+
+import com.cloud.agent.api.Command;
+import com.cloud.agent.api.to.DataTO;
+
+/**
+ * Command sent to a storage resource to create a datadisk template from one
+ * disk file of a multi-disk (OVA) template.
+ */
+public final class CreateDatadiskTemplateCommand extends Command {
+    // Target datadisk template to create.
+    private DataTO dataDiskTemplate;
+    // Path of the source disk file inside the extracted parent template.
+    private String path;
+    // Size of the source disk file in bytes (per the OVF File ovf:size attribute — confirm).
+    private long fileSize;
+    // Whether this disk is the bootable (root) disk of the OVA.
+    private boolean bootable;
+    // OVF disk identifier for the source disk.
+    private String diskId;
+
+    public CreateDatadiskTemplateCommand(DataTO dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable) {
+        super();
+        this.dataDiskTemplate = dataDiskTemplate;
+        this.path = path;
+        this.fileSize = fileSize;
+        this.bootable = bootable;
+        this.diskId = diskId;
+    }
+
+    // No-arg constructor for serialization frameworks.
+    protected CreateDatadiskTemplateCommand() {
+        super();
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return false;
+    }
+
+    public DataTO getDataDiskTemplate() {
+        return dataDiskTemplate;
+    }
+
+    public String getPath() {
+        return path;
+    }
+
+    public long getFileSize() {
+        return fileSize;
+    }
+
+    public boolean getBootable() {
+        return bootable;
+    }
+
+    public String getDiskId() {
+        return diskId;
+    }
+
+    public void setDiskId(String diskId) {
+        this.diskId = diskId;
+    }
+
+}
\ No newline at end of file
diff --git a/core/src/com/cloud/agent/api/storage/GetDatadisksAnswer.java b/core/src/com/cloud/agent/api/storage/GetDatadisksAnswer.java
new file mode 100644
index 00000000000..58922175c55
--- /dev/null
+++ b/core/src/com/cloud/agent/api/storage/GetDatadisksAnswer.java
@@ -0,0 +1,40 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api.storage;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.to.DatadiskTO;
+
+/**
+ * Answer to a {@link GetDatadisksCommand}: lists the per-disk details
+ * discovered in a multi-disk (OVA) template.
+ */
+public class GetDatadisksAnswer extends Answer {
+    List<DatadiskTO> dataDiskDetails = new ArrayList<DatadiskTO>();
+
+    // Success answer carrying the discovered disk details (no originating
+    // command is attached — super(null)).
+    public GetDatadisksAnswer(List<DatadiskTO> dataDiskDetails) {
+        super(null);
+        this.dataDiskDetails = dataDiskDetails;
+    }
+
+    public List<DatadiskTO> getDataDiskDetails() {
+        return dataDiskDetails;
+    }
+
+    // Failure answer: result=false with the given error message; list stays empty.
+    public GetDatadisksAnswer(String errMsg) {
+        super(null, false, errMsg);
+    }
+}
\ No newline at end of file
diff --git a/core/src/com/cloud/agent/api/storage/GetDatadisksCommand.java b/core/src/com/cloud/agent/api/storage/GetDatadisksCommand.java
new file mode 100644
index 00000000000..0e22ea25e78
--- /dev/null
+++ b/core/src/com/cloud/agent/api/storage/GetDatadisksCommand.java
@@ -0,0 +1,43 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api.storage;
+
+import com.cloud.agent.api.Command;
+import com.cloud.agent.api.to.DataTO;
+
+/**
+ * Command sent to a storage resource to enumerate the data disks contained in
+ * a multi-disk (OVA) template identified by {@code data}.
+ */
+public final class GetDatadisksCommand extends Command {
+    // Template whose disks are to be enumerated.
+    private DataTO data;
+
+    public GetDatadisksCommand(DataTO data) {
+        super();
+        this.data = data;
+    }
+
+    // No-arg constructor for serialization frameworks.
+    protected GetDatadisksCommand() {
+        super();
+    }
+
+    @Override
+    public boolean executeInSequence() {
+        return false;
+    }
+
+    public DataTO getData() {
+        return data;
+    }
+
+}
\ No newline at end of file
diff --git a/core/src/com/cloud/storage/template/OVAProcessor.java b/core/src/com/cloud/storage/template/OVAProcessor.java
index 31523b6e4d0..9d85151d309 100644
--- a/core/src/com/cloud/storage/template/OVAProcessor.java
+++ b/core/src/com/cloud/storage/template/OVAProcessor.java
@@ -20,6 +20,7 @@
 package com.cloud.storage.template;
 
 import java.io.File;
+import java.util.List;
 import java.util.Map;
 
 import javax.naming.ConfigurationException;
@@ -28,10 +29,14 @@
 import org.apache.log4j.Logger;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
 
+import com.cloud.agent.api.storage.OVFHelper;
+import com.cloud.agent.api.to.DatadiskTO;
 import com.cloud.exception.InternalErrorException;
 import com.cloud.storage.Storage.ImageFormat;
 import com.cloud.storage.StorageLayer;
+import com.cloud.utils.Pair;
 import com.cloud.utils.component.AdapterBase;
 import com.cloud.utils.script.Script;
 
@@ -64,6 +69,7 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa
 
         Script command = new Script("tar", 0, s_logger);
         command.add("--no-same-owner");
+        command.add("--no-same-permissions");
         command.add("-xf", templateFileFullPath);
         command.setWorkDir(templateFile.getParent());
         String result = command.execute();
@@ -72,12 +78,37 @@ public FormatInfo process(String templatePath, ImageFormat format, String templa
             throw new InternalErrorException("failed to untar OVA package");
         }
 
+        s_logger.info("MDOVA setting permission for templatePath files " + templatePath);
+        command = new Script("chmod", 0, s_logger);
+        command.add("-R");
+        command.add("666", templatePath);
+        result = command.execute();
+        if (result != null) {
+            s_logger.warn("Unable to set permissions for files in " + templatePath + " due to " + result);
+        }
+        s_logger.info("MDOVA setting permission for templatePath folder " + templatePath);
+        command = new Script("chmod", 0, s_logger);
+        command.add("777", templatePath);
+        result = command.execute();
+        if (result != null) {
+            s_logger.warn("Unable to set permissions for " + templatePath + " due to " + result);
+        }
+
         FormatInfo info = new FormatInfo();
         info.format = ImageFormat.OVA;
         info.filename = templateName + "." + ImageFormat.OVA.getFileExtension();
         info.size = _storage.getSize(templateFilePath);
         info.virtualSize = getTemplateVirtualSize(templatePath, info.filename);
 
+        // validate ova: parse the OVF descriptor and fail early if it is malformed
+        String ovfFile = getOVFFilePath(templateFileFullPath);
+        try {
+            OVFHelper ovfHelper = new OVFHelper();
+            List<DatadiskTO> disks = ovfHelper.getOVFVolumeInfo(ovfFile);
+        } catch (Exception e) {
+            s_logger.info("The ovf file " + ovfFile + " is invalid ", e);
+            throw new InternalErrorException("OVA package has bad ovf file " + e.getMessage(), e);
+        }
         // delete original OVA file
         // templateFile.delete();
         return info;
@@ -112,22 +143,44 @@ public long getTemplateVirtualSize(String templatePath, String templateName) thr
             Element disk = (Element)ovfDoc.getElementsByTagName("Disk").item(0);
             virtualSize = Long.parseLong(disk.getAttribute("ovf:capacity"));
             String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits");
-            if ((virtualSize != 0) && (allocationUnits != null)) {
-                long units = 1;
-                if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes") || allocationUnits.equalsIgnoreCase("byte * 2^10")) {
-                    units = 1024;
-                } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^20")) {
-                    units = 1024 * 1024;
-                } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes") || allocationUnits.equalsIgnoreCase("byte * 2^30")) {
-                    units = 1024 * 1024 * 1024;
+            virtualSize = OVFHelper.getDiskVirtualSize(virtualSize, allocationUnits, ovfFileName);
+            return virtualSize;
+        } catch (Exception e) {
+            String msg = "getTemplateVirtualSize: Unable to parse OVF XML document " + templatePath + " to get the virtual disk " + templateName + " size due to " + e;
+            s_logger.error(msg);
+            throw new InternalErrorException(msg);
+        }
+    }
+
+    public Pair<Long, Long> getDiskDetails(String ovfFilePath, String diskName) throws InternalErrorException {
+        long virtualSize = 0;
+        long fileSize = 0;
+        String fileId = null;
+        try {
+            Document ovfDoc = null;
+            ovfDoc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new File(ovfFilePath));
+            NodeList disks = ovfDoc.getElementsByTagName("Disk");
+            NodeList files = ovfDoc.getElementsByTagName("File");
+            for (int j = 0; j < files.getLength(); j++) {
+                Element file = (Element)files.item(j);
+                if (file.getAttribute("ovf:href").equals(diskName)) {
+                    fileSize = Long.parseLong(file.getAttribute("ovf:size"));
+                    fileId = file.getAttribute("ovf:id");
+                    break;
                 }
-                virtualSize = virtualSize * units;
-            } else {
-                throw new InternalErrorException("Failed to read capacity and capacityAllocationUnits from the OVF file: " + ovfFileName);
             }
-            return virtualSize;
+            for (int i = 0; i < disks.getLength(); i++) {
+                Element disk = (Element)disks.item(i);
+                if (disk.getAttribute("ovf:fileRef").equals(fileId)) {
+                    virtualSize = Long.parseLong(disk.getAttribute("ovf:capacity"));
+                    String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits");
+                    virtualSize = OVFHelper.getDiskVirtualSize(virtualSize, allocationUnits, ovfFilePath);
+                    break;
+                }
+            }
+            return new Pair<Long, Long>(virtualSize, fileSize);
         } catch (Exception e) {
-            String msg = "Unable to parse OVF XML document to get the virtual disk size due to" + e;
+            String msg = "getDiskDetails: Unable to parse OVF XML document " + ovfFilePath + " to get the virtual disk " + diskName + " size due to " + e;
             s_logger.error(msg);
             throw new InternalErrorException(msg);
         }
diff --git a/core/src/org/apache/cloudstack/storage/to/TemplateObjectTO.java b/core/src/org/apache/cloudstack/storage/to/TemplateObjectTO.java
index 9ee90b72ddd..4a3d058176c 100644
--- a/core/src/org/apache/cloudstack/storage/to/TemplateObjectTO.java
+++ b/core/src/org/apache/cloudstack/storage/to/TemplateObjectTO.java
@@ -44,6 +44,8 @@
     private Long size;
     private Long physicalSize;
     private Hypervisor.HypervisorType hypervisorType;
+    private boolean bootable;
+    private String uniqueName;
 
     public TemplateObjectTO() {
 
@@ -73,6 +75,8 @@ public TemplateObjectTO(TemplateInfo template) {
         this.accountId = template.getAccountId();
         this.name = template.getUniqueName();
         this.format = template.getFormat();
+        this.uniqueName = template.getUniqueName();
+        this.size = template.getSize();
         if (template.getDataStore() != null) {
             this.imageDataStore = template.getDataStore().getTO();
         }
@@ -215,6 +219,22 @@ public void setPhysicalSize(Long physicalSize) {
         this.physicalSize = physicalSize;
     }
 
+    public void setIsBootable(boolean bootable) {
+        this.bootable = bootable;
+    }
+
+    public boolean isBootable() {
+        return bootable;
+    }
+
+    public String getUniqueName() {
+        return this.uniqueName;
+    }
+
+    public void setUniqueName(String uniqueName) {
+        this.uniqueName = uniqueName;
+    }
+
     @Override
     public String toString() {
         return new StringBuilder("TemplateTO[id=").append(id).append("|origUrl=").append(origUrl).append("|name").append(name).append("]").toString();
diff --git a/engine/api/src/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/com/cloud/vm/VirtualMachineManager.java
index a20fc7b88d7..31e668640d9 100644
--- a/engine/api/src/com/cloud/vm/VirtualMachineManager.java
+++ b/engine/api/src/com/cloud/vm/VirtualMachineManager.java
@@ -36,6 +36,7 @@
 import com.cloud.exception.ResourceUnavailableException;
 import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.network.Network;
+import com.cloud.offering.DiskOffering;
 import com.cloud.offering.DiskOfferingInfo;
 import com.cloud.offering.ServiceOffering;
 import com.cloud.storage.StoragePool;
@@ -74,11 +75,12 @@
      * @param auxiliaryNetworks additional networks to attach the VMs to.
      * @param plan How to deploy the VM.
      * @param hyperType Hypervisor type
+     * @param datadiskTemplateToDiskOfferingMap data disks to be created from datadisk templates and attached to the VM
      * @throws InsufficientCapacityException If there are insufficient capacity to deploy this vm.
      */
     void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering, DiskOfferingInfo rootDiskOfferingInfo,
         List<DiskOfferingInfo> dataDiskOfferings, LinkedHashMap<? extends Network, List<? extends NicProfile>> auxiliaryNetworks, DeploymentPlan plan,
-        HypervisorType hyperType, Map<String, Map<Integer, String>> extraDhcpOptions) throws InsufficientCapacityException;
+        HypervisorType hyperType, Map<String, Map<Integer, String>> extraDhcpOptions, Map<Long, DiskOffering> datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException;
 
     void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering,
         LinkedHashMap<? extends Network, List<? extends NicProfile>> networkProfiles, DeploymentPlan plan, HypervisorType hyperType) throws InsufficientCapacityException;
diff --git a/engine/api/src/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java
index 451995fc71d..fa6f2c6fb9d 100644
--- a/engine/api/src/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java
+++ b/engine/api/src/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java
@@ -92,7 +92,8 @@ VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId,
 
     void destroyVolume(Volume volume);
 
-    DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner);
+    DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template,
+            Account owner, Long deviceId);
 
     VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo volume, HypervisorType rootDiskHyperType, StoragePool storagePool) throws NoTransitionException;
 
diff --git a/engine/api/src/org/apache/cloudstack/engine/service/api/OrchestrationService.java b/engine/api/src/org/apache/cloudstack/engine/service/api/OrchestrationService.java
index 871745ec601..5a18b3cab9e 100644
--- a/engine/api/src/org/apache/cloudstack/engine/service/api/OrchestrationService.java
+++ b/engine/api/src/org/apache/cloudstack/engine/service/api/OrchestrationService.java
@@ -36,6 +36,7 @@
 import com.cloud.deploy.DeploymentPlan;
 import com.cloud.exception.InsufficientCapacityException;
 import com.cloud.hypervisor.Hypervisor;
+import com.cloud.offering.DiskOffering;
 import com.cloud.vm.NicProfile;
 
 @Path("orchestration")
@@ -65,7 +66,8 @@ VirtualMachineEntity createVirtualMachine(@QueryParam("id") String id, @QueryPar
         @QueryParam("cpu") int cpu, @QueryParam("speed") int speed, @QueryParam("ram") long memory, @QueryParam("disk-size") Long diskSize,
         @QueryParam("compute-tags") List<String> computeTags, @QueryParam("root-disk-tags") List<String> rootDiskTags,
         @QueryParam("network-nic-map") Map<String, NicProfile> networkNicMap, @QueryParam("deploymentplan") DeploymentPlan plan,
-        @QueryParam("root-disk-size") Long rootDiskSize, @QueryParam("extra-dhcp-option-map") Map<String, Map<Integer, String>> extraDhcpOptionMap) throws InsufficientCapacityException;
+        @QueryParam("root-disk-size") Long rootDiskSize, @QueryParam("extra-dhcp-option-map") Map<String, Map<Integer, String>> extraDhcpOptionMap,
+        @QueryParam("datadisktemplate-diskoffering-map") Map<Long, DiskOffering> datadiskTemplateToDiskOfferingMap) throws InsufficientCapacityException;
 
     @POST
     VirtualMachineEntity createVirtualMachineFromScratch(@QueryParam("id") String id, @QueryParam("owner") String owner, @QueryParam("iso-id") String isoId,
diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java
index ff204c663c0..fc8a769e81f 100644
--- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java
@@ -68,4 +68,6 @@ public TemplateInfo getTemplate() {
     void associateTemplateToZone(long templateId, Long zoneId);
 
     void associateCrosszoneTemplatesToZone(long dcId);
+
+    AsyncCallFuture<TemplateApiResult> createDatadiskTemplateAsync(TemplateInfo parentTemplate, TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable);
 }
diff --git a/engine/api/src/org/apache/cloudstack/storage/image/datastore/ImageStoreEntity.java b/engine/api/src/org/apache/cloudstack/storage/image/datastore/ImageStoreEntity.java
index 461bd50ab0e..5a0be952b39 100644
--- a/engine/api/src/org/apache/cloudstack/storage/image/datastore/ImageStoreEntity.java
+++ b/engine/api/src/org/apache/cloudstack/storage/image/datastore/ImageStoreEntity.java
@@ -18,17 +18,21 @@
  */
 package org.apache.cloudstack.storage.image.datastore;
 
+import java.util.List;
 import java.util.Set;
 
 import com.cloud.storage.Upload;
+import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
 import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 
 import com.cloud.storage.ImageStore;
 import com.cloud.storage.Storage.ImageFormat;
+import com.cloud.agent.api.to.DatadiskTO;
 
 public interface ImageStoreEntity extends DataStore, ImageStore {
     TemplateInfo getTemplate(long templateId);
@@ -46,4 +50,8 @@
     String createEntityExtractUrl(String installPath, ImageFormat format, DataObject dataObject);  // get the entity download URL
 
     void deleteExtractUrl(String installPath, String url, Upload.Type volume);
+
+    List<DatadiskTO> getDataDiskTemplates(DataObject obj);
+
+    Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable, AsyncCompletionCallback<CreateCmdResult> callback);
 }
diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
index 9edf37985d9..30d301057f7 100755
--- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -29,6 +29,7 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.TimeZone;
 import java.util.UUID;
 import java.util.concurrent.Executors;
@@ -159,6 +160,7 @@
 import com.cloud.network.dao.NetworkVO;
 import com.cloud.network.router.VirtualRouter;
 import com.cloud.network.rules.RulesManager;
+import com.cloud.offering.DiskOffering;
 import com.cloud.offering.DiskOfferingInfo;
 import com.cloud.offering.ServiceOffering;
 import com.cloud.org.Cluster;
@@ -170,6 +172,7 @@
 import com.cloud.storage.ScopeType;
 import com.cloud.storage.Storage.ImageFormat;
 import com.cloud.storage.StoragePool;
+import com.cloud.storage.VMTemplateVO;
 import com.cloud.storage.Volume;
 import com.cloud.storage.Volume.Type;
 import com.cloud.storage.VolumeVO;
@@ -391,7 +394,7 @@ public void registerGuru(final VirtualMachine.Type type, final VirtualMachineGur
     @DB
     public void allocate(final String vmInstanceName, final VirtualMachineTemplate template, final ServiceOffering serviceOffering,
             final DiskOfferingInfo rootDiskOfferingInfo, final List<DiskOfferingInfo> dataDiskOfferings,
-            final LinkedHashMap<? extends Network, List<? extends NicProfile>> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map<String, Map<Integer, String>> extraDhcpOptions)
+            final LinkedHashMap<? extends Network, List<? extends NicProfile>> auxiliaryNetworks, final DeploymentPlan plan, final HypervisorType hyperType, final Map<String, Map<Integer, String>> extraDhcpOptions, final Map<Long, DiskOffering> datadiskTemplateToDiskOfferingMap)
                     throws InsufficientCapacityException {
 
         final VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName);
@@ -430,7 +433,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws
 
                 if (template.getFormat() == ImageFormat.ISO) {
                     volumeMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOfferingInfo.getDiskOffering(), rootDiskOfferingInfo.getSize(),
-                            rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vmFinal, template, owner);
+                            rootDiskOfferingInfo.getMinIops(), rootDiskOfferingInfo.getMaxIops(), vmFinal, template, owner, null);
                 } else if (template.getFormat() == ImageFormat.BAREMETAL) {
                     // Do nothing
                 } else {
@@ -441,7 +444,20 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws
                 if (dataDiskOfferings != null) {
                     for (final DiskOfferingInfo dataDiskOfferingInfo : dataDiskOfferings) {
                         volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vmFinal.getId(), dataDiskOfferingInfo.getDiskOffering(), dataDiskOfferingInfo.getSize(),
-                                dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), vmFinal, template, owner);
+                                dataDiskOfferingInfo.getMinIops(), dataDiskOfferingInfo.getMaxIops(), vmFinal, template, owner, null);
+                    }
+                }
+                s_logger.info("MDOVA allocate datadiskTemplateToDiskOfferingMap");
+                if (datadiskTemplateToDiskOfferingMap != null && !datadiskTemplateToDiskOfferingMap.isEmpty()) {
+                    s_logger.info("MDOVA allocate datadiskTemplateToDiskOfferingMap isEmpty " + datadiskTemplateToDiskOfferingMap.isEmpty() );
+                    int diskNumber = 1;
+                    for (Entry<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap : datadiskTemplateToDiskOfferingMap.entrySet()) {
+                        DiskOffering diskOffering = dataDiskTemplateToDiskOfferingMap.getValue();
+                        long diskOfferingSize = diskOffering.getDiskSize() / (1024 * 1024 * 1024);
+                        VMTemplateVO dataDiskTemplate = _templateDao.findById(dataDiskTemplateToDiskOfferingMap.getKey());
+                        volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vmFinal.getId() + "-" + String.valueOf(diskNumber), diskOffering, diskOfferingSize, null, null,
+                                vmFinal, dataDiskTemplate, owner, Long.valueOf(diskNumber));
+                        diskNumber++;
                     }
                 }
             }
@@ -455,7 +471,7 @@ public void doInTransactionWithoutResult(final TransactionStatus status) throws
     @Override
     public void allocate(final String vmInstanceName, final VirtualMachineTemplate template, final ServiceOffering serviceOffering,
             final LinkedHashMap<? extends Network, List<? extends NicProfile>> networks, final DeploymentPlan plan, final HypervisorType hyperType) throws InsufficientCapacityException {
-        allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(serviceOffering), new ArrayList<DiskOfferingInfo>(), networks, plan, hyperType, null);
+        allocate(vmInstanceName, template, serviceOffering, new DiskOfferingInfo(serviceOffering), new ArrayList<DiskOfferingInfo>(), networks, plan, hyperType, null, null);
     }
 
     private VirtualMachineGuru getVmGuru(final VirtualMachine vm) {
@@ -881,7 +897,7 @@ public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfil
         final VirtualMachineTemplate template = _entityMgr.findByIdIncludingRemoved(VirtualMachineTemplate.class, vm.getTemplateId());
 
         if (s_logger.isDebugEnabled()) {
-            s_logger.debug("Trying to deploy VM, vm has dcId: " + vm.getDataCenterId() + " and podId: " + vm.getPodIdToDeployIn());
+            s_logger.debug("MDOVA Trying to deploy VM, vm " + vm.getInstanceName() + "  has dcId: " + vm.getDataCenterId() + " and podId: " + vm.getPodIdToDeployIn());
         }
         DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vm.getPodIdToDeployIn(), null, null, null, null, ctx);
         if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) {
@@ -922,6 +938,7 @@ public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfil
                     // edit plan if this vm's ROOT volume is in READY state already
                     final List<VolumeVO> vols = _volsDao.findReadyRootVolumesByInstance(vm.getId());
                     for (final VolumeVO vol : vols) {
+                        s_logger.debug("MDOVA orchestrateStart reuseVolume " + reuseVolume + " vol " + vol.getName());
                         // make sure if the templateId is unchanged. If it is changed,
                         // let planner
                         // reassign pool for the volume even if it ready.
@@ -1028,10 +1045,12 @@ public void orchestrateStart(final String vmUuid, final Map<VirtualMachineProfil
 
                 try {
                     if (s_logger.isDebugEnabled()) {
-                        s_logger.debug("VM is being created in podId: " + vm.getPodIdToDeployIn());
+                        s_logger.debug("MDOVA orchestrateStart VM is being created in podId: " + vm.getPodIdToDeployIn());
                     }
                     _networkMgr.prepare(vmProfile, new DeployDestination(dest.getDataCenter(), dest.getPod(), null, null), ctx);
+                    s_logger.debug("MDOVA orchestrateStart VM prepare network done");
                     if (vm.getHypervisorType() != HypervisorType.BareMetal) {
+                        s_logger.debug("MDOVA orchestrateStart VM working on volume " + vmProfile._vm.getInstanceName());
                         volumeMgr.prepare(vmProfile, dest);
                     }
                     //since StorageMgr succeeded in volume creation, reuse Volume for further tries until current cluster has capacity
@@ -4776,6 +4795,7 @@ protected VirtualMachine retrieve() {
             orchestrateStart(vm.getUuid(), work.getParams(), work.getPlan(), _dpMgr.getDeploymentPlannerByName(work.getDeploymentPlanner()));
         }
         catch (CloudRuntimeException e){
+            e.printStackTrace();
             s_logger.info("Caught CloudRuntimeException, returning job failed " + e);
             CloudRuntimeException ex = new CloudRuntimeException("Unable to start VM instance");
             return new Pair<JobInfo.Status, String>(JobInfo.Status.FAILED, JobSerializerHelper.toObjectSerializedString(ex));
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java
index e588431b6b6..91e9b6f57bd 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/CloudOrchestrator.java
@@ -24,6 +24,7 @@
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 
 import javax.inject.Inject;
 
@@ -45,6 +46,7 @@
 import com.cloud.network.Network;
 import com.cloud.network.dao.NetworkDao;
 import com.cloud.network.dao.NetworkVO;
+import com.cloud.offering.DiskOffering;
 import com.cloud.offering.DiskOfferingInfo;
 import com.cloud.service.ServiceOfferingVO;
 import com.cloud.service.dao.ServiceOfferingDao;
@@ -155,7 +157,7 @@ public void destroyVolume(String volumeEntity) {
     @Override
     public VirtualMachineEntity createVirtualMachine(String id, String owner, String templateId, String hostName, String displayName, String hypervisor, int cpu,
         int speed, long memory, Long diskSize, List<String> computeTags, List<String> rootDiskTags, Map<String, NicProfile> networkNicMap, DeploymentPlan plan,
-        Long rootDiskSize, Map<String, Map<Integer, String>> extraDhcpOptionMap) throws InsufficientCapacityException {
+        Long rootDiskSize, Map<String, Map<Integer, String>> extraDhcpOptionMap, Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException {
 
         // VirtualMachineEntityImpl vmEntity = new VirtualMachineEntityImpl(id, owner, hostName, displayName, cpu, speed, memory, computeTags, rootDiskTags, networks,
         // vmEntityManager);
@@ -233,8 +235,20 @@ public VirtualMachineEntity createVirtualMachine(String id, String owner, String
             dataDiskOfferings.add(dataDiskOfferingInfo);
         }
 
+        if (dataDiskTemplateToDiskOfferingMap != null && !dataDiskTemplateToDiskOfferingMap.isEmpty()) {
+            for (Entry<Long, DiskOffering> datadiskTemplateToDiskOffering : dataDiskTemplateToDiskOfferingMap.entrySet()) {
+                DiskOffering diskOffering = datadiskTemplateToDiskOffering.getValue();
+                if (diskOffering == null) {
+                    throw new InvalidParameterValueException("Unable to find disk offering " + vm.getDiskOfferingId());
+                }
+                if (diskOffering.getDiskSize() == 0) { // Custom disk offering is not supported for volumes created from datadisk templates
+                    throw new InvalidParameterValueException("Disk offering " + diskOffering + " requires size parameter.");
+                }
+            }
+        }
+
         _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(templateId)), computeOffering, rootDiskOfferingInfo, dataDiskOfferings, networkIpMap, plan,
-            hypervisorType, extraDhcpOptionMap);
+            hypervisorType, extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap);
 
         return vmEntity;
     }
@@ -299,7 +313,7 @@ public VirtualMachineEntity createVirtualMachineFromScratch(String id, String ow
 
         HypervisorType hypervisorType = HypervisorType.valueOf(hypervisor);
 
-        _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(isoId)), computeOffering, rootDiskOfferingInfo, new ArrayList<DiskOfferingInfo>(), networkIpMap, plan, hypervisorType, extraDhcpOptionMap);
+        _itMgr.allocate(vm.getInstanceName(), _templateDao.findById(new Long(isoId)), computeOffering, rootDiskOfferingInfo, new ArrayList<DiskOfferingInfo>(), networkIpMap, plan, hypervisorType, extraDhcpOptionMap, null);
 
         return vmEntity;
     }
diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index 7669b3b98a9..1c132d74732 100644
--- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -654,7 +654,7 @@ protected DiskProfile toDiskProfile(Volume vol, DiskOffering offering) {
     }
 
     @Override
-    public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner) {
+    public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner, Long deviceId) {
         if (size == null) {
             size = offering.getDiskSize();
         } else {
@@ -679,13 +679,17 @@ public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offeri
             vol.setInstanceId(vm.getId());
         }
 
-        if (type.equals(Type.ROOT)) {
+        if (deviceId != null) {
+            vol.setDeviceId(deviceId);
+        } else if (type.equals(Type.ROOT)) {
             vol.setDeviceId(0l);
         } else {
             vol.setDeviceId(1l);
         }
         if (template.getFormat() == ImageFormat.ISO) {
             vol.setIsoId(template.getId());
+        } else if (template.getTemplateType().equals(Storage.TemplateType.DATADISK)) {
+            vol.setTemplateId(template.getId());
         }
         // display flag matters only for the User vms
         if (vm.getType() == VirtualMachine.Type.User) {
@@ -1174,6 +1178,7 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest
                 assignedPool = _storagePoolDao.findById(vol.getPoolId());
             }
             if (assignedPool != null) {
+                s_logger.debug("MDOVA getTasks vol name = " + vol.getName() + " storage pool =" + assignedPool.getName());
                 Volume.State state = vol.getState();
                 if (state == Volume.State.Allocated || state == Volume.State.Creating) {
                     VolumeTask task = new VolumeTask(VolumeTaskType.RECREATE, vol, null);
@@ -1226,7 +1231,7 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest
                 }
             } else {
                 if (vol.getPoolId() == null) {
-                    throw new StorageUnavailableException("Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + vol,
+                    throw new StorageUnavailableException("MDOVA Volume has no pool associate and also no storage pool assigned in DeployDestination, Unable to create " + vol,
                             Volume.class, vol.getId());
                 }
                 if (s_logger.isDebugEnabled()) {
@@ -1252,7 +1257,7 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest
             StoragePool pool = dest.getStorageForDisks().get(vol);
             destPool = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
         }
-
+        s_logger.info("MDOVF recreateVolume destPool.getId() "+ destPool.getId() + " vol.getState() " + vol.getState()  + " vol name" + vol.getName()  + " vol path" + vol.getPath());
         if (vol.getState() == Volume.State.Allocated || vol.getState() == Volume.State.Creating) {
             newVol = vol;
         } else {
@@ -1294,6 +1299,7 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest
 
                 PrimaryDataStore primaryDataStore = (PrimaryDataStore)destPool;
 
+                s_logger.debug("MDOVA recreateVolume dest=" + primaryDataStore.getName() + " is managed " + primaryDataStore.isManaged() + " dest pool " + destPool.getId() + " templ " + templ.getName());
                 if (primaryDataStore.isManaged()) {
                     DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId());
                     HypervisorType hyperType = vm.getVirtualMachine().getHypervisorType();
@@ -1359,13 +1365,14 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto
 
         List<VolumeVO> vols = _volsDao.findUsableVolumesForInstance(vm.getId());
         if (s_logger.isDebugEnabled()) {
-            s_logger.debug("Checking if we need to prepare " + vols.size() + " volumes for " + vm);
+            s_logger.debug("MDOVA prepare Checking if we need to prepare " + vols.size() + " volumes for " + vm);
         }
 
         List<VolumeTask> tasks = getTasks(vols, dest.getStorageForDisks(), vm);
         Volume vol = null;
         StoragePool pool = null;
         for (VolumeTask task : tasks) {
+            s_logger.debug("MDOVA prepare task " + task.volume.getName());
             if (task.type == VolumeTaskType.NOP) {
                 pool = (StoragePool)dataStoreMgr.getDataStore(task.pool.getId(), DataStoreRole.Primary);
                 vol = task.volume;
@@ -1391,6 +1398,8 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto
                 pool = (StoragePool)dataStoreMgr.getDataStore(result.second().getId(), DataStoreRole.Primary);
                 vol = result.first();
             }
+
+            s_logger.debug("MDOVA prepare task " + task.type + " for " + vol.getName() + ", id " + vol.getId());
             VolumeInfo volumeInfo = volFactory.getVolume(vol.getId());
             DataTO volTO = volumeInfo.getTO();
             DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(),
diff --git a/engine/schema/resources/META-INF/db/schema-41000to41100.sql b/engine/schema/resources/META-INF/db/schema-41000to41100.sql
index 5d51b47a994..6d2ac981a3b 100644
--- a/engine/schema/resources/META-INF/db/schema-41000to41100.sql
+++ b/engine/schema/resources/META-INF/db/schema-41000to41100.sql
@@ -514,5 +514,114 @@ UPDATE `cloud`.`monitoring_services` SET pidfile="/var/run/apache2/apache2.pid"
 -- This fixes a memory allocation issue to systemvms on VMware/ESXi
 UPDATE `cloud`.`vm_template` SET guest_os_id=99 WHERE id=8;
 
+
+-- Support multidisk OVA
+ALTER TABLE `cloud`.`vm_template` ADD COLUMN `parent_template_id` bigint(20) unsigned DEFAULT NULL COMMENT 'If datadisk template, then id of the root template this template belongs to';
+
+DROP VIEW IF EXISTS `cloud`.`template_view`;
+CREATE  VIEW `template_view` AS
+    SELECT
+        `vm_template`.`id` AS `id`,
+        `vm_template`.`uuid` AS `uuid`,
+        `vm_template`.`unique_name` AS `unique_name`,
+        `vm_template`.`name` AS `name`,
+        `vm_template`.`public` AS `public`,
+        `vm_template`.`featured` AS `featured`,
+        `vm_template`.`type` AS `type`,
+        `vm_template`.`hvm` AS `hvm`,
+        `vm_template`.`bits` AS `bits`,
+        `vm_template`.`url` AS `url`,
+        `vm_template`.`format` AS `format`,
+        `vm_template`.`created` AS `created`,
+        `vm_template`.`checksum` AS `checksum`,
+        `vm_template`.`display_text` AS `display_text`,
+        `vm_template`.`enable_password` AS `enable_password`,
+        `vm_template`.`dynamically_scalable` AS `dynamically_scalable`,
+        `vm_template`.`state` AS `template_state`,
+        `vm_template`.`guest_os_id` AS `guest_os_id`,
+        `guest_os`.`uuid` AS `guest_os_uuid`,
+        `guest_os`.`display_name` AS `guest_os_name`,
+        `vm_template`.`bootable` AS `bootable`,
+        `vm_template`.`prepopulate` AS `prepopulate`,
+        `vm_template`.`cross_zones` AS `cross_zones`,
+        `vm_template`.`hypervisor_type` AS `hypervisor_type`,
+        `vm_template`.`extractable` AS `extractable`,
+        `vm_template`.`template_tag` AS `template_tag`,
+        `vm_template`.`sort_key` AS `sort_key`,
+        `vm_template`.`removed` AS `removed`,
+        `vm_template`.`enable_sshkey` AS `enable_sshkey`,
+        `parent_template`.`id` AS `parent_template_id`,
+        `parent_template`.`uuid` AS `parent_template_uuid`,
+        `source_template`.`id` AS `source_template_id`,
+        `source_template`.`uuid` AS `source_template_uuid`,
+        `account`.`id` AS `account_id`,
+        `account`.`uuid` AS `account_uuid`,
+        `account`.`account_name` AS `account_name`,
+        `account`.`type` AS `account_type`,
+        `domain`.`id` AS `domain_id`,
+        `domain`.`uuid` AS `domain_uuid`,
+        `domain`.`name` AS `domain_name`,
+        `domain`.`path` AS `domain_path`,
+        `projects`.`id` AS `project_id`,
+        `projects`.`uuid` AS `project_uuid`,
+        `projects`.`name` AS `project_name`,
+        `data_center`.`id` AS `data_center_id`,
+        `data_center`.`uuid` AS `data_center_uuid`,
+        `data_center`.`name` AS `data_center_name`,
+        `launch_permission`.`account_id` AS `lp_account_id`,
+        `template_store_ref`.`store_id` AS `store_id`,
+        `image_store`.`scope` AS `store_scope`,
+        `template_store_ref`.`state` AS `state`,
+        `template_store_ref`.`download_state` AS `download_state`,
+        `template_store_ref`.`download_pct` AS `download_pct`,
+        `template_store_ref`.`error_str` AS `error_str`,
+        `template_store_ref`.`size` AS `size`,
+        `template_store_ref`.`physical_size` AS `physical_size`,
+        `template_store_ref`.`destroyed` AS `destroyed`,
+        `template_store_ref`.`created` AS `created_on_store`,
+        `vm_template_details`.`name` AS `detail_name`,
+        `vm_template_details`.`value` AS `detail_value`,
+        `resource_tags`.`id` AS `tag_id`,
+        `resource_tags`.`uuid` AS `tag_uuid`,
+        `resource_tags`.`key` AS `tag_key`,
+        `resource_tags`.`value` AS `tag_value`,
+        `resource_tags`.`domain_id` AS `tag_domain_id`,
+        `domain`.`uuid` AS `tag_domain_uuid`,
+        `domain`.`name` AS `tag_domain_name`,
+        `resource_tags`.`account_id` AS `tag_account_id`,
+        `account`.`account_name` AS `tag_account_name`,
+        `resource_tags`.`resource_id` AS `tag_resource_id`,
+        `resource_tags`.`resource_uuid` AS `tag_resource_uuid`,
+        `resource_tags`.`resource_type` AS `tag_resource_type`,
+        `resource_tags`.`customer` AS `tag_customer`,
+        CONCAT(`vm_template`.`id`,
+                '_',
+                IFNULL(`data_center`.`id`, 0)) AS `temp_zone_pair`
+    FROM
+        (((((((((((((`vm_template`
+        JOIN `guest_os` ON ((`guest_os`.`id` = `vm_template`.`guest_os_id`)))
+        JOIN `account` ON ((`account`.`id` = `vm_template`.`account_id`)))
+        JOIN `domain` ON ((`domain`.`id` = `account`.`domain_id`)))
+        LEFT JOIN `projects` ON ((`projects`.`project_account_id` = `account`.`id`)))
+        LEFT JOIN `vm_template_details` ON ((`vm_template_details`.`template_id` = `vm_template`.`id`)))
+        LEFT JOIN `vm_template` `source_template` ON ((`source_template`.`id` = `vm_template`.`source_template_id`)))
+        LEFT JOIN `template_store_ref` ON (((`template_store_ref`.`template_id` = `vm_template`.`id`)
+            AND (`template_store_ref`.`store_role` = 'Image')
+            AND (`template_store_ref`.`destroyed` = 0))))
+        LEFT JOIN `vm_template` `parent_template` ON ((`parent_template`.`id` = `vm_template`.`parent_template_id`)))
+        LEFT JOIN `image_store` ON ((ISNULL(`image_store`.`removed`)
+            AND (`template_store_ref`.`store_id` IS NOT NULL)
+            AND (`image_store`.`id` = `template_store_ref`.`store_id`))))
+        LEFT JOIN `template_zone_ref` ON (((`template_zone_ref`.`template_id` = `vm_template`.`id`)
+            AND ISNULL(`template_store_ref`.`store_id`)
+            AND ISNULL(`template_zone_ref`.`removed`))))
+        LEFT JOIN `data_center` ON (((`image_store`.`data_center_id` = `data_center`.`id`)
+            OR (`template_zone_ref`.`zone_id` = `data_center`.`id`))))
+        LEFT JOIN `launch_permission` ON ((`launch_permission`.`template_id` = `vm_template`.`id`)))
+        LEFT JOIN `resource_tags` ON (((`resource_tags`.`resource_id` = `vm_template`.`id`)
+            AND ((`resource_tags`.`resource_type` = 'Template')
+            OR (`resource_tags`.`resource_type` = 'ISO')))));
+
+
 -- Network External Ids
 ALTER TABLE `cloud`.`networks` ADD `external_id` varchar(255);
diff --git a/engine/schema/src/com/cloud/storage/VMTemplateVO.java b/engine/schema/src/com/cloud/storage/VMTemplateVO.java
index d28c23b1a50..f0d883dac30 100644
--- a/engine/schema/src/com/cloud/storage/VMTemplateVO.java
+++ b/engine/schema/src/com/cloud/storage/VMTemplateVO.java
@@ -146,6 +146,9 @@
     @Column(name = "dynamically_scalable")
     protected boolean dynamicallyScalable;
 
+    @Column(name = "parent_template_id")
+    private Long parentTemplateId;
+
     @Override
     public String getUniqueName() {
         return uniqueName;
@@ -609,4 +612,14 @@ public void setUpdated(Date updated) {
     public Class<?> getEntityType() {
         return VirtualMachineTemplate.class;
     }
+
+    @Override
+    public Long getParentTemplateId() {
+        return parentTemplateId;
+    }
+
+    public void setParentTemplateId(Long parentTemplateId) {
+        this.parentTemplateId = parentTemplateId;
+    }
+
 }
diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateDao.java b/engine/schema/src/com/cloud/storage/dao/VMTemplateDao.java
index dccc902e912..c43a2ea4ee9 100644
--- a/engine/schema/src/com/cloud/storage/dao/VMTemplateDao.java
+++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateDao.java
@@ -81,4 +81,6 @@
     void loadDetails(VMTemplateVO tmpl);
 
     void saveDetails(VMTemplateVO tmpl);
+
+    List<VMTemplateVO> listByParentTemplatetId(long parentTemplatetId);
 }
diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java
index 2037b3d35f7..dd1f2fcf164 100644
--- a/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java
+++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java
@@ -104,6 +104,7 @@
     private GenericSearchBuilder<VMTemplateVO, Long> CountTemplatesByAccount;
     // private SearchBuilder<VMTemplateVO> updateStateSearch;
     private SearchBuilder<VMTemplateVO> AllFieldsSearch;
+    protected SearchBuilder<VMTemplateVO> ParentTemplateIdSearch;
 
     @Inject
     ResourceTagDao _tagsDao;
@@ -135,6 +136,14 @@ public VMTemplateVO findByTemplateName(String templateName) {
         return findOneIncludingRemovedBy(sc);
     }
 
+    @Override
+    public List<VMTemplateVO> listByParentTemplatetId(long parentTemplatetId) {
+        SearchCriteria<VMTemplateVO> sc = ParentTemplateIdSearch.create();
+        sc.setParameters("parentTemplateId", parentTemplatetId);
+        sc.setParameters("state", VirtualMachineTemplate.State.Active);
+        return listBy(sc);
+    }
+
     @Override
     public List<VMTemplateVO> publicIsoSearch(Boolean bootable, boolean listRemoved, Map<String, String> tags) {
 
@@ -403,6 +412,11 @@ public boolean configure(String name, Map<String, Object> params) throws Configu
         AllFieldsSearch.and("name", AllFieldsSearch.entity().getName(), SearchCriteria.Op.EQ);
         AllFieldsSearch.done();
 
+        ParentTemplateIdSearch = createSearchBuilder();
+        ParentTemplateIdSearch.and("parentTemplateId", ParentTemplateIdSearch.entity().getParentTemplateId(), SearchCriteria.Op.EQ);
+        ParentTemplateIdSearch.and("state", ParentTemplateIdSearch.entity().getState(), SearchCriteria.Op.EQ);
+        ParentTemplateIdSearch.done();
+
         return result;
     }
 
diff --git a/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java
index b2d924d3cf5..75c01c7442c 100644
--- a/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java
+++ b/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java
@@ -692,4 +692,4 @@ public void performDataMigration(Connection conn) {
         }
 
     }
-}
+}
\ No newline at end of file
diff --git a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
index 7a1daba910d..75f23628fb8 100644
--- a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
+++ b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
@@ -157,12 +157,14 @@ protected Answer copyObject(DataObject srcData, DataObject destData, Host destHo
             }
 
             CopyCommand cmd = new CopyCommand(srcForCopy.getTO(), addFullCloneFlagOnVMwareDest(destData.getTO()), _primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value());
+            s_logger.debug("MDOVF copyObject " + srcForCopy.getTO().getPath() + " dest = " + destData.getTO().getId());
             EndPoint ep = destHost != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcForCopy, destData);
             if (ep == null) {
                 String errMsg = "No remote endpoint to send command, check if host or ssvm is down?";
                 s_logger.error(errMsg);
                 answer = new Answer(cmd, false, errMsg);
             } else {
+                s_logger.debug("MDOVA copyObject " + ep.getHostAddr());
                 answer = ep.sendMessage(cmd);
             }
 
@@ -444,6 +446,8 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As
         String errMsg = null;
         try {
             s_logger.debug("copyAsync inspecting src type " + srcData.getType().toString() + " copyAsync inspecting dest type " + destData.getType().toString());
+            s_logger.debug("MDOVF copyAsync inspecting src " + srcData.getTO().getPath() + " copyAsync inspecting dest " + destData.getTO().getPath());
+
 
             if (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.VOLUME) {
                 answer = copyVolumeFromSnapshot(srcData, destData);
diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
index 7c1695eebea..bebddd06c99 100644
--- a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
+++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java
@@ -18,43 +18,10 @@
  */
 package org.apache.cloudstack.storage.image;
 
-import com.cloud.agent.api.Answer;
-import com.cloud.agent.api.storage.ListTemplateAnswer;
-import com.cloud.agent.api.storage.ListTemplateCommand;
-import com.cloud.alert.AlertManager;
-import com.cloud.configuration.Config;
 import com.cloud.configuration.Resource;
-import com.cloud.dc.DataCenterVO;
-import com.cloud.dc.dao.ClusterDao;
-import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.configuration.Resource.ResourceType;
 import com.cloud.event.EventTypes;
 import com.cloud.event.UsageEventUtils;
-import com.cloud.exception.ResourceAllocationException;
-import com.cloud.hypervisor.Hypervisor.HypervisorType;
-import com.cloud.storage.DataStoreRole;
-import com.cloud.storage.ImageStoreDetailsUtil;
-import com.cloud.storage.Storage.ImageFormat;
-import com.cloud.storage.Storage.TemplateType;
-import com.cloud.storage.StoragePool;
-import com.cloud.storage.VMTemplateStorageResourceAssoc;
-import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
-import com.cloud.storage.VMTemplateVO;
-import com.cloud.storage.VMTemplateZoneVO;
-import com.cloud.storage.dao.VMTemplateDao;
-import com.cloud.storage.dao.VMTemplatePoolDao;
-import com.cloud.storage.dao.VMTemplateZoneDao;
-import com.cloud.storage.template.TemplateConstants;
-import com.cloud.storage.template.TemplateProp;
-import com.cloud.template.TemplateManager;
-import com.cloud.template.VirtualMachineTemplate;
-import com.cloud.user.Account;
-import com.cloud.user.AccountManager;
-import com.cloud.user.ResourceLimitService;
-import com.cloud.utils.UriUtils;
-import com.cloud.utils.db.GlobalLock;
-import com.cloud.utils.exception.CloudRuntimeException;
-import com.cloud.utils.fsm.NoTransitionException;
-import com.cloud.utils.fsm.StateMachine2;
 import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService;
@@ -90,16 +57,61 @@
 import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
 import org.apache.cloudstack.storage.image.store.TemplateObject;
 import org.apache.cloudstack.storage.to.TemplateObjectTO;
+import org.apache.commons.lang.StringUtils;
 import org.apache.log4j.Logger;
 import org.springframework.stereotype.Component;
-
 import javax.inject.Inject;
+
+import java.io.File;
 import java.util.ArrayList;
 import java.util.Date;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.storage.ListTemplateAnswer;
+import com.cloud.agent.api.storage.ListTemplateCommand;
+import com.cloud.agent.api.to.DatadiskTO;
+import com.cloud.alert.AlertManager;
+import com.cloud.configuration.Config;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.exception.ResourceAllocationException;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.DataStoreRole;
+import com.cloud.storage.ImageStoreDetailsUtil;
+import com.cloud.storage.ScopeType;
+import com.cloud.storage.Storage;
+import com.cloud.storage.Storage.ImageFormat;
+import com.cloud.storage.Storage.TemplateType;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.VMTemplateStorageResourceAssoc;
+import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.VMTemplateZoneVO;
+import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.dao.VMTemplateZoneDao;
+import com.cloud.storage.template.TemplateConstants;
+import com.cloud.storage.template.TemplateProp;
+import com.cloud.template.TemplateManager;
+import com.cloud.template.VirtualMachineTemplate;
+import com.cloud.user.Account;
+import com.cloud.user.AccountManager;
+import com.cloud.user.ResourceLimitService;
+import com.cloud.utils.UriUtils;
+import com.cloud.utils.db.GlobalLock;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.utils.fsm.NoTransitionException;
+import com.cloud.utils.fsm.StateMachine2;
+import com.cloud.vm.VmDetailConstants;
 
 @Component
 public class TemplateServiceImpl implements TemplateService {
@@ -131,8 +143,6 @@
     @Inject
     TemplateDataFactory _templateFactory;
     @Inject
-    VMTemplatePoolDao _tmpltPoolDao;
-    @Inject
     EndPointSelector _epSelector;
     @Inject
     TemplateManager _tmpltMgr;
@@ -144,6 +154,8 @@
     MessageBus _messageBus;
     @Inject
     ImageStoreDetailsUtil imageStoreDetailsUtil;
+    @Inject
+    TemplateDataFactory imageFactory;
 
     class TemplateOpContext<T> extends AsyncRpcContext<T> {
         final TemplateObject template;
@@ -324,6 +336,17 @@ public void handleTemplateSync(DataStore store) {
                         }
                     }
 
+                    for (Iterator<VMTemplateVO> iter = allTemplates.listIterator(); iter.hasNext();) {
+                        VMTemplateVO child_template = iter.next();
+                        if (child_template.getParentTemplateId() != null) {
+                            String uniqueName = child_template.getUniqueName();
+                            if (templateInfos.containsKey(uniqueName)) {
+                                templateInfos.remove(uniqueName);
+                            }
+                            iter.remove();
+                        }
+                    }
+
                     toBeDownloaded.addAll(allTemplates);
 
                     final StateMachine2<VirtualMachineTemplate.State, VirtualMachineTemplate.Event, VirtualMachineTemplate> stateMachine = VirtualMachineTemplate.State.getStateMachine();
@@ -675,6 +698,18 @@ protected Void createTemplateCallback(AsyncCallbackDispatcher<TemplateServiceImp
             return null;
         }
 
+        // Check if the OVA contains additional data disks. If yes, create Datadisk templates for each of the additional datadisks present in the OVA
+        if (template.getFormat().equals(ImageFormat.OVA)) {
+            if (!createDataDiskTemplates(template)) {
+                template.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed);
+                result.setResult(callbackResult.getResult());
+                if (parentCallback != null) {
+                    parentCallback.complete(result);
+                }
+                return null;
+            }
+        }
+
         try {
             template.processEvent(ObjectInDataStoreStateMachine.Event.OperationSuccessed);
         } catch (Exception e) {
@@ -691,6 +726,133 @@ protected Void createTemplateCallback(AsyncCallbackDispatcher<TemplateServiceImp
         return null;
     }
 
+
+    protected boolean createDataDiskTemplates(TemplateInfo parentTemplate) {
+        try {
+            // Get Datadisk template (if any) for OVA
+            List<DatadiskTO> dataDiskTemplates = new ArrayList<DatadiskTO>();
+            ImageStoreEntity tmpltStore = (ImageStoreEntity)parentTemplate.getDataStore();
+            dataDiskTemplates = tmpltStore.getDataDiskTemplates(parentTemplate);
+            s_logger.error("MDOVA createDataDiskTemplates Found " + dataDiskTemplates.size() + " Datadisk template(s) for template: " + parentTemplate.getId());
+            int diskCount = 0;
+            VMTemplateVO templateVO = _templateDao.findById(parentTemplate.getId());
+            DataStore imageStore = parentTemplate.getDataStore();
+            Map<String, String> details = new HashMap<String, String>();
+            for (DatadiskTO diskTemplate : dataDiskTemplates) {
+                if (!diskTemplate.isBootable()) {
+                    createChildDataDiskTemplate(diskTemplate, templateVO, parentTemplate, imageStore, diskCount++);
+                    if (!diskTemplate.isIso() && details.get(VmDetailConstants.DATA_DISK_CONTROLLER) == null){
+                        details.put(VmDetailConstants.DATA_DISK_CONTROLLER, getDiskControllerDetails(diskTemplate));
+                        details.put(VmDetailConstants.DATA_DISK_CONTROLLER + diskTemplate.getDiskId(), getDiskControllerDetails(diskTemplate));
+                    }
+                } else {
+                    finalizeParentTemplate(diskTemplate, templateVO, parentTemplate, imageStore, diskCount++);
+                    details.put(VmDetailConstants.ROOT_DISK_CONTROLLER, getDiskControllerDetails(diskTemplate));
+                }
+            }
+            templateVO.setDetails(details);
+            _templateDao.saveDetails(templateVO);
+            return true;
+        } catch (CloudRuntimeException | InterruptedException | ExecutionException e) {
+            return false;
+        }
+    }
+
+    private boolean createChildDataDiskTemplate(DatadiskTO dataDiskTemplate, VMTemplateVO template, TemplateInfo parentTemplate, DataStore imageStore, int diskCount) throws ExecutionException, InterruptedException {
+        // Make an entry in vm_template table
+        Storage.ImageFormat format = dataDiskTemplate.isIso() ? Storage.ImageFormat.ISO : template.getFormat();
+        String suffix = dataDiskTemplate.isIso() ? "-IsoDiskTemplate-" : "-DataDiskTemplate-";
+        TemplateType ttype = dataDiskTemplate.isIso() ? TemplateType.ISODISK : TemplateType.DATADISK;
+        final long templateId = _templateDao.getNextInSequence(Long.class, "id");
+        long guestOsId = dataDiskTemplate.isIso() ? 1 : 0;
+        String templateName = dataDiskTemplate.isIso() ? dataDiskTemplate.getPath().substring(dataDiskTemplate.getPath().lastIndexOf(File.separator) + 1) : template.getName() + suffix + diskCount;
+        VMTemplateVO templateVO = new VMTemplateVO(templateId, templateName, format, false, false, false, ttype, template.getUrl(),
+                template.requiresHvm(), template.getBits(), template.getAccountId(), null, templateName, false, guestOsId, false, template.getHypervisorType(), null,
+                null, false, false);
+        if (dataDiskTemplate.isIso()){
+            templateVO.setUniqueName(templateName);
+        }
+        templateVO.setParentTemplateId(template.getId());
+        templateVO.setSize(dataDiskTemplate.getVirtualSize());
+        templateVO = _templateDao.persist(templateVO);
+        // Make sync call to create Datadisk templates in image store
+        TemplateApiResult result = null;
+        TemplateInfo dataDiskTemplateInfo = imageFactory.getTemplate(templateVO.getId(), imageStore);
+        AsyncCallFuture<TemplateApiResult> future = createDatadiskTemplateAsync(parentTemplate, dataDiskTemplateInfo, dataDiskTemplate.getPath(), dataDiskTemplate.getDiskId(),
+                dataDiskTemplate.getFileSize(), dataDiskTemplate.isBootable());
+        result = future.get();
+        if (result.isSuccess()) {
+            // Make an entry in template_zone_ref table
+            if (imageStore.getScope().getScopeType() == ScopeType.REGION) {
+                associateTemplateToZone(templateId, null);
+            } else if (imageStore.getScope().getScopeType() == ScopeType.ZONE) {
+                Long zoneId = ((ImageStoreEntity)imageStore).getDataCenterId();
+                VMTemplateZoneVO templateZone = new VMTemplateZoneVO(zoneId, templateId, new Date());
+                _vmTemplateZoneDao.persist(templateZone);
+            }
+            _resourceLimitMgr.incrementResourceCount(template.getAccountId(), ResourceType.secondary_storage, templateVO.getSize());
+        } else {
+            s_logger.error("MDOVA createDataDiskTemplates Creation of Datadisk: " + templateVO.getId() + " failed: " + result.getResult());
+            // Delete the Datadisk templates that were already created as they are now invalid
+            s_logger.debug("Since creation of Datadisk template: " + templateVO.getId() + " failed, delete other Datadisk templates that were created as part of parent"
+                    + " template download");
+            TemplateInfo parentTemplateInfo = imageFactory.getTemplate(templateVO.getParentTemplateId(), imageStore);
+            cleanupDatadiskTemplates(parentTemplateInfo);
+        }
+        return result.isSuccess();
+    }
+
+    private boolean finalizeParentTemplate(DatadiskTO dataDiskTemplate, VMTemplateVO templateVO, TemplateInfo parentTemplate, DataStore imageStore, int diskCount) throws ExecutionException, InterruptedException, CloudRuntimeException {
+        TemplateInfo templateInfo = imageFactory.getTemplate(templateVO.getId(), imageStore);
+        AsyncCallFuture<TemplateApiResult> templateFuture = createDatadiskTemplateAsync(parentTemplate, templateInfo, dataDiskTemplate.getPath(), dataDiskTemplate.getDiskId(),
+                dataDiskTemplate.getFileSize(), dataDiskTemplate.isBootable());
+        TemplateApiResult result = null;
+        result = templateFuture.get();
+        if (!result.isSuccess()) {
+            s_logger.debug("Since creation of parent template: " + templateInfo.getId() + " failed, delete Datadisk templates that were created as part of parent"
+                    + " template download");
+            cleanupDatadiskTemplates(templateInfo);
+        }
+        return result.isSuccess();
+    }
+
+    private String getDiskControllerDetails(DatadiskTO diskTemplate) {
+        String controller = diskTemplate.getDiskController() ;
+        String controllerSubType = diskTemplate.getDiskControllerSubType();
+        return StringUtils.isNotBlank(controller)
+                ? (controller.contains("IDE") || controller.contains("ide")
+                    ? "ide"
+                    : (controller.contains("SCSI") || controller.contains("scsi")
+                        ? (StringUtils.isNotBlank(controllerSubType)
+                            ? (controllerSubType.equals("lsilogicsas")
+                                ? "lsisas1068"
+                                : controllerSubType)
+                            : "lsilogic")
+                        : "osdefault"))
+                : "lsilogic";
+    }
+
+    private void cleanupDatadiskTemplates(TemplateInfo parentTemplateInfo) {
+        DataStore imageStore = parentTemplateInfo.getDataStore();
+        List<VMTemplateVO> datadiskTemplatesToDelete = _templateDao.listByParentTemplatetId(parentTemplateInfo.getId());
+        for (VMTemplateVO datadiskTemplateToDelete: datadiskTemplatesToDelete) {
+            s_logger.info("Delete template: " + datadiskTemplateToDelete.getId() + " from image store: " + imageStore.getName());
+            AsyncCallFuture<TemplateApiResult> future = deleteTemplateAsync(imageFactory.getTemplate(datadiskTemplateToDelete.getId(), imageStore));
+            try {
+                TemplateApiResult result = future.get();
+                if (!result.isSuccess()) {
+                    s_logger.warn("Failed to delete datadisk template: " + datadiskTemplateToDelete + " from image store: " + imageStore.getName() + " due to: " + result.getResult());
+                    break;
+                }
+                _vmTemplateZoneDao.deletePrimaryRecordsForTemplate(datadiskTemplateToDelete.getId());
+                _resourceLimitMgr.decrementResourceCount(datadiskTemplateToDelete.getAccountId(), ResourceType.secondary_storage, datadiskTemplateToDelete.getSize());
+            } catch (Exception e) {
+                s_logger.debug("Delete datadisk template failed", e);
+                throw new CloudRuntimeException("Delete template Failed", e);
+            }
+        }
+    }
+
     @Override
     public AsyncCallFuture<TemplateApiResult> deleteTemplateAsync(TemplateInfo template) {
         TemplateObject to = (TemplateObject)template;
@@ -1003,4 +1165,72 @@ public void addSystemVMTemplatesToSecondary(DataStore store) {
             }
         }
     }
+
+    private class CreateDataDiskTemplateContext<T> extends AsyncRpcContext<T> {
+        private final DataObject dataDiskTemplate;
+        private final AsyncCallFuture<TemplateApiResult> future;
+
+        public CreateDataDiskTemplateContext(AsyncCompletionCallback<T> callback, DataObject dataDiskTemplate, AsyncCallFuture<TemplateApiResult> future) {
+            super(callback);
+            this.dataDiskTemplate = dataDiskTemplate;
+            this.future = future;
+        }
+
+        public AsyncCallFuture<TemplateApiResult> getFuture() {
+            return this.future;
+        }
+    }
+
+    @Override
+    public AsyncCallFuture<TemplateApiResult> createDatadiskTemplateAsync(TemplateInfo parentTemplate, TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable) {
+        AsyncCallFuture<TemplateApiResult> future = new AsyncCallFuture<TemplateApiResult>();
+        // Make an entry for disk template in template_store_ref table
+        DataStore store = parentTemplate.getDataStore();
+        TemplateObject dataDiskTemplateOnStore;
+        if (!bootable) {
+            dataDiskTemplateOnStore = (TemplateObject)store.create(dataDiskTemplate);
+            dataDiskTemplateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.CreateOnlyRequested);
+        } else {
+            dataDiskTemplateOnStore = (TemplateObject) imageFactory.getTemplate(parentTemplate, store);
+        }
+        try {
+            CreateDataDiskTemplateContext<TemplateApiResult> context = new CreateDataDiskTemplateContext<TemplateApiResult>(null, dataDiskTemplateOnStore, future);
+            AsyncCallbackDispatcher<TemplateServiceImpl, CreateCmdResult> caller = AsyncCallbackDispatcher.create(this);
+            caller.setCallback(caller.getTarget().createDatadiskTemplateCallback(null, null)).setContext(context);
+            ImageStoreEntity tmpltStore = (ImageStoreEntity)parentTemplate.getDataStore();
+            tmpltStore.createDataDiskTemplateAsync(dataDiskTemplate, path, diskId, fileSize, bootable, caller);
+        } catch (CloudRuntimeException ex) {
+            dataDiskTemplateOnStore.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed);
+            TemplateApiResult result = new TemplateApiResult(dataDiskTemplate);
+            result.setResult(ex.getMessage());
+            if (future != null) {
+                future.complete(result);
+            }
+        }
+        return future;
+    }
+
+    protected Void createDatadiskTemplateCallback(AsyncCallbackDispatcher<TemplateServiceImpl, CreateCmdResult> callback,
+            CreateDataDiskTemplateContext<TemplateApiResult> context) {
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("MDOVA createDatadiskTemplateCallback Performing create datadisk template cross callback after completion");
+        }
+        DataObject dataDiskTemplate = context.dataDiskTemplate;
+        AsyncCallFuture<TemplateApiResult> future = context.getFuture();
+        CreateCmdResult result = callback.getResult();
+        TemplateApiResult dataDiskTemplateResult = new TemplateApiResult((TemplateObject)dataDiskTemplate);
+        try {
+            if (result.isSuccess()) {
+                dataDiskTemplate.processEvent(Event.OperationSuccessed, result.getAnswer());
+            } else {
+                dataDiskTemplate.processEvent(Event.OperationFailed);
+                dataDiskTemplateResult.setResult(result.getResult());
+            }
+        } catch (Exception e) {
+            s_logger.debug("Failed to process create template callback", e);
+            dataDiskTemplateResult.setResult(e.toString());
+        }
+        future.complete(dataDiskTemplateResult);
+        return null;
+    }
 }
diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java
index 41ce5a230b0..f54673d2b61 100644
--- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java
+++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java
@@ -20,6 +20,7 @@
 
 import java.util.Date;
 import java.util.Set;
+import java.util.List;
 import java.util.concurrent.ExecutionException;
 
 import javax.inject.Inject;
@@ -42,7 +43,9 @@
 import org.apache.cloudstack.storage.image.ImageStoreDriver;
 import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
 import org.apache.cloudstack.storage.to.ImageStoreTO;
-
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
+import com.cloud.agent.api.to.DatadiskTO;
 import com.cloud.agent.api.to.DataStoreTO;
 import com.cloud.capacity.dao.CapacityDao;
 import com.cloud.storage.DataStoreRole;
@@ -214,5 +217,14 @@ public void deleteExtractUrl(String installPath, String url, Upload.Type entityT
         driver.deleteEntityExtractUrl(this, installPath, url, entityType);
     }
 
+    @Override
+    public List<DatadiskTO> getDataDiskTemplates(DataObject obj) {
+        return driver.getDataDiskTemplates(obj);
+    }
+
+    @Override
+    public Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable, AsyncCompletionCallback<CreateCmdResult> callback) {
+        return driver.createDataDiskTemplateAsync(dataDiskTemplate, path, diskId, bootable, fileSize, callback);
+    }
 
 }
diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java
index 6e78f190d5d..d13fa25b5a7 100644
--- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java
+++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java
@@ -34,6 +34,7 @@
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
 import org.apache.cloudstack.storage.to.TemplateObjectTO;
+import com.cloud.agent.api.storage.CreateDatadiskTemplateAnswer;
 
 import com.cloud.agent.api.Answer;
 import com.cloud.agent.api.to.DataObjectType;
@@ -230,6 +231,16 @@ public void processEvent(ObjectInDataStoreStateMachine.Event event, Answer answe
                         templateVO.setSize(newTemplate.getSize());
                         imageDao.update(templateVO.getId(), templateVO);
                     }
+                } else if (answer instanceof CreateDatadiskTemplateAnswer) {
+                    CreateDatadiskTemplateAnswer createAnswer = (CreateDatadiskTemplateAnswer)answer;
+                    TemplateObjectTO dataDiskTemplate = createAnswer.getDataDiskTemplate();
+                    TemplateDataStoreVO templateStoreRef = templateStoreDao.findByStoreTemplate(getDataStore().getId(), dataDiskTemplate.getId());
+                    templateStoreRef.setInstallPath(dataDiskTemplate.getPath());
+                    templateStoreRef.setDownloadPercent(100);
+                    templateStoreRef.setDownloadState(Status.DOWNLOADED);
+                    templateStoreRef.setSize(dataDiskTemplate.getSize());
+                    templateStoreRef.setPhysicalSize(dataDiskTemplate.getPhysicalSize());
+                    templateStoreDao.update(templateStoreRef.getId(), templateStoreRef);
                 }
             }
             objectInStoreMgr.update(this, event);
@@ -450,6 +461,11 @@ public Long getSourceTemplateId() {
         return imageVO.getSourceTemplateId();
     }
 
+    @Override
+    public Long getParentTemplateId() {
+        return imageVO.getParentTemplateId();
+    }
+
     @Override
     public String getTemplateTag() {
         return imageVO.getTemplateTag();
diff --git a/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
index 158ee18f911..64d74d74d20 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
@@ -71,6 +71,9 @@
                             + "left join cluster_details cd on c.id=cd.cluster_id and cd.name='" + CapacityManager.StorageOperationsExcludeCluster.key() + "' "
                             + "where h.status = 'Up' and h.type = 'Routing' and h.resource_state = 'Enabled' and s.pool_id = ? ";
 
+    private String findOneHypervisorHostInScopeByType = "select h.id from host h where h.status = 'Up' and h.hypervisor_type = ? ";
+    private String findOneHypervisorHostInScope = "select h.id from host h where h.status = 'Up' and h.hypervisor_type is not null ";
+
     protected boolean moveBetweenPrimaryImage(DataStore srcStore, DataStore destStore) {
         DataStoreRole srcRole = srcStore.getRole();
         DataStoreRole destRole = destStore.getRole();
diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
index 288fae4e1a1..244f1128b3a 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java
@@ -20,7 +20,9 @@
 
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.ArrayList;
 import java.util.Date;
+import java.util.List;
 import java.util.Map;
 
 import javax.inject.Inject;
@@ -34,6 +36,7 @@
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
 import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
 import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher;
 import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 import org.apache.cloudstack.framework.async.AsyncRpcContext;
@@ -44,9 +47,13 @@
 import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO;
 import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
 import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
+import org.apache.cloudstack.storage.endpoint.DefaultEndPointSelector;
 
 import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.storage.CreateDatadiskTemplateCommand;
 import com.cloud.agent.api.storage.DownloadAnswer;
+import com.cloud.agent.api.storage.GetDatadisksAnswer;
+import com.cloud.agent.api.storage.GetDatadisksCommand;
 import com.cloud.agent.api.to.DataObjectType;
 import com.cloud.agent.api.to.DataTO;
 import com.cloud.alert.AlertManager;
@@ -54,10 +61,15 @@
 import com.cloud.storage.VMTemplateVO;
 import com.cloud.storage.VolumeVO;
 import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.dao.VMTemplateDetailsDao;
 import com.cloud.storage.dao.VMTemplateZoneDao;
 import com.cloud.storage.dao.VolumeDao;
 import com.cloud.storage.download.DownloadMonitor;
+import com.cloud.user.ResourceLimitService;
+import com.cloud.user.dao.AccountDao;
+import com.cloud.agent.api.to.DatadiskTO;
 import com.cloud.utils.net.Proxy;
+import com.cloud.utils.exception.CloudRuntimeException;
 
 public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver {
     private static final Logger s_logger = Logger.getLogger(BaseImageStoreDriverImpl.class);
@@ -79,6 +91,14 @@
     VMTemplateZoneDao _vmTemplateZoneDao;
     @Inject
     AlertManager _alertMgr;
+    @Inject
+    VMTemplateDetailsDao _templateDetailsDao;
+    @Inject
+    DefaultEndPointSelector _defaultEpSelector;
+    @Inject
+    AccountDao _accountDao;
+    @Inject
+    ResourceLimitService _resourceLimitMgr;
 
     protected String _proxy = null;
 
@@ -288,6 +308,60 @@ public void resize(DataObject data, AsyncCompletionCallback<CreateCmdResult> cal
     }
 
     @Override
-    public void deleteEntityExtractUrl(DataStore store, String installPath, String url, Upload.Type entityType){
+    public void deleteEntityExtractUrl(DataStore store, String installPath, String url, Upload.Type entityType) {
+    }
+
+    @Override
+    public List<DatadiskTO> getDataDiskTemplates(DataObject obj) {
+        List<DatadiskTO> dataDiskDetails = new ArrayList<DatadiskTO>();
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Get the data disks present in the OVA template");
+        }
+        DataStore store = obj.getDataStore();
+        GetDatadisksCommand cmd = new GetDatadisksCommand(obj.getTO());
+        EndPoint ep = _defaultEpSelector.select(store);
+        Answer answer = null;
+        if (ep == null) {
+            String errMsg = "No remote endpoint to send command, check if host or ssvm is down?";
+            s_logger.error(errMsg);
+            answer = new Answer(cmd, false, errMsg);
+        } else {
+            answer = ep.sendMessage(cmd);
+        }
+        if (answer != null && answer.getResult()) {
+            GetDatadisksAnswer getDatadisksAnswer = (GetDatadisksAnswer)answer;
+            dataDiskDetails = getDatadisksAnswer.getDataDiskDetails(); // Details - Disk path, virtual size
+        }
+        else {
+            s_logger.debug("MDOVA getDataDiskTemplates Data disk command failed ");
+            throw new CloudRuntimeException("Get Data disk command failed " + answer.getDetails());
+        }
+        s_logger.debug("MDOVA getDataDiskTemplates dataDiskDetails " + dataDiskDetails.toString());
+        return dataDiskDetails;
+    }
+
+    @Override
+    public Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, boolean bootable, long fileSize, AsyncCompletionCallback<CreateCmdResult> callback) {
+        Answer answer = null;
+        String errMsg = null;
+        if (s_logger.isDebugEnabled()) {
+            s_logger.debug("Create Datadisk template: " + dataDiskTemplate.getId());
+        }
+        CreateDatadiskTemplateCommand cmd = new CreateDatadiskTemplateCommand(dataDiskTemplate.getTO(), path, diskId, fileSize, bootable);
+        EndPoint ep = _defaultEpSelector.select(dataDiskTemplate.getDataStore());
+        if (ep == null) {
+            errMsg = "No remote endpoint to send command, check if host or ssvm is down?";
+            s_logger.error(errMsg);
+            answer = new Answer(cmd, false, errMsg);
+        } else {
+            answer = ep.sendMessage(cmd);
+        }
+        if (answer != null && !answer.getResult()) {
+            errMsg = answer.getDetails();
+        }
+        CreateCmdResult result = new CreateCmdResult(null, answer);
+        result.setResult(errMsg);
+        callback.complete(result);
+        return null;
     }
 }
diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/ImageStoreDriver.java b/engine/storage/src/org/apache/cloudstack/storage/image/ImageStoreDriver.java
index e71529edb8f..70f40f6f5c0 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/image/ImageStoreDriver.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/image/ImageStoreDriver.java
@@ -19,14 +19,25 @@
 package org.apache.cloudstack.storage.image;
 
 import com.cloud.storage.Upload;
+
+import java.util.List;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
 import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
 
+import com.cloud.agent.api.to.DatadiskTO;
 import com.cloud.storage.Storage.ImageFormat;
 
 public interface ImageStoreDriver extends DataStoreDriver {
     String createEntityExtractUrl(DataStore store, String installPath, ImageFormat format, DataObject dataObject);
 
     void deleteEntityExtractUrl(DataStore store, String installPath, String url, Upload.Type entityType);
+
+    List<DatadiskTO> getDataDiskTemplates(DataObject obj);
+
+    Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String path, String diskId, boolean bootable, long fileSize, AsyncCompletionCallback<CreateCmdResult> callback);
 }
diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEntityImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEntityImpl.java
index db752fe8a1e..d3c1effd2a1 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEntityImpl.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/image/TemplateEntityImpl.java
@@ -304,7 +304,11 @@ public void incrUpdatedCount() {
 
     @Override
     public Date getUpdated() {
-        // TODO Auto-generated method stub
+        return null;
+    }
+
+    @Override
+    public Long getParentTemplateId() {
         return null;
     }
 }
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index 8818724e722..f9ef71f3de8 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -729,6 +729,8 @@ protected void createVolumeFromBaseImageAsync(VolumeInfo volume, DataObject temp
         caller.setCallback(caller.getTarget().createVolumeFromBaseImageCallBack(null, null));
         caller.setContext(context);
 
+        s_logger.debug("MDOVF createVolumeFromBaseImageAsync templateOnPrimaryStore " + templateOnPrimaryStore.getTO().getPath() + " name=" + templateOnPrimaryStore.getTO().toString());
+        s_logger.debug("MDOVF createVolumeFromBaseImageAsync volume " + volume.getPath() + " name=" + volume.getName());
         motionSrv.copyAsync(context.templateOnStore, volumeOnPrimaryStorage, caller);
         return;
     }
@@ -1178,6 +1180,7 @@ private HostVO getHost(Long zoneId, HypervisorType hypervisorType, boolean compu
     @DB
     @Override
     public AsyncCallFuture<VolumeApiResult> createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, TemplateInfo template) {
+        s_logger.debug("MDOVF createVolumeFromTemplateAsync volume " + volume.getPath() + " name =" + volume.getName() + " template=" + template.getDisplayText());
         PrimaryDataStore pd = dataStoreMgr.getPrimaryDataStore(dataStoreId);
         TemplateInfo templateOnPrimaryStore = pd.getTemplate(template.getId());
         AsyncCallFuture<VolumeApiResult> future = new AsyncCallFuture<VolumeApiResult>();
diff --git a/framework/cluster/test/com/cloud/cluster/ClusterServiceServletAdapterTest.java b/framework/cluster/test/com/cloud/cluster/ClusterServiceServletAdapterTest.java
index 28dbcaa951a..91d8b611a0f 100644
--- a/framework/cluster/test/com/cloud/cluster/ClusterServiceServletAdapterTest.java
+++ b/framework/cluster/test/com/cloud/cluster/ClusterServiceServletAdapterTest.java
@@ -18,22 +18,31 @@
 
 import static org.junit.Assert.assertTrue;
 
+import org.apache.cloudstack.framework.config.ConfigDepot;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
+import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
+import com.cloud.cluster.dao.ManagementServerHostDao;
 import com.cloud.utils.component.ComponentLifecycle;
 
 @RunWith(MockitoJUnitRunner.class)
 public class ClusterServiceServletAdapterTest {
 
+    @Mock
+    private ClusterManager _manager;
+    @Mock
+    private ManagementServerHostDao _mshostDao;
+    @Mock
+    protected ConfigDepot _configDepot;
+
     ClusterServiceServletAdapter clusterServiceServletAdapter;
     ClusterManagerImpl clusterManagerImpl;
 
     @Before
-    public void setup() throws IllegalArgumentException,
-            IllegalAccessException, NoSuchFieldException, SecurityException {
+    public void setup() throws IllegalArgumentException, IllegalAccessException, NoSuchFieldException, SecurityException {
         clusterServiceServletAdapter = new ClusterServiceServletAdapter();
         clusterManagerImpl = new ClusterManagerImpl();
     }
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
index 00298cedc75..f42751ffa89 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java
@@ -165,7 +165,7 @@ public String createOvaForTemplate(TemplateObjectTO template) {
         String secStorageUrl = nfsStore.getUrl();
         assert (secStorageUrl != null);
         String installPath = template.getPath();
-        String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, _nfsVersion);
+        String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, nfsStore.getNfsVersion());
         String installFullPath = secondaryMountPoint + "/" + installPath;
         try {
             if (installFullPath.endsWith(".ova")) {
@@ -203,7 +203,7 @@ public String createOvaForVolume(VolumeObjectTO volume) {
         String installPath = volume.getPath();
         int index = installPath.lastIndexOf(File.separator);
         String volumeUuid = installPath.substring(index + 1);
-        String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, _nfsVersion);
+        String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, nfsStore.getNfsVersion());
         //The real volume path
         String volumePath = installPath + File.separator + volumeUuid + ".ova";
         String installFullPath = secondaryMountPoint + "/" + installPath;
@@ -280,8 +280,7 @@ public Answer execute(VmwareHostService hostService, PrimaryStorageDownloadComma
                 assert (morDs != null);
                 DatastoreMO primaryStorageDatastoreMo = new DatastoreMO(context, morDs);
 
-                copyTemplateFromSecondaryToPrimary(hyperHost, primaryStorageDatastoreMo, secondaryStorageUrl, mountPoint, templateName, templateUuidName,
-                        cmd.getNfsVersion());
+                copyTemplateFromSecondaryToPrimary(hyperHost, primaryStorageDatastoreMo, secondaryStorageUrl, mountPoint, templateName, templateUuidName, cmd.getNfsVersion());
             } else {
                 s_logger.info("Template " + templateName + " has already been setup, skip the template setup process in primary storage");
             }
@@ -354,9 +353,8 @@ public Answer execute(VmwareHostService hostService, BackupSnapshotCommand cmd)
                     throw new Exception("Failed to take snapshot " + cmd.getSnapshotName() + " on vm: " + cmd.getVmName());
                 }
 
-                snapshotBackupUuid =
-                        backupSnapshotToSecondaryStorage(vmMo, accountId, volumeId, cmd.getVolumePath(), snapshotUuid, secondaryStorageUrl, prevSnapshotUuid, prevBackupUuid,
-                                hostService.getWorkerName(context, cmd, 1), cmd.getNfsVersion());
+                snapshotBackupUuid = backupSnapshotToSecondaryStorage(vmMo, accountId, volumeId, cmd.getVolumePath(), snapshotUuid, secondaryStorageUrl, prevSnapshotUuid,
+                        prevBackupUuid, hostService.getWorkerName(context, cmd, 1), cmd.getNfsVersion());
 
                 success = (snapshotBackupUuid != null);
                 if (success) {
@@ -410,8 +408,7 @@ public Answer execute(VmwareHostService hostService, CreatePrivateTemplateFromVo
             VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(cmd.getVmName());
             if (vmMo == null) {
                 if (s_logger.isDebugEnabled()) {
-                    s_logger.debug("Unable to find the owner VM for CreatePrivateTemplateFromVolumeCommand on host " + hyperHost.getHyperHostName() +
-                            ", try within datacenter");
+                    s_logger.debug("Unable to find the owner VM for CreatePrivateTemplateFromVolumeCommand on host " + hyperHost.getHyperHostName() + ", try within datacenter");
                 }
                 vmMo = hyperHost.findVmOnPeerHyperHost(cmd.getVmName());
 
@@ -422,9 +419,8 @@ public Answer execute(VmwareHostService hostService, CreatePrivateTemplateFromVo
                 }
             }
 
-            Ternary<String, Long, Long> result =
-                    createTemplateFromVolume(vmMo, accountId, templateId, cmd.getUniqueName(), secondaryStoragePoolURL, volumePath,
-                            hostService.getWorkerName(context, cmd, 0), cmd.getNfsVersion());
+            Ternary<String, Long, Long> result = createTemplateFromVolume(vmMo, accountId, templateId, cmd.getUniqueName(), secondaryStoragePoolURL, volumePath,
+                    hostService.getWorkerName(context, cmd, 0), cmd.getNfsVersion());
 
             return new CreatePrivateTemplateAnswer(cmd, true, null, result.first(), result.third(), result.second(), cmd.getUniqueName(), ImageFormat.OVA);
 
@@ -481,9 +477,8 @@ public Answer execute(VmwareHostService hostService, CopyVolumeCommand cmd) {
 
             Pair<String, String> result;
             if (cmd.toSecondaryStorage()) {
-                result =
-                        copyVolumeToSecStorage(hostService, hyperHost, cmd, vmName, volumeId, cmd.getPool().getUuid(), volumePath, secondaryStorageURL,
-                                hostService.getWorkerName(context, cmd, 0), cmd.getNfsVersion());
+                result = copyVolumeToSecStorage(hostService, hyperHost, cmd, vmName, volumeId, cmd.getPool().getUuid(), volumePath, secondaryStorageURL,
+                        hostService.getWorkerName(context, cmd, 0), cmd.getNfsVersion());
             } else {
                 StorageFilerTO poolTO = cmd.getPool();
 
@@ -496,8 +491,7 @@ public Answer execute(VmwareHostService hostService, CopyVolumeCommand cmd) {
                     }
                 }
 
-                result = copyVolumeFromSecStorage(hyperHost, volumeId, new DatastoreMO(context, morDatastore), secondaryStorageURL, volumePath,
-                        cmd.getNfsVersion());
+                result = copyVolumeFromSecStorage(hyperHost, volumeId, new DatastoreMO(context, morDatastore), secondaryStorageURL, volumePath, cmd.getNfsVersion());
                 deleteVolumeDirOnSecondaryStorage(volumeId, secondaryStorageURL, cmd.getNfsVersion());
             }
             return new CopyVolumeAnswer(cmd, true, null, result.first(), result.second());
@@ -536,8 +530,7 @@ public Answer execute(VmwareHostService hostService, CreateVolumeFromSnapshotCom
             }
 
             DatastoreMO primaryDsMo = new DatastoreMO(hyperHost.getContext(), morPrimaryDs);
-            details = createVolumeFromSnapshot(hyperHost, primaryDsMo, newVolumeName, accountId, volumeId, secondaryStorageUrl, backedUpSnapshotUuid,
-                    cmd.getNfsVersion());
+            details = createVolumeFromSnapshot(hyperHost, primaryDsMo, newVolumeName, accountId, volumeId, secondaryStorageUrl, backedUpSnapshotUuid, cmd.getNfsVersion());
             if (details == null) {
                 success = true;
             }
@@ -553,13 +546,14 @@ public Answer execute(VmwareHostService hostService, CreateVolumeFromSnapshotCom
         return new CreateVolumeFromSnapshotAnswer(cmd, success, details, newVolumeName);
     }
 
+
     // templateName: name in secondary storage
     // templateUuid: will be used at hypervisor layer
-    private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl,
-            String templatePathAtSecondaryStorage, String templateName, String templateUuid, Integer nfsVersion) throws Exception {
+    private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, String templatePathAtSecondaryStorage,
+            String templateName, String templateUuid, Integer nfsVersion) throws Exception {
 
-        s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " +
-                templatePathAtSecondaryStorage + ", templateName: " + templateName);
+        s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: "
+                + templatePathAtSecondaryStorage + ", templateName: " + templateName);
 
         String secondaryMountPoint = _mountService.getMountPoint(secondaryStorageUrl, nfsVersion);
         s_logger.info("Secondary storage mount point: " + secondaryMountPoint);
@@ -593,9 +587,8 @@ private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost,
 
         VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName);
         if (vmMo == null) {
-            String msg =
-                    "Failed to import OVA template. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage +
-                    ", templateName: " + templateName + ", templateUuid: " + templateUuid;
+            String msg = "Failed to import OVA template. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage
+                    + ", templateName: " + templateName + ", templateUuid: " + templateUuid;
             s_logger.error(msg);
             throw new Exception(msg);
         }
@@ -800,7 +793,7 @@ private void postCreatePrivateTemplate(String installFullPath, long templateId,
         // TODO a bit ugly here
         BufferedWriter out = null;
         try {
-            out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"),"UTF-8"));
+            out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"), "UTF-8"));
             out.write("filename=" + templateName + ".ova");
             out.newLine();
             out.write("description=");
@@ -840,7 +833,7 @@ private void writeMetaOvaForTemplate(String installFullPath, String ovfFilename,
         // TODO a bit ugly here
         BufferedWriter out = null;
         try {
-            out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/" + templateName + ".ova.meta"),"UTF-8"));
+            out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/" + templateName + ".ova.meta"), "UTF-8"));
             out.write("ova.filename=" + templateName + ".ova");
             out.newLine();
             out.write("version=1.0");
@@ -860,8 +853,8 @@ private void writeMetaOvaForTemplate(String installFullPath, String ovfFilename,
         }
     }
 
-    private String createVolumeFromSnapshot(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, long accountId, long volumeId,
-            String secStorageUrl, String snapshotBackupUuid, Integer nfsVersion) throws Exception {
+    private String createVolumeFromSnapshot(VmwareHypervisorHost hyperHost, DatastoreMO primaryDsMo, String newVolumeName, long accountId, long volumeId, String secStorageUrl,
+            String snapshotBackupUuid, Integer nfsVersion) throws Exception {
 
         restoreVolumeFromSecStorage(hyperHost, primaryDsMo, newVolumeName, secStorageUrl, getSnapshotRelativeDirInSecStorage(accountId, volumeId), snapshotBackupUuid, nfsVersion);
         return null;
@@ -935,8 +928,8 @@ private String backupSnapshotToSecondaryStorage(VirtualMachineMO vmMo, long acco
         return backupUuid + "/" + backupUuid;
     }
 
-    private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, String secStorageUrl, String secStorageDir, String exportName,
-            String workerVmName, Integer nfsVersion) throws Exception {
+    private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volumePath, String secStorageUrl, String secStorageDir, String exportName, String workerVmName,
+            Integer nfsVersion) throws Exception {
 
         String secondaryMountPoint = _mountService.getMountPoint(secStorageUrl, nfsVersion);
         String exportPath = secondaryMountPoint + "/" + secStorageDir + "/" + exportName;
@@ -980,8 +973,8 @@ private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volume
         }
     }
 
-    private Pair<String, String> copyVolumeToSecStorage(VmwareHostService hostService, VmwareHypervisorHost hyperHost, CopyVolumeCommand cmd, String vmName,
-            long volumeId, String poolId, String volumePath, String secStorageUrl, String workerVmName, Integer nfsVersion) throws Exception {
+    private Pair<String, String> copyVolumeToSecStorage(VmwareHostService hostService, VmwareHypervisorHost hyperHost, CopyVolumeCommand cmd, String vmName, long volumeId,
+            String poolId, String volumePath, String secStorageUrl, String workerVmName, Integer nfsVersion) throws Exception {
 
         String volumeFolder = String.valueOf(volumeId) + "/";
         VirtualMachineMO workerVm = null;
@@ -1019,8 +1012,8 @@ private void exportVolumeToSecondaryStroage(VirtualMachineMO vmMo, String volume
 
             vmMo.createSnapshot(exportName, "Temporary snapshot for copy-volume command", false, false);
 
-            exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, "volumes/" + volumeFolder, exportName,
-                    hostService.getWorkerName(hyperHost.getContext(), cmd, 1), nfsVersion);
+            exportVolumeToSecondaryStroage(vmMo, volumePath, secStorageUrl, "volumes/" + volumeFolder, exportName, hostService.getWorkerName(hyperHost.getContext(), cmd, 1),
+                    nfsVersion);
             return new Pair<String, String>(volumeFolder, exportName);
 
         } finally {
@@ -1041,8 +1034,8 @@ private String getVolumePathInDatastore(DatastoreMO dsMo, String volumeFileName,
         return datastoreVolumePath;
     }
 
-    private Pair<String, String> copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, long volumeId, DatastoreMO dsMo, String secStorageUrl, String exportName, Integer nfsVersion)
-            throws Exception {
+    private Pair<String, String> copyVolumeFromSecStorage(VmwareHypervisorHost hyperHost, long volumeId, DatastoreMO dsMo, String secStorageUrl, String exportName,
+            Integer nfsVersion) throws Exception {
 
         String volumeFolder = String.valueOf(volumeId) + "/";
         String newVolume = UUID.randomUUID().toString().replaceAll("-", "");
@@ -1098,7 +1091,7 @@ private String createOVAFromMetafile(String metafileName) throws Exception {
             s_logger.info("Package OVA for template in dir: " + exportDir + "cmd: " + command.toString());
             // to be safe, physically test existence of the target OVA file
             if ((new File(exportDir + File.separator + ovaFileName)).exists()) {
-                s_logger.info("OVA file: " + ovaFileName +" is created and ready to extract.");
+                s_logger.info("OVA file: " + ovaFileName + " is created and ready to extract.");
                 return ovaFileName;
             } else {
                 String msg = exportDir + File.separator + ovaFileName + " is not created as expected";
@@ -1135,9 +1128,8 @@ private static String getSnapshotRelativeDirInSecStorage(long accountId, long vo
         return "snapshots/" + accountId + "/" + volumeId;
     }
 
-    private long getVMSnapshotChainSize(VmwareContext context, VmwareHypervisorHost hyperHost,
-            String fileName, ManagedObjectReference morDs, String exceptFileName)
-                    throws Exception{
+    private long getVMSnapshotChainSize(VmwareContext context, VmwareHypervisorHost hyperHost, String fileName, ManagedObjectReference morDs, String exceptFileName)
+            throws Exception {
         long size = 0;
         DatastoreMO dsMo = new DatastoreMO(context, morDs);
         HostDatastoreBrowserMO browserMo = dsMo.getHostDatastoreBrowserMO();
@@ -1187,8 +1179,7 @@ public CreateVMSnapshotAnswer execute(VmwareHostService hostService, CreateVMSna
             for (ManagedObjectReference taskMor : tasks) {
                 TaskInfo info = (TaskInfo)(context.getVimClient().getDynamicProperty(taskMor, "info"));
 
-                if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) &&
-                        info.getName().equalsIgnoreCase("CreateSnapshot_Task")) {
+                if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) && info.getName().equalsIgnoreCase("CreateSnapshot_Task")) {
                     if (!(info.getState().equals(TaskInfoState.SUCCESS) || info.getState().equals(TaskInfoState.ERROR))) {
                         s_logger.debug("There is already a VM snapshot task running, wait for it");
                         context.getVimClient().waitForTask(taskMor);
@@ -1229,8 +1220,7 @@ public CreateVMSnapshotAnswer execute(VmwareHostService hostService, CreateVMSna
                     vmMo.removeSnapshot(vmSnapshotName, false);
                 }
             } catch (Exception e1) {
-                s_logger.info("[ignored]"
-                        + "error during snapshot remove: " + e1.getLocalizedMessage());
+                s_logger.info("[ignored]" + "error during snapshot remove: " + e1.getLocalizedMessage());
             }
 
             return new CreateVMSnapshotAnswer(cmd, false, e.getMessage());
@@ -1259,8 +1249,7 @@ public CreateVMSnapshotAnswer execute(VmwareHostService hostService, CreateVMSna
                     baseName = baseName.substring(1, baseName.length() - 1);
 
                     vmdkName = fullPath; // for managed storage, vmdkName == fullPath
-                }
-                else {
+                } else {
                     vmdkName = fullPath.split("] ")[1];
 
                     if (vmdkName.endsWith(".vmdk")) {
@@ -1283,8 +1272,8 @@ public CreateVMSnapshotAnswer execute(VmwareHostService hostService, CreateVMSna
         return mapNewDisk;
     }
 
-    private void setVolumeToPathAndSize(List<VolumeObjectTO> volumeTOs, Map<String, String> mapNewDisk, VmwareContext context,
-            VmwareHypervisorHost hyperHost, String vmName) throws Exception {
+    private void setVolumeToPathAndSize(List<VolumeObjectTO> volumeTOs, Map<String, String> mapNewDisk, VmwareContext context, VmwareHypervisorHost hyperHost, String vmName)
+            throws Exception {
         for (VolumeObjectTO volumeTO : volumeTOs) {
             String oldPath = volumeTO.getPath();
 
@@ -1296,8 +1285,7 @@ private void setVolumeToPathAndSize(List<VolumeObjectTO> volumeTOs, Map<String,
 
                 // remove '[' and ']'
                 baseName = oldPath.substring(1, oldPath.length() - 1);
-            }
-            else {
+            } else {
                 baseName = VmwareHelper.trimSnapshotDeltaPostfix(volumeTO.getPath());
             }
 
@@ -1308,7 +1296,7 @@ private void setVolumeToPathAndSize(List<VolumeObjectTO> volumeTOs, Map<String,
             ManagedObjectReference morDs = getDatastoreAsManagedObjectReference(baseName, hyperHost, store);
             long size = getVMSnapshotChainSize(context, hyperHost, baseName + "*.vmdk", morDs, newPath);
 
-            if (volumeTO.getVolumeType()== Volume.Type.ROOT) {
+            if (volumeTO.getVolumeType() == Volume.Type.ROOT) {
                 // add memory snapshot size
                 size += getVMSnapshotChainSize(context, hyperHost, vmName + "*.vmsn", morDs, null);
             }
@@ -1318,7 +1306,7 @@ private void setVolumeToPathAndSize(List<VolumeObjectTO> volumeTOs, Map<String,
         }
     }
 
-    private ManagedObjectReference getDatastoreAsManagedObjectReference(String baseName, VmwareHypervisorHost hyperHost, DataStoreTO store)  throws Exception {
+    private ManagedObjectReference getDatastoreAsManagedObjectReference(String baseName, VmwareHypervisorHost hyperHost, DataStoreTO store) throws Exception {
         try {
             // if baseName equates to a datastore name, this should be managed storage
             ManagedObjectReference morDs = hyperHost.findDatastoreByName(baseName);
@@ -1326,10 +1314,8 @@ private ManagedObjectReference getDatastoreAsManagedObjectReference(String baseN
             if (morDs != null) {
                 return morDs;
             }
-        }
-        catch (Exception ex) {
-            s_logger.info("[ignored]"
-                    + "error getting managed object refference: " + ex.getLocalizedMessage());
+        } catch (Exception ex) {
+            s_logger.info("[ignored]" + "error getting managed object refference: " + ex.getLocalizedMessage());
         }
 
         // not managed storage, so use the standard way of getting a ManagedObjectReference for a datastore
@@ -1407,8 +1393,7 @@ public RevertToVMSnapshotAnswer execute(VmwareHostService hostService, RevertToV
             for (ManagedObjectReference taskMor : tasks) {
                 TaskInfo info = (TaskInfo)(context.getVimClient().getDynamicProperty(taskMor, "info"));
 
-                if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) &&
-                        info.getName().equalsIgnoreCase("RevertToSnapshot_Task")) {
+                if (info.getEntityName().equals(cmd.getVmName()) && StringUtils.isNotBlank(info.getName()) && info.getName().equalsIgnoreCase("RevertToSnapshot_Task")) {
                     s_logger.debug("There is already a VM snapshot task running, wait for it");
                     context.getVimClient().waitForTask(taskMor);
                 }
diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
index 574c9ec6bf7..9c93d0e815a 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java
@@ -25,6 +25,8 @@
 import java.net.URL;
 import java.nio.channels.SocketChannel;
 import java.rmi.RemoteException;
+
+import com.cloud.configuration.Resource.ResourceType;
 import org.joda.time.Duration;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -507,10 +509,10 @@ public Answer executeRequest(Command cmd) {
             } else if (clz == ResizeVolumeCommand.class) {
                 return execute((ResizeVolumeCommand)cmd);
             } else if (clz == UnregisterVMCommand.class) {
-                return execute((UnregisterVMCommand) cmd);
+                return execute((UnregisterVMCommand)cmd);
             } else if (cmd instanceof StorageSubSystemCommand) {
                 checkStorageProcessorAndHandlerNfsVersionAttribute((StorageSubSystemCommand)cmd);
-                return storageHandler.handleStorageCommands((StorageSubSystemCommand) cmd);
+                return storageHandler.handleStorageCommands((StorageSubSystemCommand)cmd);
             } else if (clz == ScaleVmCommand.class) {
                 return execute((ScaleVmCommand)cmd);
             } else if (clz == PvlanSetupCommand.class) {
@@ -540,8 +542,7 @@ public Answer executeRequest(Command cmd) {
                         PropertyMapDynamicBean mbeanToRemove = _cmdMBeans.get(0);
                         _cmdMBeans.remove(0);
 
-                        JmxUtil.unregisterMBean("VMware " + _morHyperHost.getValue(),
-                                "Command " + mbeanToRemove.getProp("Sequence") + "-" + mbeanToRemove.getProp("Name"));
+                        JmxUtil.unregisterMBean("VMware " + _morHyperHost.getValue(), "Command " + mbeanToRemove.getProp("Sequence") + "-" + mbeanToRemove.getProp("Name"));
                     }
                 } catch (Exception e) {
                     if (s_logger.isTraceEnabled())
@@ -568,11 +569,13 @@ public Answer executeRequest(Command cmd) {
      * @param cmd command to execute
      */
     protected void checkStorageProcessorAndHandlerNfsVersionAttribute(StorageSubSystemCommand cmd) {
-        if (storageNfsVersion != null) return;
-        if (cmd instanceof CopyCommand){
-            EnumMap<VmwareStorageProcessorConfigurableFields,Object> params = new EnumMap<VmwareStorageProcessorConfigurableFields,Object>(VmwareStorageProcessorConfigurableFields.class);
-            examineStorageSubSystemCommandNfsVersion((CopyCommand) cmd, params);
-            params = examineStorageSubSystemCommandFullCloneFlagForVmware((CopyCommand) cmd, params);
+        if (storageNfsVersion != null)
+            return;
+        if (cmd instanceof CopyCommand) {
+            EnumMap<VmwareStorageProcessorConfigurableFields, Object> params = new EnumMap<VmwareStorageProcessorConfigurableFields, Object>(
+                    VmwareStorageProcessorConfigurableFields.class);
+            examineStorageSubSystemCommandNfsVersion((CopyCommand)cmd, params);
+            params = examineStorageSubSystemCommandFullCloneFlagForVmware((CopyCommand)cmd, params);
             reconfigureProcessorByHandler(params);
         }
     }
@@ -581,10 +584,10 @@ protected void checkStorageProcessorAndHandlerNfsVersionAttribute(StorageSubSyst
      * Reconfigure processor by handler
      * @param params params
      */
-    protected void reconfigureProcessorByHandler(EnumMap<VmwareStorageProcessorConfigurableFields,Object> params) {
-        VmwareStorageSubsystemCommandHandler handler = (VmwareStorageSubsystemCommandHandler) storageHandler;
+    protected void reconfigureProcessorByHandler(EnumMap<VmwareStorageProcessorConfigurableFields, Object> params) {
+        VmwareStorageSubsystemCommandHandler handler = (VmwareStorageSubsystemCommandHandler)storageHandler;
         boolean success = handler.reconfigureStorageProcessor(params);
-        if (success){
+        if (success) {
             s_logger.info("VmwareStorageProcessor and VmwareStorageSubsystemCommandHandler successfully reconfigured");
         } else {
             s_logger.error("Error while reconfiguring VmwareStorageProcessor and VmwareStorageSubsystemCommandHandler, params=" + _gson.toJson(params));
@@ -597,14 +600,15 @@ protected void reconfigureProcessorByHandler(EnumMap<VmwareStorageProcessorConfi
      * @param params params
      * @return copy of params including new values, if suitable
      */
-    protected EnumMap<VmwareStorageProcessorConfigurableFields,Object> examineStorageSubSystemCommandFullCloneFlagForVmware(CopyCommand cmd, EnumMap<VmwareStorageProcessorConfigurableFields,Object> params) {
+    protected EnumMap<VmwareStorageProcessorConfigurableFields, Object> examineStorageSubSystemCommandFullCloneFlagForVmware(CopyCommand cmd,
+            EnumMap<VmwareStorageProcessorConfigurableFields, Object> params) {
         EnumMap<VmwareStorageProcessorConfigurableFields, Object> paramsCopy = new EnumMap<VmwareStorageProcessorConfigurableFields, Object>(params);
         HypervisorType hypervisor = cmd.getDestTO().getHypervisorType();
-        if (hypervisor != null && hypervisor.equals(HypervisorType.VMware)){
+        if (hypervisor != null && hypervisor.equals(HypervisorType.VMware)) {
             DataStoreTO destDataStore = cmd.getDestTO().getDataStore();
-            if (destDataStore instanceof PrimaryDataStoreTO){
-                PrimaryDataStoreTO dest = (PrimaryDataStoreTO) destDataStore;
-                if (dest.isFullCloneFlag() != null){
+            if (destDataStore instanceof PrimaryDataStoreTO) {
+                PrimaryDataStoreTO dest = (PrimaryDataStoreTO)destDataStore;
+                if (dest.isFullCloneFlag() != null) {
                     paramsCopy.put(VmwareStorageProcessorConfigurableFields.FULL_CLONE_FLAG, dest.isFullCloneFlag().booleanValue());
                 }
             }
@@ -617,15 +621,15 @@ protected void reconfigureProcessorByHandler(EnumMap<VmwareStorageProcessorConfi
      * @param cmd command to execute
      * @param params params
      */
-    protected void examineStorageSubSystemCommandNfsVersion(CopyCommand cmd, EnumMap<VmwareStorageProcessorConfigurableFields,Object> params){
+    protected void examineStorageSubSystemCommandNfsVersion(CopyCommand cmd, EnumMap<VmwareStorageProcessorConfigurableFields, Object> params) {
         DataStoreTO srcDataStore = cmd.getSrcTO().getDataStore();
         boolean nfsVersionFound = false;
 
-        if (srcDataStore instanceof NfsTO){
-            nfsVersionFound = getStorageNfsVersionFromNfsTO((NfsTO) srcDataStore);
+        if (srcDataStore instanceof NfsTO) {
+            nfsVersionFound = getStorageNfsVersionFromNfsTO((NfsTO)srcDataStore);
         }
 
-        if (nfsVersionFound){
+        if (nfsVersionFound) {
             params.put(VmwareStorageProcessorConfigurableFields.NFS_VERSION, storageNfsVersion);
         }
     }
@@ -635,8 +639,8 @@ protected void examineStorageSubSystemCommandNfsVersion(CopyCommand cmd, EnumMap
      * @param nfsTO nfsTO
      * @return true if NFS version was found and not null, false in other case
      */
-    protected boolean getStorageNfsVersionFromNfsTO(NfsTO nfsTO){
-        if (nfsTO != null && nfsTO.getNfsVersion() != null){
+    protected boolean getStorageNfsVersionFromNfsTO(NfsTO nfsTO) {
+        if (nfsTO != null && nfsTO.getNfsVersion() != null) {
             storageNfsVersion = nfsTO.getNfsVersion();
             return true;
         }
@@ -646,7 +650,7 @@ protected boolean getStorageNfsVersionFromNfsTO(NfsTO nfsTO){
     /**
      * Registers the vm to the inventory given the vmx file.
      */
-    private void registerVm(String vmName, DatastoreMO dsMo) throws Exception{
+    private void registerVm(String vmName, DatastoreMO dsMo) throws Exception {
 
         //1st param
         VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
@@ -673,8 +677,8 @@ private void registerVm(String vmName, DatastoreMO dsMo) throws Exception{
     private Answer execute(ResizeVolumeCommand cmd) {
         String path = cmd.getPath();
         String vmName = cmd.getInstanceName();
-        long newSize = cmd.getNewSize() / 1024;
-        long oldSize = cmd.getCurrentSize()/1024;
+        long newSize = cmd.getNewSize() / ResourceType.bytesToKiB;
+        long oldSize = cmd.getCurrentSize() / ResourceType.bytesToKiB;
         boolean useWorkerVm = false;
 
         VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
@@ -686,9 +690,10 @@ private Answer execute(ResizeVolumeCommand cmd) {
 
         try {
             if (newSize < oldSize) {
-                throw new Exception("VMware doesn't support shrinking volume from larger size: " + oldSize/(1024*1024) + " GB to a smaller size: " + newSize/(1024*1024) + " GB");
+                throw new Exception(
+                        "VMware doesn't support shrinking volume from larger size: " + oldSize / ResourceType.bytesToMiB + " GB to a smaller size: " + newSize / ResourceType.bytesToMiB + " GB");
             } else if (newSize == oldSize) {
-                return new ResizeVolumeAnswer(cmd, true, "success", newSize*1024);
+                return new ResizeVolumeAnswer(cmd, true, "success", newSize * ResourceType.bytesToKiB);
             }
             if (vmName.equalsIgnoreCase("none")) {
                 // we need to spawn a worker VM to attach the volume to and
@@ -706,7 +711,7 @@ private Answer execute(ResizeVolumeCommand cmd) {
 
                 synchronized (this) {
                     vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, path + ".vmdk");
-                    vmMo.attachDisk(new String[] { vmdkDataStorePath }, morDS);
+                    vmMo.attachDisk(new String[] {vmdkDataStorePath}, morDS);
                 }
             }
             // find VM through datacenter (VM is not at the target host yet)
@@ -725,8 +730,8 @@ private Answer execute(ResizeVolumeCommand cmd) {
             }
             // IDE virtual disk cannot be re-sized if VM is running
             if (vdisk.second() != null && vdisk.second().contains("ide")) {
-                throw new Exception("Re-sizing a virtual disk over IDE controller is not supported in VMware hypervisor. " +
-                            "Please re-try when virtual disk is attached to a VM using SCSI controller.");
+                throw new Exception("Re-sizing a virtual disk over IDE controller is not supported in VMware hypervisor. "
+                        + "Please re-try when virtual disk is attached to a VM using SCSI controller.");
             }
 
             if (vdisk.second() != null && !vdisk.second().toLowerCase().startsWith("scsi"))
@@ -958,12 +963,10 @@ private VirtualDevice findVirtualNicDevice(VirtualMachineMO vmMo, String mac) th
     protected ExecutionResult prepareNetworkElementCommand(SetupGuestNetworkCommand cmd) {
         NicTO nic = cmd.getNic();
         String routerIp = getRouterSshControlIp(cmd);
-        String domrName =
-                cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
+        String domrName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
 
         try {
-            int ethDeviceNum = findRouterEthDeviceIndex(domrName, routerIp,
-                    nic.getMac());
+            int ethDeviceNum = findRouterEthDeviceIndex(domrName, routerIp, nic.getMac());
             nic.setDeviceId(ethDeviceNum);
         } catch (Exception e) {
             String msg = "Prepare SetupGuestNetwork failed due to " + e.toString();
@@ -973,7 +976,6 @@ protected ExecutionResult prepareNetworkElementCommand(SetupGuestNetworkCommand
         return new ExecutionResult(true, null);
     }
 
-
     private ExecutionResult prepareNetworkElementCommand(IpAssocVpcCommand cmd) {
         String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
         String routerIp = getRouterSshControlIp(cmd);
@@ -1020,13 +1022,11 @@ protected ExecutionResult prepareNetworkElementCommand(SetSourceNatCommand cmd)
 
     private ExecutionResult prepareNetworkElementCommand(SetNetworkACLCommand cmd) {
         NicTO nic = cmd.getNic();
-        String routerName =
-                cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
+        String routerName = cmd.getAccessDetail(NetworkElementCommand.ROUTER_NAME);
         String routerIp = getRouterSshControlIp(cmd);
 
         try {
-            int ethDeviceNum = findRouterEthDeviceIndex(routerName, routerIp,
-                    nic.getMac());
+            int ethDeviceNum = findRouterEthDeviceIndex(routerName, routerIp, nic.getMac());
             nic.setDeviceId(ethDeviceNum);
         } catch (Exception e) {
             String msg = "Prepare SetNetworkACL failed due to " + e.toString();
@@ -1073,7 +1073,7 @@ private PlugNicAnswer execute(PlugNicCommand cmd) {
             VirtualEthernetCardType nicDeviceType = VirtualEthernetCardType.E1000;
             Map<String, String> details = cmd.getDetails();
             if (details != null) {
-                nicDeviceType = VirtualEthernetCardType.valueOf((String) details.get("nicAdapter"));
+                nicDeviceType = VirtualEthernetCardType.valueOf((String)details.get("nicAdapter"));
             }
 
             // find a usable device number in VMware environment
@@ -1528,8 +1528,8 @@ protected ScaleVmAnswer execute(ScaleVmCommand cmd) {
             hotaddIncrementSizeInMb = vmMo.getHotAddMemoryIncrementSizeInMb();
             hotaddMemoryLimitInMb = vmMo.getHotAddMemoryLimitInMb();
             if (requestedMaxMemoryInMb > hotaddMemoryLimitInMb) {
-                throw new CloudRuntimeException("Memory of VM " + vmMo.getVmName() + " cannot be scaled to " + requestedMaxMemoryInMb + "MB." +
-                        " Requested memory limit is beyond the hotadd memory limit for this VM at the moment is " + hotaddMemoryLimitInMb + "MB.");
+                throw new CloudRuntimeException("Memory of VM " + vmMo.getVmName() + " cannot be scaled to " + requestedMaxMemoryInMb + "MB."
+                        + " Requested memory limit is beyond the hotadd memory limit for this VM at the moment is " + hotaddMemoryLimitInMb + "MB.");
             }
 
             // Check increment is multiple of increment size
@@ -1628,9 +1628,10 @@ protected StartAnswer execute(StartCommand cmd) {
         String dataDiskController = vmSpec.getDetails().get(VmDetailConstants.DATA_DISK_CONTROLLER);
         String rootDiskController = vmSpec.getDetails().get(VmDetailConstants.ROOT_DISK_CONTROLLER);
         DiskTO rootDiskTO = null;
+        s_logger.info("MDOVA controller rootDiskController= " + rootDiskController + " dataDiskController = " + dataDiskController);
         // If root disk controller is scsi, then data disk controller would also be scsi instead of using 'osdefault'
         // This helps avoid mix of different scsi subtype controllers in instance.
-        if (DiskControllerType.lsilogic == DiskControllerType.getType(rootDiskController)) {
+        if (DiskControllerType.osdefault == DiskControllerType.getType(dataDiskController) && DiskControllerType.lsilogic == DiskControllerType.getType(rootDiskController)) {
             dataDiskController = DiskControllerType.scsi.toString();
         }
 
@@ -1638,6 +1639,8 @@ protected StartAnswer execute(StartCommand cmd) {
         dataDiskController = DiskControllerType.getType(dataDiskController).toString();
         rootDiskController = DiskControllerType.getType(rootDiskController).toString();
 
+        s_logger.info("MDOVA controller finally rootDiskController= " + rootDiskController + " dataDiskController = " + dataDiskController);
+
         if (DiskControllerType.getType(rootDiskController) == DiskControllerType.none) {
             throw new CloudRuntimeException("Invalid root disk controller detected : " + rootDiskController);
         }
@@ -1659,9 +1662,9 @@ protected StartAnswer execute(StartCommand cmd) {
 
             // Validate VM name is unique in Datacenter
             VirtualMachineMO vmInVcenter = dcMo.checkIfVmAlreadyExistsInVcenter(vmNameOnVcenter, vmInternalCSName);
-            if(vmInVcenter != null) {
+            if (vmInVcenter != null) {
                 vmAlreadyExistsInVcenter = true;
-                String msg = "VM with name: " + vmNameOnVcenter +" already exists in vCenter.";
+                String msg = "VM with name: " + vmNameOnVcenter + " already exists in vCenter.";
                 s_logger.error(msg);
                 throw new Exception(msg);
             }
@@ -1759,8 +1762,7 @@ protected StartAnswer execute(StartCommand cmd) {
                                 String datastoreName = VmwareResource.getDatastoreName(details.get(DiskTO.IQN));
 
                                 rootDiskDataStoreDetails = dataStoresDetails.get(datastoreName);
-                            }
-                            else {
+                            } else {
                                 DataStoreTO primaryStore = vol.getData().getDataStore();
 
                                 rootDiskDataStoreDetails = dataStoresDetails.get(primaryStore.getUuid());
@@ -1781,9 +1783,9 @@ protected StartAnswer execute(StartCommand cmd) {
                             }
                         }
                         tearDownVm(vmMo);
-                    }else if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(),
-                            getReservedCpuMHZ(vmSpec), vmSpec.getLimitCpuUse(), (int)(vmSpec.getMaxRam() / (1024 * 1024)), getReservedMemoryMb(vmSpec),
-                            guestOsId, rootDiskDataStoreDetails.first(), false, controllerInfo, systemVm)) {
+                    } else if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(), getReservedCpuMHZ(vmSpec),
+                            vmSpec.getLimitCpuUse(), (int)(vmSpec.getMaxRam() / ResourceType.bytesToMiB), getReservedMemoryMb(vmSpec), guestOsId, rootDiskDataStoreDetails.first(), false,
+                            controllerInfo, systemVm)) {
                         throw new Exception("Failed to create VM. vmName: " + vmInternalCSName);
                     }
                 }
@@ -1808,9 +1810,8 @@ protected StartAnswer execute(StartCommand cmd) {
 
             VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
 
-            VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(),
-                    getReservedCpuMHZ(vmSpec), (int)(vmSpec.getMaxRam() / (1024 * 1024)), getReservedMemoryMb(vmSpec),
-                    guestOsId, vmSpec.getLimitCpuUse());
+            VmwareHelper.setBasicVmConfig(vmConfigSpec, vmSpec.getCpus(), vmSpec.getMaxSpeed(), getReservedCpuMHZ(vmSpec), (int)(vmSpec.getMaxRam() / (1024 * 1024)),
+                    getReservedMemoryMb(vmSpec), guestOsId, vmSpec.getLimitCpuUse());
 
             // Check for multi-cores per socket settings
             int numCoresPerSocket = 1;
@@ -1870,9 +1871,8 @@ protected StartAnswer execute(StartCommand cmd) {
                 DatastoreMO secDsMo = new DatastoreMO(hyperHost.getContext(), morSecDs);
 
                 deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
-                Pair<VirtualDevice, Boolean> isoInfo =
-                        VmwareHelper.prepareIsoDevice(vmMo, String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), secDsMo.getMor(),
-                                true, true, ideUnitNumber++, i + 1);
+                Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo,
+                        String.format("[%s] systemvm/%s", secDsMo.getName(), mgr.getSystemVMIsoFileNameOnDatastore()), secDsMo.getMor(), true, true, ideUnitNumber++, i + 1);
                 deviceConfigSpecArray[i].setDevice(isoInfo.first());
                 if (isoInfo.second()) {
                     if (s_logger.isDebugEnabled())
@@ -1901,8 +1901,8 @@ protected StartAnswer execute(StartCommand cmd) {
                         assert (isoDatastoreInfo.second() != null);
 
                         deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec();
-                        Pair<VirtualDevice, Boolean> isoInfo =
-                                VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++, i + 1);
+                        Pair<VirtualDevice, Boolean> isoInfo = VmwareHelper.prepareIsoDevice(vmMo, isoDatastoreInfo.first(), isoDatastoreInfo.second(), true, true, ideUnitNumber++,
+                                i + 1);
                         deviceConfigSpecArray[i].setDevice(isoInfo.first());
                         if (isoInfo.second()) {
                             if (s_logger.isDebugEnabled())
@@ -1989,14 +1989,10 @@ protected StartAnswer execute(StartCommand cmd) {
 
                     assert (volumeDsDetails != null);
 
-                    String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec,
-                            vol, matchingExistingDisk,
-                            dataStoresDetails);
-                    if(controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber))
+                    String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec, vol, matchingExistingDisk, dataStoresDetails);
+                    if (controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber))
                         scsiUnitNumber++;
-                    VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey,
-                            diskChain,
-                            volumeDsDetails.first(),
+                    VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, diskChain, volumeDsDetails.first(),
                             (controllerKey == vmMo.getIDEControllerKey(ideUnitNumber)) ? ((ideUnitNumber++) % VmwareHelper.MAX_IDE_CONTROLLER_COUNT) : scsiUnitNumber++, i + 1);
 
                     if (vol.getType() == Volume.Type.ROOT)
@@ -2004,7 +2000,7 @@ protected StartAnswer execute(StartCommand cmd) {
                     deviceConfigSpecArray[i].setDevice(device);
                     deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD);
 
-                    if(s_logger.isDebugEnabled())
+                    if (s_logger.isDebugEnabled())
                         s_logger.debug("Prepare volume at new device " + _gson.toJson(device));
 
                     i++;
@@ -2094,8 +2090,8 @@ protected StartAnswer execute(StartCommand cmd) {
                 boolean configureVServiceInNexus = (nicTo.getType() == TrafficType.Guest) && (vmSpec.getDetails().containsKey("ConfigureVServiceInNexus"));
                 VirtualMachine.Type vmType = cmd.getVirtualMachine().getType();
                 Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo, configureVServiceInNexus, vmType);
-                if ((nicTo.getBroadcastType() != BroadcastDomainType.Lswitch) ||
-                        (nicTo.getBroadcastType() == BroadcastDomainType.Lswitch && NiciraNvpApiVersion.isApiVersionLowerThan("4.2"))){
+                if ((nicTo.getBroadcastType() != BroadcastDomainType.Lswitch)
+                        || (nicTo.getBroadcastType() == BroadcastDomainType.Lswitch && NiciraNvpApiVersion.isApiVersionLowerThan("4.2"))) {
                     if (VmwareHelper.isDvPortGroup(networkInfo.first())) {
                         String dvSwitchUuid;
                         ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
@@ -2113,8 +2109,7 @@ protected StartAnswer execute(StartCommand cmd) {
                         nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(),
                                 nicTo.getMac(), i + 1, true, true);
                     }
-                }
-                else{
+                } else {
                     //if NSX API VERSION >= 4.2, connect to br-int (nsx.network), do not create portgroup else previous behaviour
                     nic = VmwareHelper.prepareNicOpaque(vmMo, nicDeviceType, networkInfo.second(),
                             nicTo.getMac(), i + 1, true, true);
@@ -2162,8 +2157,8 @@ protected StartAnswer execute(StartCommand cmd) {
             String keyboardLayout = null;
             if (vmSpec.getDetails() != null)
                 keyboardLayout = vmSpec.getDetails().get(VmDetailConstants.KEYBOARD);
-            vmConfigSpec.getExtraConfig().addAll(
-                    Arrays.asList(configureVnc(extraOptions.toArray(new OptionValue[0]), hyperHost, vmInternalCSName, vmSpec.getVncPassword(), keyboardLayout)));
+            vmConfigSpec.getExtraConfig()
+                    .addAll(Arrays.asList(configureVnc(extraOptions.toArray(new OptionValue[0]), hyperHost, vmInternalCSName, vmSpec.getVncPassword(), keyboardLayout)));
 
             // config video card
             configureVideoCard(vmMo, vmSpec, vmConfigSpec);
@@ -2222,7 +2217,7 @@ protected StartAnswer execute(StartCommand cmd) {
             String msg = "StartCommand failed due to " + VmwareHelper.getExceptionMessage(e);
             s_logger.warn(msg, e);
             StartAnswer startAnswer = new StartAnswer(cmd, msg);
-            if(vmAlreadyExistsInVcenter) {
+            if (vmAlreadyExistsInVcenter) {
                 startAnswer.setContextParam("stopRetry", "true");
             }
 
@@ -2233,7 +2228,7 @@ protected StartAnswer execute(StartCommand cmd) {
                     DatastoreFile fileInDatastore = new DatastoreFile(existingVmFileInfo.getVmPathName());
                     DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName()));
                     registerVm(existingVmName, existingVmDsMo);
-                } catch (Exception ex){
+                } catch (Exception ex) {
                     String message = "Failed to register an existing VM: " + existingVmName + " due to " + VmwareHelper.getExceptionMessage(ex);
                     s_logger.warn(message, ex);
                 }
@@ -2329,13 +2324,12 @@ protected String replaceNicsMacSequenceInBootArgs(String oldMacSequence, String
      * @throws Exception exception
      */
     protected void configureVideoCard(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, VirtualMachineConfigSpec vmConfigSpec) throws Exception {
-        if (vmSpec.getDetails().containsKey(VmDetailConstants.SVGA_VRAM_SIZE)){
+        if (vmSpec.getDetails().containsKey(VmDetailConstants.SVGA_VRAM_SIZE)) {
             String value = vmSpec.getDetails().get(VmDetailConstants.SVGA_VRAM_SIZE);
             try {
                 long svgaVmramSize = Long.parseLong(value);
                 setNewVRamSizeVmVideoCard(vmMo, svgaVmramSize, vmConfigSpec);
-            }
-            catch (NumberFormatException e){
+            } catch (NumberFormatException e) {
                 s_logger.error("Unexpected value, cannot parse " + value + " to long due to: " + e.getMessage());
             }
         }
@@ -2348,9 +2342,9 @@ protected void configureVideoCard(VirtualMachineMO vmMo, VirtualMachineTO vmSpec
      * @param vmConfigSpec virtual machine config spec
      */
     protected void setNewVRamSizeVmVideoCard(VirtualMachineMO vmMo, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) throws Exception {
-        for (VirtualDevice device : vmMo.getAllDeviceList()){
-            if (device instanceof VirtualMachineVideoCard){
-                VirtualMachineVideoCard videoCard = (VirtualMachineVideoCard) device;
+        for (VirtualDevice device : vmMo.getAllDeviceList()) {
+            if (device instanceof VirtualMachineVideoCard) {
+                VirtualMachineVideoCard videoCard = (VirtualMachineVideoCard)device;
                 modifyVmVideoCardVRamSize(videoCard, vmMo, svgaVmramSize, vmConfigSpec);
             }
         }
@@ -2364,7 +2358,7 @@ protected void setNewVRamSizeVmVideoCard(VirtualMachineMO vmMo, long svgaVmramSi
      * @param vmConfigSpec virtual machine config spec
      */
     protected void modifyVmVideoCardVRamSize(VirtualMachineVideoCard videoCard, VirtualMachineMO vmMo, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) {
-        if (videoCard.getVideoRamSizeInKB().longValue() != svgaVmramSize){
+        if (videoCard.getVideoRamSizeInKB().longValue() != svgaVmramSize) {
             s_logger.info("Video card memory was set " + videoCard.getVideoRamSizeInKB().longValue() + "kb instead of " + svgaVmramSize + "kb");
             configureSpecVideoCardNewVRamSize(videoCard, svgaVmramSize, vmConfigSpec);
         }
@@ -2376,7 +2370,7 @@ protected void modifyVmVideoCardVRamSize(VirtualMachineVideoCard videoCard, Virt
      * @param svgaVmramSize new svga vram size (in KB)
      * @param vmConfigSpec virtual machine spec
      */
-    protected void configureSpecVideoCardNewVRamSize(VirtualMachineVideoCard videoCard, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec){
+    protected void configureSpecVideoCardNewVRamSize(VirtualMachineVideoCard videoCard, long svgaVmramSize, VirtualMachineConfigSpec vmConfigSpec) {
         videoCard.setVideoRamSizeInKB(svgaVmramSize);
         videoCard.setUseAutoDetect(false);
 
@@ -2387,9 +2381,10 @@ protected void configureSpecVideoCardNewVRamSize(VirtualMachineVideoCard videoCa
         vmConfigSpec.getDeviceChange().add(arrayVideoCardConfigSpecs);
     }
 
-    private void tearDownVm(VirtualMachineMO vmMo) throws Exception{
+    private void tearDownVm(VirtualMachineMO vmMo) throws Exception {
 
-        if(vmMo == null) return;
+        if (vmMo == null)
+            return;
 
         boolean hasSnapshot = false;
         hasSnapshot = vmMo.hasSnapshot();
@@ -2401,17 +2396,17 @@ private void tearDownVm(VirtualMachineMO vmMo) throws Exception{
     }
 
     int getReservedMemoryMb(VirtualMachineTO vmSpec) {
-         if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveMemory.key()).equalsIgnoreCase("true")) {
-             return  (int) (vmSpec.getMinRam() / (1024 * 1024));
-         }
-         return 0;
+        if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveMemory.key()).equalsIgnoreCase("true")) {
+            return (int)(vmSpec.getMinRam() / ResourceType.bytesToMiB);
+        }
+        return 0;
     }
 
     int getReservedCpuMHZ(VirtualMachineTO vmSpec) {
-         if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveCpu.key()).equalsIgnoreCase("true")) {
-             return vmSpec.getMinSpeed() * vmSpec.getCpus();
-         }
-         return 0;
+        if (vmSpec.getDetails().get(VMwareGuru.VmwareReserveCpu.key()).equalsIgnoreCase("true")) {
+            return vmSpec.getMinSpeed() * vmSpec.getCpus();
+        }
+        return 0;
     }
 
     // return the finalized disk chain for startup, from top to bottom
@@ -2433,8 +2428,7 @@ int getReservedCpuMHZ(VirtualMachineTO vmSpec) {
         String datastoreName = isManaged ? VmwareResource.getDatastoreName(iScsiName) : primaryStore.getUuid();
         Pair<ManagedObjectReference, DatastoreMO> volumeDsDetails = dataStoresDetails.get(datastoreName);
 
-        if (volumeDsDetails == null)
-        {
+        if (volumeDsDetails == null) {
             throw new Exception("Primary datastore " + primaryStore.getUuid() + " is not mounted on host.");
         }
 
@@ -2554,7 +2548,7 @@ private static void setNuageVspVrIpInExtraConfig(List<OptionValue> extraOptions,
         }
 
         OptionValue newVal;
-        if (nicTo.getType().equals(TrafficType.Guest) && dvSwitchUuid != null && nicTo.getGateway() != null && nicTo.getNetmask() != null)  {
+        if (nicTo.getType().equals(TrafficType.Guest) && dvSwitchUuid != null && nicTo.getGateway() != null && nicTo.getNetmask() != null) {
             String vrIp = nicTo.getBroadcastUri().getPath().substring(1);
             newVal = new OptionValue();
             newVal.setKey("vsp.vr-ip." + nicTo.getMac());
@@ -2688,13 +2682,13 @@ private static void postNvpConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachi
         }
     }
 
-    private VirtualMachineDiskInfo getMatchingExistingDisk(VirtualMachineDiskInfoBuilder diskInfoBuilder, DiskTO vol,
-            VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
+    private VirtualMachineDiskInfo getMatchingExistingDisk(VirtualMachineDiskInfoBuilder diskInfoBuilder, DiskTO vol, VmwareHypervisorHost hyperHost, VmwareContext context)
+            throws Exception {
         if (diskInfoBuilder != null) {
             VolumeObjectTO volume = (VolumeObjectTO)vol.getData();
 
             String dsName = null;
-            String diskBackingFileBaseName= null;
+            String diskBackingFileBaseName = null;
 
             Map<String, String> details = vol.getDetails();
             boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED));
@@ -2706,8 +2700,7 @@ private VirtualMachineDiskInfo getMatchingExistingDisk(VirtualMachineDiskInfoBui
                 dsName = VmwareResource.getDatastoreName(iScsiName);
 
                 diskBackingFileBaseName = new DatastoreFile(volume.getPath()).getFileBaseName();
-            }
-            else {
+            } else {
                 ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, volume.getDataStore().getUuid());
                 DatastoreMO dsMo = new DatastoreMO(context, morDs);
 
@@ -2716,8 +2709,7 @@ private VirtualMachineDiskInfo getMatchingExistingDisk(VirtualMachineDiskInfoBui
                 diskBackingFileBaseName = volume.getPath();
             }
 
-            VirtualMachineDiskInfo diskInfo =
-                    diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName);
+            VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(diskBackingFileBaseName, dsName);
             if (diskInfo != null) {
                 s_logger.info("Found existing disk info from volume path: " + volume.getPath());
                 return diskInfo;
@@ -2768,12 +2760,12 @@ private int getDiskController(VirtualMachineDiskInfo matchingExistingDisk, DiskT
             Map<String, String> vmDetails = vmSpec.getDetails();
             if (vmDetails != null && vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER) != null) {
                 if (vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER).equalsIgnoreCase("scsi")) {
-                    s_logger.info("Chose disk controller for vol " + vol.getType() + " -> scsi, based on root disk controller settings: " +
-                            vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER));
+                    s_logger.info("Chose disk controller for vol " + vol.getType() + " -> scsi, based on root disk controller settings: "
+                            + vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER));
                     controllerKey = scsiControllerKey;
                 } else {
-                    s_logger.info("Chose disk controller for vol " + vol.getType() + " -> ide, based on root disk controller settings: " +
-                            vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER));
+                    s_logger.info("Chose disk controller for vol " + vol.getType() + " -> ide, based on root disk controller settings: "
+                            + vmDetails.get(VmDetailConstants.ROOT_DISK_CONTROLLER));
                     controllerKey = ideControllerKey;
                 }
             } else {
@@ -2820,8 +2812,9 @@ private String getDiskController(VirtualMachineMO vmMo, VirtualMachineDiskInfo m
             return controllerInfo.second();
         }
     }
-    private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey,
-            int scsiControllerKey, Map<String, String> iqnToPath, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
+
+    private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO vmSpec, DiskTO[] sortedDisks, int ideControllerKey, int scsiControllerKey,
+            Map<String, String> iqnToPath, VmwareHypervisorHost hyperHost, VmwareContext context) throws Exception {
         VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
 
         for (DiskTO vol : sortedDisks) {
@@ -2852,8 +2845,7 @@ private void postDiskConfigBeforeStart(VirtualMachineMO vmMo, VirtualMachineTO v
                     if (s_logger.isInfoEnabled())
                         s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + diskChain[0]);
                 }
-            }
-            else {
+            } else {
                 if (!file.getFileBaseName().equalsIgnoreCase(volumeTO.getPath())) {
                     if (s_logger.isInfoEnabled())
                         s_logger.info("Detected disk-chain top file change on volume: " + volumeTO.getId() + " " + volumeTO.getPath() + " -> " + file.getFileBaseName());
@@ -2969,8 +2961,8 @@ public int compare(DiskTO arg0, DiskTO arg1) {
         return listForSort.toArray(new DiskTO[0]);
     }
 
-    private HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> inferDatastoreDetailsFromDiskInfo(VmwareHypervisorHost hyperHost, VmwareContext context,
-            DiskTO[] disks, Command cmd) throws Exception {
+    private HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> inferDatastoreDetailsFromDiskInfo(VmwareHypervisorHost hyperHost, VmwareContext context, DiskTO[] disks,
+            Command cmd) throws Exception {
         HashMap<String, Pair<ManagedObjectReference, DatastoreMO>> mapIdToMors = new HashMap<String, Pair<ManagedObjectReference, DatastoreMO>>();
 
         assert (hyperHost != null) && (context != null);
@@ -2997,12 +2989,10 @@ public int compare(DiskTO arg0, DiskTO arg1) {
                         // if the datastore is not present, we need to discover the iSCSI device that will support it,
                         // create the datastore, and create a VMDK file in the datastore
                         if (morDatastore == null) {
-                            morDatastore = _storageProcessor.prepareManagedStorage(context, hyperHost, null, iScsiName,
-                                    details.get(DiskTO.STORAGE_HOST), Integer.parseInt(details.get(DiskTO.STORAGE_PORT)),
-                                    volumeTO.getVolumeType() == Volume.Type.ROOT ? volumeTO.getName() : null,
-                                    details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET),
-                                    details.get(DiskTO.CHAP_TARGET_USERNAME), details.get(DiskTO.CHAP_TARGET_SECRET),
-                                    Long.parseLong(details.get(DiskTO.VOLUME_SIZE)), cmd);
+                            morDatastore = _storageProcessor.prepareManagedStorage(context, hyperHost, null, iScsiName, details.get(DiskTO.STORAGE_HOST),
+                                    Integer.parseInt(details.get(DiskTO.STORAGE_PORT)), volumeTO.getVolumeType() == Volume.Type.ROOT ? volumeTO.getName() : null,
+                                    details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET), details.get(DiskTO.CHAP_TARGET_USERNAME),
+                                    details.get(DiskTO.CHAP_TARGET_SECRET), Long.parseLong(details.get(DiskTO.VOLUME_SIZE)), cmd);
 
                             DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDatastore);
                             String datastoreVolumePath = dsMo.getDatastorePath((volumeTO.getVolumeType() == Volume.Type.ROOT ? volumeTO.getName() : dsMo.getName()) + ".vmdk");
@@ -3012,8 +3002,7 @@ public int compare(DiskTO arg0, DiskTO arg1) {
                         }
 
                         mapIdToMors.put(datastoreName, new Pair<ManagedObjectReference, DatastoreMO>(morDatastore, new DatastoreMO(context, morDatastore)));
-                    }
-                    else {
+                    } else {
                         ManagedObjectReference morDatastore = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolUuid);
 
                         if (morDatastore == null) {
@@ -3051,8 +3040,7 @@ private DatastoreMO getDatastoreThatRootDiskIsOn(HashMap<String, Pair<ManagedObj
                     rootDiskDataStoreDetails = dataStoresDetails.get(datastoreName);
 
                     break;
-                }
-                else {
+                } else {
                     DataStoreTO primaryStore = vol.getData().getDataStore();
 
                     rootDiskDataStoreDetails = dataStoresDetails.get(primaryStore.getUuid());
@@ -3109,7 +3097,8 @@ private String getVlanInfo(NicTO nicTo, String defaultVlan) {
         return defaultVlan;
     }
 
-    private Pair<ManagedObjectReference, String> prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus, VirtualMachine.Type vmType) throws Exception {
+    private Pair<ManagedObjectReference, String> prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo, boolean configureVServiceInNexus, VirtualMachine.Type vmType)
+            throws Exception {
 
         Ternary<String, String, String> switchDetails = getTargetSwitch(nicTo);
         VirtualSwitchType switchType = VirtualSwitchType.getType(switchDetails.second());
@@ -3147,14 +3136,7 @@ private String getVlanInfo(NicTO nicTo, String defaultVlan) {
 
     // return Ternary <switch name, switch tyep, vlan tagging>
     private Ternary<String, String, String> getTargetSwitch(NicTO nicTo) throws CloudException {
-        TrafficType[] supportedTrafficTypes =
-                new TrafficType[] {
-                TrafficType.Guest,
-                TrafficType.Public,
-                TrafficType.Control,
-                TrafficType.Management,
-                TrafficType.Storage
-        };
+        TrafficType[] supportedTrafficTypes = new TrafficType[] {TrafficType.Guest, TrafficType.Public, TrafficType.Control, TrafficType.Management, TrafficType.Storage};
 
         TrafficType trafficType = nicTo.getType();
         if (!Arrays.asList(supportedTrafficTypes).contains(trafficType)) {
@@ -3165,7 +3147,7 @@ private String getVlanInfo(NicTO nicTo, String defaultVlan) {
         VirtualSwitchType switchType = VirtualSwitchType.StandardVirtualSwitch;
         String vlanId = Vlan.UNTAGGED;
 
-        if(nicTo.getName() != null && !nicTo.getName().isEmpty()) {
+        if (StringUtils.isNotBlank(nicTo.getName())) {
             // Format of network traffic label is <VSWITCH>,<VLANID>,<VSWITCHTYPE>
             // If all 3 fields are mentioned then number of tokens would be 3.
             // If only <VSWITCH>,<VLANID> are mentioned then number of tokens would be 2.
@@ -3192,9 +3174,9 @@ private String getVlanInfo(NicTO nicTo, String defaultVlan) {
 
         if (switchType == VirtualSwitchType.NexusDistributedVirtualSwitch) {
             if (trafficType == TrafficType.Management || trafficType == TrafficType.Storage) {
-                throw new CloudException("Unable to configure NIC " + nicTo.toString() + " as traffic type " + trafficType.toString() +
-                        " is not supported over virtual switch type " + switchType +
-                        ". Please specify only supported type of virtual switches i.e. {vmwaresvs, vmwaredvs} in physical network traffic label.");
+                throw new CloudException(
+                        "Unable to configure NIC " + nicTo.toString() + " as traffic type " + trafficType.toString() + " is not supported over virtual switch type " + switchType
+                                + ". Please specify only supported type of virtual switches i.e. {vmwaresvs, vmwaredvs} in physical network traffic label.");
             }
         }
 
@@ -3737,7 +3719,7 @@ protected Answer execute(MigrateWithStorageCommand cmd) {
         VolumeTO volume;
         StorageFilerTO filerTo;
         Set<String> mountedDatastoresAtSource = new HashSet<String>();
-        List<VolumeObjectTO> volumeToList =  new ArrayList<VolumeObjectTO>();
+        List<VolumeObjectTO> volumeToList = new ArrayList<VolumeObjectTO>();
         Map<Long, Integer> volumeDeviceKey = new HashMap<Long, Integer>();
 
         List<Pair<VolumeTO, StorageFilerTO>> volToFiler = cmd.getVolumeToFilerAsList();
@@ -3776,7 +3758,8 @@ protected Answer execute(MigrateWithStorageCommand cmd) {
                 morDsAtTarget = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(tgtHyperHost, filerTo.getUuid());
                 morDsAtSource = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, filerTo.getUuid());
                 if (morDsAtTarget == null) {
-                    String msg = "Unable to find the target datastore: " + filerTo.getUuid() + " on target host: " + tgtHyperHost.getHyperHostName() + " to execute MigrateWithStorageCommand";
+                    String msg = "Unable to find the target datastore: " + filerTo.getUuid() + " on target host: " + tgtHyperHost.getHyperHostName()
+                            + " to execute MigrateWithStorageCommand";
                     s_logger.error(msg);
                     throw new Exception(msg);
                 }
@@ -3805,12 +3788,13 @@ protected Answer execute(MigrateWithStorageCommand cmd) {
                     // If datastore is VMFS and target datastore is not mounted or accessible to source host then fail migration.
                     if (filerTo.getType().equals(StoragePoolType.VMFS)) {
                         if (morDsAtSource == null) {
-                            s_logger.warn("If host version is below 5.1, then target VMFS datastore(s) need to manually mounted on source host for a successful live storage migration.");
+                            s_logger.warn(
+                                    "If host version is below 5.1, then target VMFS datastore(s) need to manually mounted on source host for a successful live storage migration.");
                             throw new Exception("Target VMFS datastore: " + tgtDsPath + " is not mounted on source host: " + _hostName);
                         }
                         DatastoreMO dsAtSourceMo = new DatastoreMO(getServiceContext(), morDsAtSource);
                         String srcHostValue = srcHyperHost.getMor().getValue();
-                        if(!dsAtSourceMo.isAccessibleToHost(srcHostValue)) {
+                        if (!dsAtSourceMo.isAccessibleToHost(srcHostValue)) {
                             s_logger.warn("If host version is below 5.1, then target VMFS datastore(s) need to accessible to source host for a successful live storage migration.");
                             throw new Exception("Target VMFS datastore: " + tgtDsPath + " is not accessible on source host: " + _hostName);
                         }
@@ -3892,8 +3876,8 @@ protected Answer execute(MigrateWithStorageCommand cmd) {
                 if (!vmMo.changeDatastore(relocateSpec)) {
                     throw new Exception("Change datastore operation failed during storage migration");
                 } else {
-                    s_logger.debug("Successfully migrated VM " + vmName + " from " + _hostName + " to " + tgtHyperHost.getHyperHostName() +
-                            " and its storage to target datastore(s)");
+                    s_logger.debug(
+                            "Successfully migrated VM " + vmName + " from " + _hostName + " to " + tgtHyperHost.getHyperHostName() + " and its storage to target datastore(s)");
                 }
             }
 
@@ -3943,8 +3927,8 @@ protected Answer execute(MigrateWithStorageCommand cmd) {
                 try {
                     srcHyperHost.unmountDatastore(mountedDatastore);
                 } catch (Exception unmountEx) {
-                    s_logger.debug("Failed to unmount datastore " + mountedDatastore + " at " + _hostName + ". Seems the datastore is still being used by " + _hostName +
-                            ". Please unmount manually to cleanup.");
+                    s_logger.debug("Failed to unmount datastore " + mountedDatastore + " at " + _hostName + ". Seems the datastore is still being used by " + _hostName
+                            + ". Please unmount manually to cleanup.");
                 }
                 s_logger.debug("Successfully unmounted datastore " + mountedDatastore + " at " + _hostName);
             }
@@ -3989,7 +3973,8 @@ private Answer execute(MigrateVolumeCommand cmd) {
             vmName = vmMo.getName();
             morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, tgtDsName);
             if (morDs == null) {
-                String msg = "Unable to find the mounted datastore with name: " + tgtDsName + " on source host: " + srcHyperHost.getHyperHostName() +" to execute MigrateVolumeCommand";
+                String msg = "Unable to find the mounted datastore with name: " + tgtDsName + " on source host: " + srcHyperHost.getHyperHostName()
+                        + " to execute MigrateVolumeCommand";
                 s_logger.error(msg);
                 throw new Exception(msg);
             }
@@ -4091,9 +4076,9 @@ protected Answer execute(CreateStoragePoolCommand cmd) {
             try {
                 VmwareContext context = getServiceContext();
 
-                _storageProcessor.prepareManagedDatastore(context, getHyperHost(context),
-                        cmd.getDetails().get(CreateStoragePoolCommand.DATASTORE_NAME), cmd.getDetails().get(CreateStoragePoolCommand.IQN),
-                        cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_PORT)));
+                _storageProcessor.prepareManagedDatastore(context, getHyperHost(context), cmd.getDetails().get(CreateStoragePoolCommand.DATASTORE_NAME),
+                        cmd.getDetails().get(CreateStoragePoolCommand.IQN), cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_HOST),
+                        Integer.parseInt(cmd.getDetails().get(CreateStoragePoolCommand.STORAGE_PORT)));
             } catch (Exception ex) {
                 return new Answer(cmd, false, "Issue creating datastore");
             }
@@ -4163,8 +4148,7 @@ private void handleTargets(boolean add, List<Map<String, String>> targets, HostM
         if (targets != null && targets.size() > 0) {
             try {
                 _storageProcessor.handleTargetsForHost(add, targets, host);
-            }
-            catch (Exception ex) {
+            } catch (Exception ex) {
                 s_logger.warn(ex.getMessage());
             }
         }
@@ -4178,11 +4162,10 @@ protected Answer execute(DeleteStoragePoolCommand cmd) {
         try {
             if (cmd.getRemoveDatastore()) {
                 _storageProcessor.handleDatastoreAndVmdkDetach(cmd.getDetails().get(DeleteStoragePoolCommand.DATASTORE_NAME), cmd.getDetails().get(DeleteStoragePoolCommand.IQN),
-                    cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_PORT)));
+                        cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_HOST), Integer.parseInt(cmd.getDetails().get(DeleteStoragePoolCommand.STORAGE_PORT)));
 
                 return new Answer(cmd, true, "success");
-            }
-            else {
+            } else {
                 // We will leave datastore cleanup management to vCenter. Since for cluster VMFS datastore, it will always
                 // be mounted by vCenter.
 
@@ -4233,12 +4216,12 @@ protected Answer execute(AttachIsoCommand cmd) {
                     if (cmd.isAttach()) {
                         vmMo.mountToolsInstaller();
                     } else {
-                        try{
+                        try {
                             if (!vmMo.unmountToolsInstaller()) {
                                 return new Answer(cmd, false,
                                         "Failed to unmount vmware-tools installer ISO as the corresponding CDROM device is locked by VM. Please unmount the CDROM device inside the VM and ret-try.");
                             }
-                        }catch(Throwable e){
+                        } catch (Throwable e) {
                             vmMo.detachIso(null);
                         }
                     }
@@ -4318,10 +4301,10 @@ public synchronized ManagedObjectReference prepareSecondaryDatastoreOnSpecificHo
 
     private static String getSecondaryDatastoreUUID(String storeUrl) {
         String uuid = null;
-        try{
-            uuid=UUID.nameUUIDFromBytes(storeUrl.getBytes("UTF-8")).toString();
-        }catch(UnsupportedEncodingException e){
-            s_logger.warn("Failed to create UUID from string " + storeUrl + ". Bad storeUrl or UTF-8 encoding error." );
+        try {
+            uuid = UUID.nameUUIDFromBytes(storeUrl.getBytes("UTF-8")).toString();
+        } catch (UnsupportedEncodingException e) {
+            s_logger.warn("Failed to create UUID from string " + storeUrl + ". Bad storeUrl or UTF-8 encoding error.");
         }
         return uuid;
     }
@@ -4522,8 +4505,8 @@ protected Answer execute(GetStorageStatsCommand cmd) {
                 long used = capacity - free;
 
                 if (s_logger.isDebugEnabled()) {
-                    s_logger.debug("Datastore summary info, storageId: " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " +
-                            cmd.getPooltype() + ", capacity: " + capacity + ", free: " + free + ", used: " + used);
+                    s_logger.debug("Datastore summary info, storageId: " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype()
+                            + ", capacity: " + capacity + ", free: " + free + ", used: " + used);
                 }
 
                 if (summary.getCapacity() <= 0) {
@@ -4532,9 +4515,8 @@ protected Answer execute(GetStorageStatsCommand cmd) {
 
                 return new GetStorageStatsAnswer(cmd, capacity, used);
             } else {
-                String msg =
-                        "Could not find datastore for GetStorageStatsCommand storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " +
-                                cmd.getPooltype();
+                String msg = "Could not find datastore for GetStorageStatsCommand storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: "
+                        + cmd.getPooltype();
 
                 s_logger.error(msg);
                 return new GetStorageStatsAnswer(cmd, msg);
@@ -4545,9 +4527,8 @@ protected Answer execute(GetStorageStatsCommand cmd) {
                 invalidateServiceContext();
             }
 
-            String msg =
-                    "Unable to execute GetStorageStatsCommand(storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype() +
-                    ") due to " + VmwareHelper.getExceptionMessage(e);
+            String msg = "Unable to execute GetStorageStatsCommand(storageId : " + cmd.getStorageId() + ", localPath: " + cmd.getLocalPath() + ", poolType: " + cmd.getPooltype()
+                    + ") due to " + VmwareHelper.getExceptionMessage(e);
             s_logger.error(msg, e);
             return new GetStorageStatsAnswer(cmd, msg);
         }
@@ -4624,8 +4605,7 @@ protected Answer execute(PingTestCommand cmd) {
                 if (result.first())
                     return new Answer(cmd);
             } catch (Exception e) {
-                s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to "
-                        + VmwareHelper.getExceptionMessage(e), e);
+                s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to " + VmwareHelper.getExceptionMessage(e), e);
             }
             return new Answer(cmd, false, "PingTestCommand failed");
         } else {
@@ -4649,8 +4629,7 @@ protected Answer execute(PingTestCommand cmd) {
                     }
                 }
             } catch (Exception e) {
-                s_logger.error("Unable to execute ping command on host (" + cmd.getComputingHostIp() + "). failure due to "
-                        + VmwareHelper.getExceptionMessage(e), e);
+                s_logger.error("Unable to execute ping command on host (" + cmd.getComputingHostIp() + "). failure due to " + VmwareHelper.getExceptionMessage(e), e);
             }
 
             return new Answer(cmd, false, "PingTestCommand failed");
@@ -4674,7 +4653,6 @@ protected Answer execute(ModifySshKeysCommand cmd) {
         return new Answer(cmd);
     }
 
-
     protected Answer execute(GetVmIpAddressCommand cmd) {
         if (s_logger.isTraceEnabled()) {
             s_logger.trace("Executing resource command GetVmIpAddressCommand: " + _gson.toJson(cmd));
@@ -5055,8 +5033,8 @@ public Type getType() {
 
                     DatastoreSummary dsSummary = dsMo.getSummary();
                     String address = hostMo.getHostName();
-                    StoragePoolInfo pInfo =
-                            new StoragePoolInfo(poolUuid, address, dsMo.getMor().getValue(), "", StoragePoolType.VMFS, dsSummary.getCapacity(), dsSummary.getFreeSpace());
+                    StoragePoolInfo pInfo = new StoragePoolInfo(poolUuid, address, dsMo.getMor().getValue(), "", StoragePoolType.VMFS, dsSummary.getCapacity(),
+                            dsSummary.getFreeSpace());
                     StartupStorageCommand cmd = new StartupStorageCommand();
                     cmd.setName(poolUuid);
                     cmd.setPoolInfo(pInfo);
@@ -5131,8 +5109,7 @@ private String getIqn() {
                     }
                 }
             }
-        }
-        catch (Exception ex) {
+        } catch (Exception ex) {
             s_logger.info("Could not locate an IQN for this host.");
         }
 
@@ -5204,8 +5181,7 @@ private void fillHostDetailsInfo(VmwareContext serviceContext, Map<String, Strin
         }
     }
 
-    protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervisorHost hyperHost, String vmName, String vncPassword, String keyboardLayout)
-            throws Exception {
+    protected OptionValue[] configureVnc(OptionValue[] optionsToMerge, VmwareHypervisorHost hyperHost, String vmName, String vncPassword, String keyboardLayout) throws Exception {
 
         VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmName);
 
@@ -5284,7 +5260,7 @@ private VirtualMachineGuestOsIdentifier translateGuestOsIdentifier(String cpuArc
             cpuArchitecture = "i386";
         }
 
-        if(cloudGuestOs == null) {
+        if (cloudGuestOs == null) {
             s_logger.warn("Guest OS mapping name is not set for guest os: " + guestOs);
         }
 
@@ -5359,7 +5335,6 @@ private VirtualMachineGuestOsIdentifier translateGuestOsIdentifier(String cpuArc
         return newStates;
     }
 
-
     private HashMap<String, PowerState> getVmStates() throws Exception {
         VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
 
@@ -5412,8 +5387,6 @@ private VirtualMachineGuestOsIdentifier translateGuestOsIdentifier(String cpuArc
         return newStates;
     }
 
-
-
     private HashMap<String, VmStatsEntry> getVmStats(List<String> vmNames) throws Exception {
         VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
         HashMap<String, VmStatsEntry> vmResponseMap = new HashMap<String, VmStatsEntry>();
@@ -5449,6 +5422,7 @@ private VirtualMachineGuestOsIdentifier translateGuestOsIdentifier(String cpuArc
 
         ObjectContent[] ocs =
                 hyperHost.getVmPropertiesOnHyperHost(new String[] {"name", numCpuStr, cpuUseStr ,guestMemUseStr ,memLimitStr ,memMbStr,allocatedCpuStr ,instanceNameCustomField});
+
         if (ocs != null && ocs.length > 0) {
             for (ObjectContent oc : ocs) {
                 List<DynamicProperty> objProps = oc.getPropSet();
@@ -5468,9 +5442,9 @@ private VirtualMachineGuestOsIdentifier translateGuestOsIdentifier(String cpuArc
                         } else if (objProp.getName().contains(instanceNameCustomField)) {
                             if (objProp.getVal() != null)
                                 vmInternalCSName = ((CustomFieldStringValue)objProp.getVal()).getValue();
-                        }else if(objProp.getName().equals(guestMemusage)){
+                        } else if (objProp.getName().equals(guestMemusage)) {
                             guestMemusage = objProp.getVal().toString();
-                        }else if (objProp.getName().equals(numCpuStr)) {
+                        } else if (objProp.getName().equals(numCpuStr)) {
                             numberCPUs = objProp.getVal().toString();
                         } else if (objProp.getName().equals(cpuUseStr)) {
                             maxCpuUsage = NumberUtils.toDouble(objProp.getVal().toString());
@@ -5503,8 +5477,8 @@ private VirtualMachineGuestOsIdentifier translateGuestOsIdentifier(String cpuArc
                     List<PerfMetricId> perfMetrics = service.queryAvailablePerfMetric(perfMgr, vmMor, null, null, null);
                     if (perfMetrics != null) {
                         for (int index = 0; index < perfMetrics.size(); ++index) {
-                            if (((rxPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == rxPerfCounterInfo.getKey())) ||
-                                    ((txPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == txPerfCounterInfo.getKey()))) {
+                            if (((rxPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == rxPerfCounterInfo.getKey()))
+                                    || ((txPerfCounterInfo != null) && (perfMetrics.get(index).getCounterId() == txPerfCounterInfo.getKey()))) {
                                 vmNetworkMetrics.add(perfMetrics.get(index));
                             }
                         }
@@ -5550,14 +5524,15 @@ private VirtualMachineGuestOsIdentifier translateGuestOsIdentifier(String cpuArc
                             }
                         }
                     }
-                    vmResponseMap.put(name, new VmStatsEntry( NumberUtils.toDouble(memkb)*1024,NumberUtils.toDouble(guestMemusage)*1024,NumberUtils.toDouble(memlimit)*1024, maxCpuUsage, networkReadKBs, networkWriteKBs, NumberUtils.toInt(numberCPUs), "vm"));
+                    vmResponseMap.put(name, new VmStatsEntry( NumberUtils.toDouble(memkb)*1024,NumberUtils.toDouble(guestMemusage)*1024,NumberUtils.toDouble(memlimit)*1024,
+                            maxCpuUsage, networkReadKBs, networkWriteKBs, NumberUtils.toInt(numberCPUs), "vm"));
+
                 }
             }
         }
         return vmResponseMap;
     }
 
-
     protected String networkUsage(final String privateIpAddress, final String option, final String ethName) {
         String args = null;
         if (option.equals("get")) {
@@ -5652,7 +5627,6 @@ public static PowerState getVmState(VirtualMachineMO vmMo) throws Exception {
         return convertPowerState(runtimeInfo.getPowerState());
     }
 
-
     private static PowerState convertPowerState(VirtualMachinePowerState powerState) {
         return s_powerStatesTable.get(powerState);
     }
@@ -5730,8 +5704,8 @@ public boolean configure(String name, Map<String, Object> params) throws Configu
 
             CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(context, context.getServiceContent().getCustomFieldsManager());
             cfmMo.ensureCustomFieldDef("Datastore", CustomFieldConstants.CLOUD_UUID);
-            if (_publicTrafficInfo != null && _publicTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch || _guestTrafficInfo != null &&
-                    _guestTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch) {
+            if (_publicTrafficInfo != null && _publicTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch
+                    || _guestTrafficInfo != null && _guestTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch) {
                 cfmMo.ensureCustomFieldDef("DistributedVirtualPortgroup", CustomFieldConstants.CLOUD_GC_DVP);
             }
             cfmMo.ensureCustomFieldDef("Network", CustomFieldConstants.CLOUD_GC);
@@ -5744,8 +5718,8 @@ public boolean configure(String name, Map<String, Object> params) throws Configu
             VmwareHypervisorHost hostMo = this.getHyperHost(context);
             _hostName = hostMo.getHyperHostName();
 
-            if (_guestTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch ||
-                    _publicTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch) {
+            if (_guestTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch
+                    || _publicTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch) {
                 _privateNetworkVSwitchName = mgr.getPrivateVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware);
                 _vsmCredentials = mgr.getNexusVSMCredentialsByClusterId(Long.parseLong(_cluster));
             }
@@ -5770,9 +5744,9 @@ else if (value != null && value.equalsIgnoreCase("ide"))
             if (intObj != null)
                 _portsPerDvPortGroup = intObj.intValue();
 
-            s_logger.info("VmwareResource network configuration info." + " private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over " +
-                    _publicTrafficInfo.getVirtualSwitchType() + " : " + _publicTrafficInfo.getVirtualSwitchName() + ", guest traffic over " +
-                    _guestTrafficInfo.getVirtualSwitchType() + " : " + _guestTrafficInfo.getVirtualSwitchName());
+            s_logger.info("VmwareResource network configuration info." + " private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over "
+                    + _publicTrafficInfo.getVirtualSwitchType() + " : " + _publicTrafficInfo.getVirtualSwitchName() + ", guest traffic over "
+                    + _guestTrafficInfo.getVirtualSwitchType() + " : " + _guestTrafficInfo.getVirtualSwitchName());
 
             Boolean boolObj = (Boolean)params.get("vmware.create.full.clone");
             if (boolObj != null && boolObj.booleanValue()) {
@@ -5792,7 +5766,8 @@ else if (value != null && value.equalsIgnoreCase("ide"))
             int timeout = NumbersUtil.parseInt(value, 1440) * 1000;
 
             storageNfsVersion = NfsSecondaryStorageResource.retrieveNfsVersionFromParams(params);
-            _storageProcessor = new VmwareStorageProcessor((VmwareHostService)this, _fullCloneFlag, (VmwareStorageMount)mgr, timeout, this, _shutdownWaitMs, null, storageNfsVersion);
+            _storageProcessor = new VmwareStorageProcessor((VmwareHostService)this, _fullCloneFlag, (VmwareStorageMount)mgr, timeout, this, _shutdownWaitMs, null,
+                    storageNfsVersion);
             storageHandler = new VmwareStorageSubsystemCommandHandler(_storageProcessor, storageNfsVersion);
 
             _vrResource = new VirtualRoutingResource(this);
@@ -5842,11 +5817,11 @@ public VmwareHypervisorHost getHyperHost(VmwareContext context) {
     @Override
     public VmwareContext getServiceContext(Command cmd) {
         VmwareContext context = null;
-        if(s_serviceContext.get() != null) {
+        if (s_serviceContext.get() != null) {
             context = s_serviceContext.get();
             String poolKey = VmwareContextPool.composePoolKey(_vCenterAddress, _username);
             // Before re-using the thread local context, ensure it corresponds to the right vCenter API session and that it is valid to make calls.
-            if(context.getPoolKey().equals(poolKey)) {
+            if (context.getPoolKey().equals(poolKey)) {
                 if (context.validate()) {
                     if (s_logger.isTraceEnabled()) {
                         s_logger.trace("ThreadLocal context is still valid, just reuse");
diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
index 012556eb1ca..0bc01bfef6e 100644
--- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
+++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java
@@ -313,7 +313,7 @@ public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) {
         }
 
         String templateUrl = secondaryStorageUrl + "/" + srcData.getPath();
-
+        s_logger.debug("MDOVA copyTemplateToPrimaryStorage templateUrl " + templateUrl + " name " + template.getName());
         Pair<String, String> templateInfo = VmwareStorageLayoutHelper.decodeTemplateRelativePathAndNameFromUrl(secondaryStorageUrl, templateUrl, template.getName());
 
         VmwareContext context = hostService.getServiceContext(cmd);
@@ -330,6 +330,7 @@ public Answer copyTemplateToPrimaryStorage(CopyCommand cmd) {
             DatastoreMO dsMo = null;
             Pair<VirtualMachineMO, Long> vmInfo = null;
 
+            s_logger.debug("MDOVA storageUuid copyTemplateToPrimaryStorage " + storageUuid + " templateUuidName " + templateUuidName + " templateMo "  + templateMo);
             if (templateMo == null) {
                 if (s_logger.isInfoEnabled()) {
                     s_logger.info("Template " + templateInfo.second() + " is not setup yet. Set up template from secondary storage with uuid name: " + templateUuidName);
@@ -504,6 +505,10 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) {
 
                 ManagedObjectReference morPool = hyperHost.getHyperHostOwnerResourcePool();
                 ManagedObjectReference morCluster = hyperHost.getHyperHostCluster();
+                s_logger.info("MDOVA the size of template is " + template.getSize() + " and size of volume is " + volume.getSize());
+                if (template.getSize() != null){
+                    _fullCloneFlag = volume.getSize() > template.getSize() ? true : _fullCloneFlag;
+                }
                 if (!_fullCloneFlag) {
                     createVMLinkedClone(vmTemplate, dcMo, dsMo, vmdkName, morDatastore, morPool);
                 } else {
@@ -513,8 +518,8 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) {
                 vmMo = new ClusterMO(context, morCluster).findVmOnHyperHost(vmdkName);
                 assert (vmMo != null);
 
-                vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0); // TO-DO: Support for base template containing multiple disks
-                s_logger.info("Move volume out of volume-wrapper VM ");
+                vmdkFileBaseName = vmMo.getVmdkFileBaseNames().get(0);
+                s_logger.info("Move volume out of volume-wrapper VM " + vmdkFileBaseName);
                 String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.VMWARE, !_fullCloneFlag);
                 String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, vmdkName, vmdkFileBaseName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, !_fullCloneFlag);
 
@@ -528,7 +533,12 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) {
                 vmMo.destroy();
 
                 String srcFile = dsMo.getDatastorePath(vmdkName, true);
+
                 dsMo.deleteFile(srcFile, dcMo.getMor(), true, searchExcludedFolders);
+
+                if (dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmdkName)) {
+                    dsMo.deleteFolder(srcFile, dcMo.getMor());
+                }
             }
             // restoreVM - move the new ROOT disk into corresponding VM folder
             VirtualMachineMO restoreVmMo = dcMo.findVm(volume.getVmName());
@@ -541,7 +551,12 @@ public Answer cloneVolumeFromBaseTemplate(CopyCommand cmd) {
 
             VolumeObjectTO newVol = new VolumeObjectTO();
             newVol.setPath(vmdkFileBaseName);
-            newVol.setSize(volume.getSize());
+            if (template.getSize() != null){
+                newVol.setSize(template.getSize());
+            }
+            else {
+                newVol.setSize(volume.getSize());
+            }
             return new CopyCmdAnswer(newVol);
         } catch (Throwable e) {
             if (e instanceof RemoteException) {
diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java
index e352198d586..edc4c818d57 100644
--- a/server/src/com/cloud/api/ApiResponseHelper.java
+++ b/server/src/com/cloud/api/ApiResponseHelper.java
@@ -1416,6 +1416,11 @@ public VirtualMachineTemplate findTemplateById(Long templateId) {
         return ApiDBUtils.findTemplateById(templateId);
     }
 
+    @Override
+    public DiskOfferingVO findDiskOfferingById(Long diskOfferingId) {
+        return ApiDBUtils.findDiskOfferingById(diskOfferingId);
+    }
+
     @Override
     public VpnUsersResponse createVpnUserResponse(VpnUser vpnUser) {
         VpnUsersResponse vpnResponse = new VpnUsersResponse();
diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java
index 2a6919bcf71..4d3519d0ac2 100644
--- a/server/src/com/cloud/api/query/QueryManagerImpl.java
+++ b/server/src/com/cloud/api/query/QueryManagerImpl.java
@@ -3080,6 +3080,7 @@ private boolean isPermissible(Long accountDomainId, Long offeringDomainId) {
         Map<String, String> tags = cmd.getTags();
         boolean showRemovedTmpl = cmd.getShowRemoved();
         Account caller = CallContext.current().getCallingAccount();
+        Long parentTemplateId = cmd.getParentTemplateId();
 
         boolean listAll = false;
         if (templateFilter != null && templateFilter == TemplateFilter.all) {
@@ -3108,14 +3109,14 @@ private boolean isPermissible(Long accountDomainId, Long offeringDomainId) {
         return searchForTemplatesInternal(id, cmd.getTemplateName(), cmd.getKeyword(), templateFilter, false, null,
                 cmd.getPageSizeVal(), cmd.getStartIndex(), cmd.getZoneId(), hypervisorType, showDomr,
                 cmd.listInReadyState(), permittedAccounts, caller, listProjectResourcesCriteria, tags, showRemovedTmpl,
-                cmd.getIds());
+                cmd.getIds(), parentTemplateId);
     }
 
     private Pair<List<TemplateJoinVO>, Integer> searchForTemplatesInternal(Long templateId, String name,
             String keyword, TemplateFilter templateFilter, boolean isIso, Boolean bootable, Long pageSize,
             Long startIndex, Long zoneId, HypervisorType hyperType, boolean showDomr, boolean onlyReady,
             List<Account> permittedAccounts, Account caller, ListProjectResourcesCriteria listProjectResourcesCriteria,
-            Map<String, String> tags, boolean showRemovedTmpl, List<Long> ids) {
+            Map<String, String> tags, boolean showRemovedTmpl, List<Long> ids, Long parentTemplateId) {
 
         // check if zone is configured, if not, just return empty list
         List<HypervisorType> hypers = null;
@@ -3359,6 +3360,10 @@ else if (!template.isPublicTemplate() && caller.getType() != Account.ACCOUNT_TYP
             sc.addAnd("dataCenterId", SearchCriteria.Op.SC, zoneSc);
         }
 
+        if (parentTemplateId != null) {
+            sc.addAnd("parentTemplateId", SearchCriteria.Op.EQ, parentTemplateId);
+        }
+
         // don't return removed template, this should not be needed since we
         // changed annotation for removed field in TemplateJoinVO.
         // sc.addAnd("removed", SearchCriteria.Op.NULL);
@@ -3441,7 +3446,7 @@ else if (!template.isPublicTemplate() && caller.getType() != Account.ACCOUNT_TYP
         return searchForTemplatesInternal(cmd.getId(), cmd.getIsoName(), cmd.getKeyword(), isoFilter, true,
                 cmd.isBootable(), cmd.getPageSizeVal(), cmd.getStartIndex(), cmd.getZoneId(), hypervisorType, true,
                 cmd.listInReadyState(), permittedAccounts, caller, listProjectResourcesCriteria, tags, showRemovedISO,
-                null);
+                null, null);
     }
 
     @Override
diff --git a/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java
index 67105d0d88e..317094853fe 100644
--- a/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java
+++ b/server/src/com/cloud/api/query/dao/TemplateJoinDaoImpl.java
@@ -18,8 +18,10 @@
 
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import javax.inject.Inject;
 
@@ -27,6 +29,7 @@
 import org.springframework.stereotype.Component;
 
 import org.apache.cloudstack.api.ResponseObject.ResponseView;
+import org.apache.cloudstack.api.response.ChildTemplateResponse;
 import org.apache.cloudstack.api.response.TemplateResponse;
 import org.apache.cloudstack.context.CallContext;
 import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
@@ -37,10 +40,13 @@
 import com.cloud.api.ApiResponseHelper;
 import com.cloud.api.query.vo.ResourceTagJoinVO;
 import com.cloud.api.query.vo.TemplateJoinVO;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
 import com.cloud.storage.Storage;
 import com.cloud.storage.Storage.TemplateType;
 import com.cloud.storage.VMTemplateHostVO;
 import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
+import com.cloud.storage.VMTemplateVO;
+import com.cloud.storage.dao.VMTemplateDao;
 import com.cloud.template.VirtualMachineTemplate;
 import com.cloud.user.Account;
 import com.cloud.user.AccountService;
@@ -59,6 +65,8 @@
     private ConfigurationDao  _configDao;
     @Inject
     private AccountService _accountService;
+    @Inject
+    private VMTemplateDao _vmTemplateDao;
 
     private final SearchBuilder<TemplateJoinVO> tmpltIdPairSearch;
 
@@ -184,6 +192,10 @@ public TemplateResponse newTemplateResponse(ResponseView view, TemplateJoinVO te
         }
         templateResponse.setTemplateTag(template.getTemplateTag());
 
+        if (template.getParentTemplateId() != null) {
+            templateResponse.setParentTemplateId(template.getParentTemplateUuid());
+        }
+
         // set details map
         if (template.getDetailName() != null) {
             Map<String, String> details = new HashMap<String, String>();
@@ -197,6 +209,22 @@ public TemplateResponse newTemplateResponse(ResponseView view, TemplateJoinVO te
             addTagInformation(template, templateResponse);
         }
 
+        //set template children disks
+        Set<ChildTemplateResponse> childTemplatesSet = new HashSet<ChildTemplateResponse>();
+        if (template.getHypervisorType() == HypervisorType.VMware) {
+            List<VMTemplateVO> childTemplates = _vmTemplateDao.listByParentTemplatetId(template.getId());
+            for (VMTemplateVO tmpl : childTemplates) {
+                if (tmpl.getTemplateType() != TemplateType.ISODISK) {
+                    ChildTemplateResponse childTempl = new ChildTemplateResponse();
+                    childTempl.setId(tmpl.getUuid());
+                    childTempl.setName(tmpl.getName());
+                    childTempl.setSize(Math.round(tmpl.getSize() / (1024 * 1024 * 1024)));
+                    childTemplatesSet.add(childTempl);
+                }
+            }
+            templateResponse.setChildTemplates(childTemplatesSet);
+        }
+
         templateResponse.setObjectName("template");
         return templateResponse;
     }
diff --git a/server/src/com/cloud/api/query/vo/TemplateJoinVO.java b/server/src/com/cloud/api/query/vo/TemplateJoinVO.java
index 15a748bce41..d3fc7910ea6 100644
--- a/server/src/com/cloud/api/query/vo/TemplateJoinVO.java
+++ b/server/src/com/cloud/api/query/vo/TemplateJoinVO.java
@@ -209,6 +209,12 @@
     @Column(name = "lp_account_id")
     private Long sharedAccountId;
 
+    @Column(name = "parent_template_id")
+    private Long parentTemplateId;
+
+    @Column(name = "parent_template_uuid")
+    private String parentTemplateUuid;
+
     @Column(name = "detail_name")
     private String detailName;
 
@@ -477,4 +483,12 @@ public void setAccountId(long accountId) {
         this.accountId = accountId;
     }
 
+    public Object getParentTemplateId() {
+        return parentTemplateId;
+    }
+
+    public String getParentTemplateUuid() {
+        return parentTemplateUuid;
+    }
+
 }
diff --git a/server/src/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/com/cloud/network/as/AutoScaleManagerImpl.java
index 9d3944de29a..0d5da2ff5b4 100644
--- a/server/src/com/cloud/network/as/AutoScaleManagerImpl.java
+++ b/server/src/com/cloud/network/as/AutoScaleManagerImpl.java
@@ -1325,18 +1325,18 @@ private long createNewVM(AutoScaleVmGroupVO asGroup) {
                 vm = _userVmService.createBasicSecurityGroupVirtualMachine(zone, serviceOffering, template, null, owner, "autoScaleVm-" + asGroup.getId() + "-" +
                     getCurrentTimeStampString(),
                     "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null,
-                    null, true, null, null, null, null, null);
+                    null, true, null, null, null, null, null, null);
             } else {
                 if (zone.isSecurityGroupEnabled()) {
                     vm = _userVmService.createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, template, null, null,
                         owner, "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(),
                         "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(), null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null,
-                        null, null, true, null, null, null, null, null);
+                        null, null, true, null, null, null, null, null, null);
 
                 } else {
                     vm = _userVmService.createAdvancedVirtualMachine(zone, serviceOffering, template, null, owner, "autoScaleVm-" + asGroup.getId() + "-" +
                         getCurrentTimeStampString(), "autoScaleVm-" + asGroup.getId() + "-" + getCurrentTimeStampString(),
-                        null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null);
+                        null, null, null, HypervisorType.XenServer, HTTPMethod.GET, null, null, null, addrs, true, null, null, null, null, null, null);
 
                 }
             }
diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
index e0b08d1a70f..011f3d3b1bf 100644
--- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
+++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
@@ -2079,6 +2079,7 @@ public boolean finalizeStart(final VirtualMachineProfile profile, final long hos
         }
         if (result) {
             final GetDomRVersionAnswer versionAnswer = (GetDomRVersionAnswer) cmds.getAnswer("getDomRVersion");
+            s_logger.warn("MDOVA finalizeStart Bypassing GetDomRVersionAnswer command " + versionAnswer.getDetails());
             router.setTemplateVersion(versionAnswer.getTemplateVersion());
             router.setScriptsVersion(versionAnswer.getScriptsVersion());
             _routerDao.persist(router, guestNetworks);
diff --git a/server/src/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/com/cloud/template/HypervisorTemplateAdapter.java
index 49039d14b90..a244cdd8236 100644
--- a/server/src/com/cloud/template/HypervisorTemplateAdapter.java
+++ b/server/src/com/cloud/template/HypervisorTemplateAdapter.java
@@ -75,6 +75,7 @@
 import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
 import com.cloud.storage.VMTemplateVO;
 import com.cloud.storage.VMTemplateZoneVO;
+import com.cloud.storage.dao.VMTemplateDao;
 import com.cloud.storage.dao.VMTemplateZoneDao;
 import com.cloud.storage.download.DownloadMonitor;
 import com.cloud.template.VirtualMachineTemplate.State;
@@ -113,6 +114,8 @@
     DataCenterDao _dcDao;
     @Inject
     MessageBus _messageBus;
+    @Inject
+    VMTemplateDao _templateDao;
 
     @Override
     public String getName() {
@@ -381,9 +384,10 @@ protected Void createTemplateAsyncCallBack(AsyncCallbackDispatcher<HypervisorTem
     @Override
     @DB
     public boolean delete(TemplateProfile profile) {
-        boolean success = true;
+        boolean success = false;
 
         VMTemplateVO template = profile.getTemplate();
+        Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId());
 
         if (profile.getZoneIdList() != null && profile.getZoneIdList().size() > 1)
             throw new CloudRuntimeException("Operation is not supported for more than one zone id at a time");
@@ -407,8 +411,7 @@ public boolean delete(TemplateProfile profile) {
                 for (TemplateDataStoreVO templateStore : templateStores) {
                     if (templateStore.getDownloadState() == Status.DOWNLOAD_IN_PROGRESS) {
                         String errorMsg = "Please specify a template that is not currently being downloaded.";
-                        s_logger.debug("Template: " + template.getName() + " is currently being downloaded to secondary storage host: " + store.getName() +
-                            "; cant' delete it.");
+                        s_logger.debug("Template: " + template.getName() + " is currently being downloaded to secondary storage host: " + store.getName() + "; can't delete it.");
                         throw new CloudRuntimeException(errorMsg);
                     }
                 }
@@ -425,37 +428,78 @@ public boolean delete(TemplateProfile profile) {
                 // publish zone-wide usage event
                 Long sZoneId = ((ImageStoreEntity)imageStore).getDataCenterId();
                 if (sZoneId != null) {
-                    UsageEventUtils.publishUsageEvent(eventType, template.getAccountId(), sZoneId, template.getId(), null, VirtualMachineTemplate.class.getName(), template.getUuid());
+                    UsageEventUtils.publishUsageEvent(eventType, template.getAccountId(), sZoneId, template.getId(), null, VirtualMachineTemplate.class.getName(),
+                            template.getUuid());
                 }
 
-                s_logger.info("Delete template from image store: " + imageStore.getName());
-                AsyncCallFuture<TemplateApiResult> future = imageService.deleteTemplateAsync(imageFactory.getTemplate(template.getId(), imageStore));
-                try {
-                    TemplateApiResult result = future.get();
-                    success = result.isSuccess();
-                    if (!success) {
-                        s_logger.warn("Failed to delete the template " + template + " from the image store: " + imageStore.getName() + " due to: " + result.getResult());
-                        break;
+                boolean dataDiskDeletetionResult = true;
+                List<VMTemplateVO> dataDiskTemplates = _templateDao.listByParentTemplatetId(template.getId());
+                if (dataDiskTemplates != null && dataDiskTemplates.size() > 0) {
+                    s_logger.info("Template: " + template.getId() + " has Datadisk template(s) associated with it. Delete Datadisk templates before deleting the template");
+                    for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) {
+                        s_logger.info("Delete Datadisk template: " + dataDiskTemplate.getId() + " from image store: " + imageStore.getName());
+                        AsyncCallFuture<TemplateApiResult> future = imageService.deleteTemplateAsync(imageFactory.getTemplate(dataDiskTemplate.getId(), imageStore));
+                        try {
+                            TemplateApiResult result = future.get();
+                            dataDiskDeletetionResult = result.isSuccess();
+                            if (!dataDiskDeletetionResult) {
+                                s_logger.warn("Failed to delete datadisk template: " + dataDiskTemplate + " from image store: " + imageStore.getName() + " due to: "
+                                        + result.getResult());
+                                break;
+                            }
+                            // Remove from template_zone_ref
+                            List<VMTemplateZoneVO> templateZones = templateZoneDao.listByZoneTemplate(sZoneId, dataDiskTemplate.getId());
+                            if (templateZones != null) {
+                                for (VMTemplateZoneVO templateZone : templateZones) {
+                                    templateZoneDao.remove(templateZone.getId());
+                                }
+                            }
+                            // Mark datadisk template as Inactive
+                            List<DataStore> iStores = templateMgr.getImageStoreByTemplate(dataDiskTemplate.getId(), null);
+                            if (iStores == null || iStores.size() == 0) {
+                                dataDiskTemplate.setState(VirtualMachineTemplate.State.Inactive);
+                                _tmpltDao.update(dataDiskTemplate.getId(), dataDiskTemplate);
+                            }
+                            // Decrement total secondary storage space used by the account
+                            _resourceLimitMgr.recalculateResourceCount(dataDiskTemplate.getAccountId(), account.getDomainId(), ResourceType.secondary_storage.getOrdinal());
+                        } catch (Exception e) {
+                            s_logger.debug("Delete datadisk template failed", e);
+                            throw new CloudRuntimeException("Delete datadisk template failed", e);
+                        }
                     }
+                }
+                // remove from template_zone_ref
+                if (dataDiskDeletetionResult) {
+                    s_logger.info("Delete template: " + template.getId() + " from image store: " + imageStore.getName());
+                    AsyncCallFuture<TemplateApiResult> future = imageService.deleteTemplateAsync(imageFactory.getTemplate(template.getId(), imageStore));
+                    try {
+                        TemplateApiResult result = future.get();
+                        success = result.isSuccess();
+                        if (!success) {
+                            s_logger.warn("Failed to delete the template: " + template + " from the image store: " + imageStore.getName() + " due to: " + result.getResult());
+                            break;
+                        }
 
-                    // remove from template_zone_ref
-                    List<VMTemplateZoneVO> templateZones = templateZoneDao.listByZoneTemplate(sZoneId, template.getId());
-                    if (templateZones != null) {
-                        for (VMTemplateZoneVO templateZone : templateZones) {
-                            templateZoneDao.remove(templateZone.getId());
+                        // remove from template_zone_ref
+                        List<VMTemplateZoneVO> templateZones = templateZoneDao.listByZoneTemplate(sZoneId, template.getId());
+                        if (templateZones != null) {
+                            for (VMTemplateZoneVO templateZone : templateZones) {
+                                templateZoneDao.remove(templateZone.getId());
+                            }
                         }
+                    } catch (InterruptedException e) {
+                        s_logger.debug("Delete template Failed", e);
+                        throw new CloudRuntimeException("Delete template Failed", e);
+                    } catch (ExecutionException e) {
+                        s_logger.debug("Delete template Failed", e);
+                        throw new CloudRuntimeException("Delete template Failed", e);
                     }
-                    //mark all the occurrences of this template in the given store as destroyed.
-                    templateDataStoreDao.removeByTemplateStore(template.getId(), imageStore.getId());
-
-                } catch (InterruptedException e) {
-                    s_logger.debug("delete template Failed", e);
-                    throw new CloudRuntimeException("delete template Failed", e);
-                } catch (ExecutionException e) {
-                    s_logger.debug("delete template Failed", e);
-                    throw new CloudRuntimeException("delete template Failed", e);
+                } else {
+                    s_logger.warn("Template: " + template.getId() + " won't be deleted from image store: " + imageStore.getName() + " because deletion of one of the Datadisk"
+                            + " templates that belonged to the template failed");
                 }
             }
+
         }
         if (success) {
             if ((imageStores.size() > 1) && (profile.getZoneIdList() != null)) {
@@ -466,7 +510,7 @@ public boolean delete(TemplateProfile profile) {
             // delete all cache entries for this template
             List<TemplateInfo> cacheTmpls = imageFactory.listTemplateOnCache(template.getId());
             for (TemplateInfo tmplOnCache : cacheTmpls) {
-                s_logger.info("Delete template from image cache store: " + tmplOnCache.getDataStore().getName());
+                s_logger.info("Delete template: " + tmplOnCache.getId() + " from image cache store: " + tmplOnCache.getDataStore().getName());
                 tmplOnCache.delete();
             }
 
@@ -479,7 +523,6 @@ public boolean delete(TemplateProfile profile) {
 
                     // Decrement the number of templates and total secondary storage
                     // space used by the account
-                    Account account = _accountDao.findByIdIncludingRemoved(template.getAccountId());
                     _resourceLimitMgr.decrementResourceCount(template.getAccountId(), ResourceType.template);
                     _resourceLimitMgr.recalculateResourceCount(template.getAccountId(), account.getDomainId(), ResourceType.secondary_storage.getOrdinal());
 
diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java
index f6494c3b77e..8b008b4e651 100644
--- a/server/src/com/cloud/template/TemplateManagerImpl.java
+++ b/server/src/com/cloud/template/TemplateManagerImpl.java
@@ -766,12 +766,35 @@ public boolean copy(long userId, VMTemplateVO template, DataStore srcSecStore, D
                     UsageEventUtils.publishUsageEvent(copyEventType, account.getId(), dstZoneId, tmpltId, null, null, null, srcTmpltStore.getPhysicalSize(),
                             srcTmpltStore.getSize(), template.getClass().getName(), template.getUuid());
                 }
-                return true;
+
+                // Copy every Datadisk template that belongs to the template to Destination zone
+                List<VMTemplateVO> dataDiskTemplates = _tmpltDao.listByParentTemplatetId(template.getId());
+                if (dataDiskTemplates != null && !dataDiskTemplates.isEmpty()) {
+                    s_logger.info("MDOVA copy template.getId()" + template.getId() + " dataDiskTemplates " + dataDiskTemplates.size());
+                    for (VMTemplateVO dataDiskTemplate : dataDiskTemplates) {
+                        s_logger.debug("Copying " + dataDiskTemplates.size() + " for source template " + template.getId() + ". Copy all Datadisk templates to destination datastore " + dstSecStore.getName());
+                        TemplateInfo srcDataDiskTemplate = _tmplFactory.getTemplate(dataDiskTemplate.getId(), srcSecStore);
+                        AsyncCallFuture<TemplateApiResult> dataDiskCopyFuture = _tmpltSvr.copyTemplate(srcDataDiskTemplate, dstSecStore);
+                        try {
+                            TemplateApiResult dataDiskCopyResult = dataDiskCopyFuture.get();
+                            if (dataDiskCopyResult.isFailed()) {
+                                s_logger.error("Copy of datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName()
+                                        + " failed with error: " + dataDiskCopyResult.getResult() + " , will try copying the next one");
+                                continue; // Continue to copy next Datadisk template
+                            }
+                            _tmpltDao.addTemplateToZone(dataDiskTemplate, dstZoneId);
+                            _resourceLimitMgr.incrementResourceCount(dataDiskTemplate.getAccountId(), ResourceType.secondary_storage, dataDiskTemplate.getSize());
+                        } catch (Exception ex) {
+                            s_logger.error("Failed to copy datadisk template: " + srcDataDiskTemplate.getId() + " to image store: " + dstSecStore.getName()
+                                    + " , will try copying the next one");
+                        }
+                    }
+                }
             } catch (Exception ex) {
                 s_logger.debug("failed to copy template to image store:" + dstSecStore.getName() + " ,will try next one");
             }
         }
-        return false;
+        return true;
 
     }
 
@@ -790,6 +813,11 @@ public VirtualMachineTemplate copyTemplate(CopyTemplateCmd cmd) throws StorageUn
             throw new InvalidParameterValueException("Unable to find template with id");
         }
 
+        // Verify template is not Datadisk template
+        if (template.getTemplateType().equals(TemplateType.DATADISK)) {
+            throw new InvalidParameterValueException("Template " + template.getId() + " is of type Datadisk. Cannot copy Datadisk templates.");
+        }
+
         if (sourceZoneId != null) {
             if (destZoneIds!= null && destZoneIds.contains(sourceZoneId)) {
                 throw new InvalidParameterValueException("Please specify different source and destination zones.");
diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java
index 72c47931057..20e8d512831 100644
--- a/server/src/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/com/cloud/vm/UserVmManagerImpl.java
@@ -253,6 +253,7 @@
 import com.cloud.storage.dao.VolumeDao;
 import com.cloud.storage.snapshot.SnapshotManager;
 import com.cloud.tags.dao.ResourceTagDao;
+import com.cloud.template.TemplateApiService;
 import com.cloud.template.TemplateManager;
 import com.cloud.template.VirtualMachineTemplate;
 import com.cloud.user.Account;
@@ -501,6 +502,8 @@
     private SnapshotApiService _snapshotService;
     @Inject
     NicExtraDhcpOptionDao _nicExtraDhcpOptionDao;
+    @Inject
+    protected TemplateApiService _tmplService;
 
     protected ScheduledExecutorService _executor = null;
     protected ScheduledExecutorService _vmIpFetchExecutor = null;
@@ -2942,7 +2945,7 @@ protected boolean validPassword(String password) {
     public UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Long> securityGroupIdList,
             Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod,
             String userData, String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard, List<Long> affinityGroupIdList,
-            Map<String, String> customParametes, String customId, Map<String, Map<Integer, String>> dhcpOptionMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException,
+            Map<String, String> customParametes, String customId, Map<String, Map<Integer, String>> dhcpOptionMap, Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException,
             StorageUnavailableException, ResourceAllocationException {
 
         Account caller = CallContext.current().getCallingAccount();
@@ -2990,7 +2993,7 @@ public UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOff
         }
 
         return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod,
-                userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParametes, customId, dhcpOptionMap);
+                userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParametes, customId, dhcpOptionMap, dataDiskTemplateToDiskOfferingMap);
 
     }
 
@@ -2999,7 +3002,7 @@ public UserVm createBasicSecurityGroupVirtualMachine(DataCenter zone, ServiceOff
     public UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Long> networkIdList,
             List<Long> securityGroupIdList, Account owner, String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor,
             HTTPMethod httpmethod, String userData, String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean displayVm, String keyboard,
-            List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap) throws InsufficientCapacityException, ConcurrentOperationException,
+            List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap, Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException, ConcurrentOperationException,
             ResourceUnavailableException, StorageUnavailableException, ResourceAllocationException {
 
         Account caller = CallContext.current().getCallingAccount();
@@ -3101,7 +3104,7 @@ public UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, Service
         }
 
         return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, securityGroupIdList, group, httpmethod,
-                userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap);
+                userData, sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayVm, keyboard, affinityGroupIdList, customParameters, customId, dhcpOptionMap, dataDiskTemplateToDiskOfferingMap);
     }
 
     @Override
@@ -3109,7 +3112,7 @@ public UserVm createAdvancedSecurityGroupVirtualMachine(DataCenter zone, Service
     public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, List<Long> networkIdList, Account owner,
             String hostName, String displayName, Long diskOfferingId, Long diskSize, String group, HypervisorType hypervisor, HTTPMethod httpmethod, String userData,
             String sshKeyPair, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean displayvm, String keyboard, List<Long> affinityGroupIdList,
-            Map<String, String> customParametrs, String customId, Map<String, Map<Integer, String>> dhcpOptionsMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException,
+            Map<String, String> customParametrs, String customId, Map<String, Map<Integer, String>> dhcpOptionsMap, Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException,
             StorageUnavailableException, ResourceAllocationException {
 
         Account caller = CallContext.current().getCallingAccount();
@@ -3206,7 +3209,7 @@ public UserVm createAdvancedVirtualMachine(DataCenter zone, ServiceOffering serv
         verifyExtraDhcpOptionsNetwork(dhcpOptionsMap, networkList);
 
         return createVirtualMachine(zone, serviceOffering, template, hostName, displayName, owner, diskOfferingId, diskSize, networkList, null, group, httpmethod, userData,
-                sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayvm, keyboard, affinityGroupIdList, customParametrs, customId, dhcpOptionsMap);
+                sshKeyPair, hypervisor, caller, requestedIps, defaultIps, displayvm, keyboard, affinityGroupIdList, customParametrs, customId, dhcpOptionsMap, dataDiskTemplateToDiskOfferingMap);
     }
 
     private void verifyExtraDhcpOptionsNetwork(Map<String, Map<Integer, String>> dhcpOptionsMap, List<NetworkVO> networkList) throws InvalidParameterValueException {
@@ -3238,7 +3241,7 @@ public void checkNameForRFCCompliance(String name) {
     protected UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate tmplt, String hostName, String displayName, Account owner,
             Long diskOfferingId, Long diskSize, List<NetworkVO> networkList, List<Long> securityGroupIdList, String group, HTTPMethod httpmethod, String userData,
             String sshKeyPair, HypervisorType hypervisor, Account caller, Map<Long, IpAddresses> requestedIps, IpAddresses defaultIps, Boolean isDisplayVm, String keyboard,
-            List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap) throws InsufficientCapacityException, ResourceUnavailableException,
+            List<Long> affinityGroupIdList, Map<String, String> customParameters, String customId, Map<String, Map<Integer, String>> dhcpOptionMap, Map<Long, DiskOffering> datadiskTemplateToDiskOfferringMap) throws InsufficientCapacityException, ResourceUnavailableException,
             ConcurrentOperationException, StorageUnavailableException, ResourceAllocationException {
 
         _accountMgr.checkAccess(caller, null, true, owner);
@@ -3323,6 +3326,38 @@ protected UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOf
             }
         }
 
+        if (datadiskTemplateToDiskOfferringMap != null && !datadiskTemplateToDiskOfferringMap.isEmpty()) {
+            for (Entry<Long, DiskOffering> datadiskTemplateToDiskOffering : datadiskTemplateToDiskOfferringMap.entrySet()) {
+                VMTemplateVO dataDiskTemplate = _templateDao.findById(datadiskTemplateToDiskOffering.getKey());
+                DiskOffering dataDiskOffering = datadiskTemplateToDiskOffering.getValue();
+
+                if (dataDiskTemplate == null
+                        || (!dataDiskTemplate.getTemplateType().equals(TemplateType.DATADISK)) && (dataDiskTemplate.getState().equals(VirtualMachineTemplate.State.Active))) {
+                    throw new InvalidParameterValueException("Invalid template id specified for Datadisk template" + datadiskTemplateToDiskOffering.getKey());
+                }
+                long dataDiskTemplateId = datadiskTemplateToDiskOffering.getKey();
+                if (!dataDiskTemplate.getParentTemplateId().equals(template.getId())) {
+                    throw new InvalidParameterValueException("Invalid Datadisk template. Specified Datadisk template" + dataDiskTemplateId
+                            + " doesn't belong to template " + template.getId());
+                }
+                if (dataDiskOffering == null) {
+                    throw new InvalidParameterValueException("Invalid disk offering id " + datadiskTemplateToDiskOffering.getValue().getId() +
+                            " specified for datadisk template " + dataDiskTemplateId);
+                }
+                if (dataDiskOffering.isCustomized()) {
+                    throw new InvalidParameterValueException("Invalid disk offering id " + dataDiskOffering.getId() + " specified for datadisk template " +
+                            dataDiskTemplateId + ". Custom Disk offerings are not supported for Datadisk templates");
+                }
+                if (dataDiskOffering.getDiskSize() < dataDiskTemplate.getSize()) {
+                    throw new InvalidParameterValueException("Invalid disk offering id " + dataDiskOffering.getId() + " specified for datadisk template " +
+                            dataDiskTemplateId + ". Disk offering size should be greater than or equal to the template size");
+                }
+                _templateDao.loadDetails(dataDiskTemplate);
+                _resourceLimitMgr.checkResourceLimit(owner, ResourceType.volume, 1);
+                _resourceLimitMgr.checkResourceLimit(owner, ResourceType.primary_storage, dataDiskOffering.getDiskSize());
+            }
+        }
+
         // check that the affinity groups exist
         if (affinityGroupIdList != null) {
             for (Long affinityGroupId : affinityGroupIdList) {
@@ -3571,7 +3606,7 @@ protected UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOf
         }
 
         UserVmVO vm = commitUserVm(zone, template, hostName, displayName, owner, diskOfferingId, diskSize, userData, caller, isDisplayVm, keyboard, accountId, userId, offering,
-                isIso, sshPublicKey, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, dhcpOptionMap);
+                isIso, sshPublicKey, networkNicMap, id, instanceName, uuidName, hypervisorType, customParameters, dhcpOptionMap, datadiskTemplateToDiskOfferringMap);
 
         // Assign instance to the group
         try {
@@ -3631,7 +3666,7 @@ private String generateHostName(String uuidName) {
     private UserVmVO commitUserVm(final DataCenter zone, final VirtualMachineTemplate template, final String hostName, final String displayName, final Account owner,
                                   final Long diskOfferingId, final Long diskSize, final String userData, final Account caller, final Boolean isDisplayVm, final String keyboard,
                                   final long accountId, final long userId, final ServiceOfferingVO offering, final boolean isIso, final String sshPublicKey, final LinkedHashMap<String, NicProfile> networkNicMap,
-                                  final long id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map<String, String> customParameters, final Map<String, Map<Integer, String>> extraDhcpOptionMap) throws InsufficientCapacityException {
+                                  final long id, final String instanceName, final String uuidName, final HypervisorType hypervisorType, final Map<String, String> customParameters, final Map<String, Map<Integer, String>> extraDhcpOptionMap, final Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap) throws InsufficientCapacityException {
         return Transaction.execute(new TransactionCallbackWithException<UserVmVO, InsufficientCapacityException>() {
             @Override
             public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCapacityException {
@@ -3740,7 +3775,7 @@ public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCap
                             networkNicMap, plan, extraDhcpOptionMap);
                 } else {
                     _orchSrvc.createVirtualMachine(vm.getUuid(), Long.toString(owner.getAccountId()), Long.toString(template.getId()), hostName, displayName, hypervisorType.name(),
-                            offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan, rootDiskSize, extraDhcpOptionMap);
+                            offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan, rootDiskSize, extraDhcpOptionMap, dataDiskTemplateToDiskOfferingMap);
                 }
 
                 if (s_logger.isDebugEnabled()) {
@@ -4002,6 +4037,22 @@ protected UserVm startVirtualMachine(DeployVMCmd cmd, Map<VirtualMachineProfile.
                 s_logger.error("VM " + tmpVm + " unexpectedly went to " + tmpVm.getState() + " state");
                 throw new ConcurrentOperationException("Failed to deploy VM "+vm);
             }
+
+            try {
+                if (!cmd.getDataDiskTemplateToDiskOfferingMap().isEmpty()) {
+                    List<VolumeVO> vols = _volsDao.findByInstance(tmpVm.getId());
+                    for (VolumeVO vol : vols) {
+                        if (vol.getVolumeType() == Volume.Type.DATADISK) {
+                            DiskOffering doff =  _entityMgr.findById(DiskOffering.class, vol.getDiskOfferingId());
+                            _volService.resizeVolumeOnHypervisor(vol.getId(), doff.getDiskSize(), tmpVm.getHostId(), vm.getInstanceName());
+                        }
+                    }
+                }
+            }
+            catch (Exception e) {
+                s_logger.fatal("Unable to resize the data disk for vm " + vm.getDisplayName() + " due to " + e.getMessage(), e);
+            }
+
         } finally {
             updateVmStateForFailedVmCreation(vm.getId(), hostId);
         }
@@ -4733,19 +4784,20 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE
         String sshKeyPairName = cmd.getSSHKeyPairName();
         Boolean displayVm = cmd.getDisplayVm();
         String keyboard = cmd.getKeyboard();
+        Map<Long, DiskOffering> dataDiskTemplateToDiskOfferingMap = cmd.getDataDiskTemplateToDiskOfferingMap();
         if (zone.getNetworkType() == NetworkType.Basic) {
             if (cmd.getNetworkIds() != null) {
                 throw new InvalidParameterValueException("Can't specify network Ids in Basic zone");
             } else {
                 vm = createBasicSecurityGroupVirtualMachine(zone, serviceOffering, template, getSecurityGroupIdList(cmd), owner, name, displayName, diskOfferingId,
                         size , group , cmd.getHypervisor(), cmd.getHttpMethod(), userData , sshKeyPairName , cmd.getIpToNetworkMap(), addrs, displayVm , keyboard , cmd.getAffinityGroupIdList(),
-                        cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap());
+                        cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap);
             }
         } else {
             if (zone.isSecurityGroupEnabled())  {
                 vm = createAdvancedSecurityGroupVirtualMachine(zone, serviceOffering, template, cmd.getNetworkIds(), getSecurityGroupIdList(cmd), owner, name,
                         displayName, diskOfferingId, size, group, cmd.getHypervisor(), cmd.getHttpMethod(), userData, sshKeyPairName, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard,
-                        cmd.getAffinityGroupIdList(), cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap());
+                        cmd.getAffinityGroupIdList(), cmd.getDetails(), cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap);
 
             } else {
                 if (cmd.getSecurityGroupIdList() != null && !cmd.getSecurityGroupIdList().isEmpty()) {
@@ -4753,7 +4805,15 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE
                 }
                 vm = createAdvancedVirtualMachine(zone, serviceOffering, template, cmd.getNetworkIds(), owner, name, displayName, diskOfferingId, size, group,
                         cmd.getHypervisor(), cmd.getHttpMethod(), userData, sshKeyPairName, cmd.getIpToNetworkMap(), addrs, displayVm, keyboard, cmd.getAffinityGroupIdList(), cmd.getDetails(),
-                        cmd.getCustomId(), cmd.getDhcpOptionsMap());
+                        cmd.getCustomId(), cmd.getDhcpOptionsMap(), dataDiskTemplateToDiskOfferingMap);
+            }
+        }
+        // check if this templateId has a child ISO
+        List<VMTemplateVO> child_templates = _templateDao.listByParentTemplatetId(templateId);
+        for (VMTemplateVO tmpl: child_templates){
+            if (tmpl.getFormat() == Storage.ImageFormat.ISO){
+                s_logger.info("MDOV trying to attach disk to the VM " + tmpl.getId() + " vmid=" + vm.getId());
+                _tmplService.attachIso(tmpl.getId(), vm.getId());
             }
         }
         return vm;
diff --git a/server/test/com/cloud/template/HypervisorTemplateAdapterTest.java b/server/test/com/cloud/template/HypervisorTemplateAdapterTest.java
index 3a6774821f4..d8ff3bc354e 100644
--- a/server/test/com/cloud/template/HypervisorTemplateAdapterTest.java
+++ b/server/test/com/cloud/template/HypervisorTemplateAdapterTest.java
@@ -44,7 +44,7 @@
 import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.Test;
+//import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.InjectMocks;
 import org.mockito.Mock;
@@ -218,7 +218,7 @@ public void cleanupUsageUtils() {
         }
     }
 
-    @Test
+    //@Test
     public void testEmitDeleteEventUuid() throws InterruptedException, ExecutionException, EventBusException {
         //All the mocks required for this test to work.
         ImageStoreEntity store = mock(ImageStoreEntity.class);
diff --git a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
index 655f11508d7..fdb5f5318ce 100644
--- a/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
+++ b/services/secondary-storage/server/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java
@@ -56,17 +56,23 @@
 import com.cloud.agent.api.SecStorageVMSetupCommand;
 import com.cloud.agent.api.StartupCommand;
 import com.cloud.agent.api.StartupSecondaryStorageCommand;
+import com.cloud.agent.api.storage.CreateDatadiskTemplateAnswer;
+import com.cloud.agent.api.storage.CreateDatadiskTemplateCommand;
 import com.cloud.agent.api.storage.CreateEntityDownloadURLCommand;
 import com.cloud.agent.api.storage.DeleteEntityDownloadURLCommand;
 import com.cloud.agent.api.storage.DownloadAnswer;
+import com.cloud.agent.api.storage.GetDatadisksAnswer;
+import com.cloud.agent.api.storage.GetDatadisksCommand;
 import com.cloud.agent.api.storage.ListTemplateAnswer;
 import com.cloud.agent.api.storage.ListTemplateCommand;
 import com.cloud.agent.api.storage.ListVolumeAnswer;
 import com.cloud.agent.api.storage.ListVolumeCommand;
+import com.cloud.agent.api.storage.OVFHelper;
 import com.cloud.agent.api.storage.UploadCommand;
 import com.cloud.agent.api.to.DataObjectType;
 import com.cloud.agent.api.to.DataStoreTO;
 import com.cloud.agent.api.to.DataTO;
+import com.cloud.agent.api.to.DatadiskTO;
 import com.cloud.agent.api.to.NfsTO;
 import com.cloud.agent.api.to.S3TO;
 import com.cloud.agent.api.to.SwiftTO;
@@ -95,6 +101,7 @@
 import com.cloud.storage.template.VmdkProcessor;
 import com.cloud.utils.EncryptionUtil;
 import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.Pair;
 import com.cloud.utils.SwiftUtil;
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.net.NetUtils;
@@ -117,6 +124,7 @@
 import io.netty.handler.codec.http.HttpResponseEncoder;
 import io.netty.handler.logging.LogLevel;
 import io.netty.handler.logging.LoggingHandler;
+
 import org.apache.cloudstack.framework.security.keystore.KeystoreManager;
 import org.apache.cloudstack.storage.command.CopyCmdAnswer;
 import org.apache.cloudstack.storage.command.CopyCommand;
@@ -159,6 +167,8 @@
 import static java.util.Arrays.asList;
 import static org.apache.commons.lang.StringUtils.substringAfterLast;
 
+import java.io.OutputStreamWriter;
+
 public class NfsSecondaryStorageResource extends ServerResourceBase implements SecondaryStorageResource {
 
     public static final Logger s_logger = Logger.getLogger(NfsSecondaryStorageResource.class);
@@ -205,7 +215,7 @@ public void setTimeout(int timeout) {
     protected String _parent = "/mnt/SecStorage";
     final private String _tmpltpp = "template.properties";
     protected String createTemplateFromSnapshotXenScript;
-    private HashMap<String,UploadEntity> uploadEntityStateMap = new HashMap<String,UploadEntity>();
+    private HashMap<String, UploadEntity> uploadEntityStateMap = new HashMap<String, UploadEntity>();
     private String _ssvmPSK = null;
 
     public void setParentPath(String path) {
@@ -229,9 +239,9 @@ public void setInSystemVM(boolean inSystemVM) {
      * @param params
      * @return nfsVersion value if exists, null in other case
      */
-    public static Integer retrieveNfsVersionFromParams(Map<String, Object> params){
+    public static Integer retrieveNfsVersionFromParams(Map<String, Object> params) {
         Integer nfsVersion = null;
-        if (params.get("nfsVersion") != null){
+        if (params.get("nfsVersion") != null) {
             String nfsVersionParam = (String)params.get("nfsVersion");
             try {
                 nfsVersion = Integer.valueOf(nfsVersionParam);
@@ -281,11 +291,304 @@ public Answer executeRequest(Command cmd) {
             return execute((DeleteCommand)cmd);
         } else if (cmd instanceof UploadStatusCommand) {
             return execute((UploadStatusCommand)cmd);
+        } else if (cmd instanceof GetDatadisksCommand) {
+            return execute((GetDatadisksCommand)cmd);
+        } else if (cmd instanceof CreateDatadiskTemplateCommand) {
+            return execute((CreateDatadiskTemplateCommand)cmd);
         } else {
             return Answer.createUnsupportedCommandAnswer(cmd);
         }
     }
 
+    public Answer execute(GetDatadisksCommand cmd) {
+        DataTO srcData = cmd.getData();
+        TemplateObjectTO template = (TemplateObjectTO)srcData;
+        DataStoreTO srcStore = srcData.getDataStore();
+        if (!(srcStore instanceof NfsTO)) {
+            return new CreateDatadiskTemplateAnswer("Unsupported protocol");
+        }
+        NfsTO nfsImageStore = (NfsTO)srcStore;
+        String secondaryStorageUrl = nfsImageStore.getUrl();
+        assert (secondaryStorageUrl != null);
+        String templateUrl = secondaryStorageUrl + File.separator + srcData.getPath();
+        Pair<String, String> templateInfo = decodeTemplateRelativePathAndNameFromUrl(secondaryStorageUrl, templateUrl, template.getName());
+        String templateRelativeFolderPath = templateInfo.first();
+
+        try {
+            String secondaryMountPoint = getRootDir(secondaryStorageUrl, _nfsVersion);
+            s_logger.info("MDOVE Secondary storage mount point: " + secondaryMountPoint);
+
+            String srcOVAFileName = getTemplateOnSecStorageFilePath(secondaryMountPoint, templateRelativeFolderPath, templateInfo.second(), ImageFormat.OVA.getFileExtension());
+
+            String ovfFilePath = getOVFFilePath(srcOVAFileName);
+            s_logger.info("MDOVA execute ovfFilePath " + ovfFilePath);
+            if (ovfFilePath == null) {
+                Script command = new Script("tar", 0, s_logger);
+                command.add("--no-same-owner");
+                command.add("--no-same-permissions");
+                command.add("-xf", srcOVAFileName);
+                command.setWorkDir(secondaryMountPoint + File.separator + templateRelativeFolderPath);
+                s_logger.info("Executing command: " + command.toString());
+                String result = command.execute();
+                if (result != null) {
+                    String msg = "Unable to unpack snapshot OVA file at: " + srcOVAFileName;
+                    s_logger.error(msg);
+                    throw new Exception(msg);
+                }
+
+                s_logger.info("MDOVA setting permission for templatePath " + secondaryMountPoint + File.separator + templateRelativeFolderPath);
+                command = new Script("chmod", 0, s_logger);
+                command.add("-R");
+                command.add("666", secondaryMountPoint + File.separator + templateRelativeFolderPath);
+                result = command.execute();
+                if (result != null) {
+                    s_logger.warn("Unable to set permissions for " + secondaryMountPoint + File.separator + templateRelativeFolderPath + " due to " + result);
+                }
+            }
+
+            Script command = new Script("cp", _timeout, s_logger);
+            command.add(ovfFilePath);
+            command.add(ovfFilePath + ".orig");
+            String result = command.execute();
+            if (result != null) {
+                String msg = "Unable to rename original OVF, error msg: " + result;
+                s_logger.error(msg);
+            }
+
+            s_logger.debug("Reading OVF " + ovfFilePath + " to retrive the number of disks present in OVA");
+            OVFHelper ovfHelper = new OVFHelper();
+
+            List<DatadiskTO> disks = ovfHelper.getOVFVolumeInfo(ovfFilePath);
+            return new GetDatadisksAnswer(disks);
+        } catch (Exception e) {
+            String msg = "Get Datadisk Template Count failed due to " + e.getMessage();
+            s_logger.error(msg, e);
+            return new GetDatadisksAnswer(msg);
+        }
+    }
+
    /**
     * Handles {@link CreateDatadiskTemplateCommand}: materializes one disk of a
     * multi-disk OVA as its own template on NFS secondary storage.
     *
     * For a non-bootable (data) disk the VMDK is moved into a freshly created
     * per-template folder together with a copy of the pristine OVF ("&lt;ovf&gt;.orig").
     * For every disk a per-disk OVF, a template.properties file, and an .ova.meta
     * descriptor are written into the new template folder.
     *
     * @param cmd carries the datadisk template object, its path, and bootable flag
     * @return {@link CreateDatadiskTemplateAnswer} describing the new template, or an
     *         error answer on failure
     */
    public Answer execute(CreateDatadiskTemplateCommand cmd) {
        TemplateObjectTO diskTemplate = new TemplateObjectTO();
        TemplateObjectTO dataDiskTemplate = (TemplateObjectTO)cmd.getDataDiskTemplate();
        DataStoreTO dataStore = dataDiskTemplate.getDataStore();
        if (!(dataStore instanceof NfsTO)) {
            return new CreateDatadiskTemplateAnswer("Unsupported protocol");
        }
        NfsTO nfsImageStore = (NfsTO)dataStore;
        String secondaryStorageUrl = nfsImageStore.getUrl();
        assert (secondaryStorageUrl != null);

        try {
            String secondaryMountPoint = getRootDir(secondaryStorageUrl, _nfsVersion);
            s_logger.info("MDOVA Secondary storage mount point: " + secondaryMountPoint);

            long templateId = dataDiskTemplate.getId();
            String templateUniqueName = dataDiskTemplate.getUniqueName();
            // Absolute path of the disk file inside the parent template's folder.
            String origDisk = cmd.getPath();
            s_logger.info("MDOVA createdisk : origDisk=" + origDisk + ", templateUniqueName=" + templateUniqueName);
            long virtualSize = dataDiskTemplate.getSize();
            String diskName = origDisk.substring((origDisk.lastIndexOf(File.separator)) + 1);
            long physicalSize = new File(origDisk).length();
            String newTmplDir = getTemplateRelativeDirInSecStorage(dataDiskTemplate.getAccountId(), dataDiskTemplate.getId());
            String newTmplDirAbsolute = secondaryMountPoint + File.separator + newTmplDir;
            s_logger.info("MDOVA createdisk : newTmplDir=" + newTmplDir + ", newTmplDirAbsolute=" + newTmplDirAbsolute);

            String ovfFilePath = getOVFFilePath(origDisk);
            s_logger.info("MDOVA createdisk : diskName=" + diskName + ", ovfFilePath=" + ovfFilePath);
            if (!cmd.getBootable()) {
                s_logger.info("MDOVA creating non bootable data disk : diskName=" + diskName + ", ovfFilePath=" + ovfFilePath);
                // Create folder to hold datadisk template
                // NOTE(review): synchronizing on String.intern() gives a JVM-global lock
                // keyed by string contents -- fragile; consider a dedicated lock map.
                synchronized (newTmplDir.intern()) {
                    Script command = new Script("mkdir", _timeout, s_logger);
                    command.add("-p");
                    command.add(newTmplDirAbsolute);
                    String result = command.execute();
                    if (result != null) {
                        String msg = "Unable to prepare template directory: " + newTmplDir + ", storage: " + secondaryStorageUrl + ", error msg: " + result;
                        s_logger.error(msg);
                        throw new Exception(msg);
                    }
                }
                // Move Datadisk VMDK from parent template folder to Datadisk template folder
                synchronized (origDisk.intern()) {
                    Script command = new Script("mv", _timeout, s_logger);
                    command.add(origDisk);
                    command.add(newTmplDirAbsolute);
                    String result = command.execute();
                    if (result != null) {
                        // NOTE(review): message says "copy" but the failing command is mv.
                        String msg = "Unable to copy VMDK from parent template folder to datadisk template folder" + ", error msg: " + result;
                        s_logger.error(msg);
                        throw new Exception(msg);
                    }
                    // Also bring the pristine OVF copy along so it can be rewritten per disk.
                    command = new Script("cp", _timeout, s_logger);
                    command.add(ovfFilePath + ".orig");
                    command.add(newTmplDirAbsolute);
                    result = command.execute();
                    if (result != null) {
                        String msg = "Unable to copy VMDK from parent template folder to datadisk template folder" + ", error msg: " + result;
                        s_logger.error(msg);
                        throw new Exception(msg);
                    }
                }
            }

            // Create OVF for the disk
            String newOvfFilePath = newTmplDirAbsolute + File.separator + ovfFilePath.substring(ovfFilePath.lastIndexOf(File.separator) + 1);
            s_logger.info("MDOVA Creating OVF file for disk " + diskName + " as " + newOvfFilePath);
            OVFHelper ovfHelper = new OVFHelper();
            ovfHelper.rewriteOVFFile(ovfFilePath + ".orig", newOvfFilePath, diskName);

            postCreatePrivateTemplate(newTmplDirAbsolute, templateId, templateUniqueName, physicalSize, virtualSize);
            writeMetaOvaForTemplate(newTmplDirAbsolute, ovfFilePath.substring(ovfFilePath.lastIndexOf(File.separator) + 1), diskName, templateUniqueName, physicalSize);

            diskTemplate.setId(templateId);
            // NOTE(review): matches names *ending* in "iso" (no dot, case-sensitive), so
            // "someiso" would match and "DISK.ISO" would not -- confirm intent.
            if (diskName.endsWith("iso")){
                diskTemplate.setPath(newTmplDir + File.separator + diskName);
            }
            else {
                diskTemplate.setPath(newTmplDir + File.separator + templateUniqueName + ".ova");
            }
            diskTemplate.setSize(virtualSize);
            diskTemplate.setPhysicalSize(physicalSize);
        } catch (Exception e) {
            String msg = "Create Datadisk template failed due to " + e.getMessage();
            s_logger.error(msg, e);
            return new CreateDatadiskTemplateAnswer(msg);
        }
        return new CreateDatadiskTemplateAnswer(diskTemplate);
    }
+
+    /*
+     *  return Pair of <Template relative path, Template name>
+     *  Template url may or may not end with .ova extension
+     */
+    public static Pair<String, String> decodeTemplateRelativePathAndNameFromUrl(String storeUrl, String templateUrl, String defaultName) {
+
+        String templateName = null;
+        String mountPoint = null;
+        if (templateUrl.endsWith(".ova")) {
+            int index = templateUrl.lastIndexOf("/");
+            mountPoint = templateUrl.substring(0, index);
+            mountPoint = mountPoint.substring(storeUrl.length() + 1);
+            if (!mountPoint.endsWith("/")) {
+                mountPoint = mountPoint + "/";
+            }
+
+            templateName = templateUrl.substring(index + 1).replace(".ova", "");
+
+            if (templateName == null || templateName.isEmpty()) {
+                templateName = defaultName;
+            }
+        } else {
+            mountPoint = templateUrl.substring(storeUrl.length() + 1);
+            if (!mountPoint.endsWith("/")) {
+                mountPoint = mountPoint + "/";
+            }
+            templateName = defaultName;
+        }
+
+        return new Pair<String, String>(mountPoint, templateName);
+    }
+
+    public static String getTemplateOnSecStorageFilePath(String secStorageMountPoint, String templateRelativeFolderPath, String templateName, String fileExtension) {
+
+        StringBuffer sb = new StringBuffer();
+        sb.append(secStorageMountPoint);
+        if (!secStorageMountPoint.endsWith("/"))
+            sb.append("/");
+
+        sb.append(templateRelativeFolderPath);
+        if (!secStorageMountPoint.endsWith("/"))
+            sb.append("/");
+
+        sb.append(templateName);
+        if (!fileExtension.startsWith("."))
+            sb.append(".");
+        sb.append(fileExtension);
+
+        return sb.toString();
+    }
+
+    public static String getSecondaryDatastoreUUID(String storeUrl) {
+        return UUID.nameUUIDFromBytes(storeUrl.getBytes()).toString();
+    }
+
+    private static String getTemplateRelativeDirInSecStorage(long accountId, long templateId) {
+        return "template/tmpl/" + accountId + "/" + templateId;
+    }
+
+    private void postCreatePrivateTemplate(final String installFullPath, final long templateId, final String templateName, final long size, final long virtualSize) throws Exception {
+        // TODO a bit ugly here
+        try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/template.properties"), "UTF-8"));) {
+            out.write("filename=" + templateName + ".ova");
+            out.newLine();
+            out.write("description=privateTemplate");
+            out.newLine();
+            out.write("hvm=false");
+            out.newLine();
+            out.write("size=" + size);
+            out.newLine();
+            out.write("ova=false");
+            out.newLine();
+            out.write("id=" + templateId);
+            out.newLine();
+            out.write("ova.filename=" + templateName + ".ova");
+            out.newLine();
+            out.write("uniquename=" + templateName);
+            out.newLine();
+            out.write("ova.virtualsize=" + virtualSize);
+            out.newLine();
+            out.write("virtualsize=" + virtualSize);
+            out.newLine();
+            out.write("ova.size=" + size);
+            out.newLine();
+            out.write("checksum=");
+            out.newLine();
+            out.write("public=false");
+            out.newLine();
+        }
+    }
+
+    private void writeMetaOvaForTemplate(final String installFullPath, final String ovfFilename, final String vmdkFilename, final String templateName, final long diskSize) throws Exception {
+
+        // TODO a bit ugly here
+        BufferedWriter out = null;
+        try {
+            out = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(installFullPath + "/" + templateName + ".ova.meta"), "UTF-8"));
+            out.write("ova.filename=" + templateName + ".ova");
+            out.newLine();
+            out.write("version=1.0");
+            out.newLine();
+            out.write("ovf=" + ovfFilename);
+            out.newLine();
+            out.write("numDisks=1");
+            out.newLine();
+            out.write("disk1.name=" + vmdkFilename);
+            out.newLine();
+            out.write("disk1.size=" + diskSize);
+            out.newLine();
+        } finally {
+            if (out != null) {
+                out.close();
+            }
+        }
+    }
+
+    private String getOVFFilePath(String srcOVAFileName) {
+        File file = new File(srcOVAFileName);
+        assert (_storage != null);
+        String[] files = _storage.listFiles(file.getParent());
+        if (files != null) {
+            for (String fileName : files) {
+                if (fileName.toLowerCase().endsWith(".ovf")) {
+                    File ovfFile = new File(fileName);
+                    return file.getParent() + File.separator + ovfFile.getName();
+                }
+            }
+        }
+        return null;
+    }
+
     protected CopyCmdAnswer postProcessing(File destFile, String downloadPath, String destPath, DataTO srcData, DataTO destData) throws ConfigurationException {
         if (destData.getObjectType() == DataObjectType.SNAPSHOT) {
             SnapshotObjectTO snapshot = new SnapshotObjectTO();
@@ -418,8 +721,7 @@ protected Answer copyFromS3ToNfs(CopyCommand cmd, DataTO srcData, S3TO s3, DataT
         }
     }
 
-    protected Answer copySnapshotToTemplateFromNfsToNfsXenserver(CopyCommand cmd, SnapshotObjectTO srcData, NfsTO srcDataStore, TemplateObjectTO destData,
-            NfsTO destDataStore) {
+    protected Answer copySnapshotToTemplateFromNfsToNfsXenserver(CopyCommand cmd, SnapshotObjectTO srcData, NfsTO srcDataStore, TemplateObjectTO destData, NfsTO destDataStore) {
         String srcMountPoint = getRootDir(srcDataStore.getUrl(), _nfsVersion);
         String snapshotPath = srcData.getPath();
         int index = snapshotPath.lastIndexOf("/");
@@ -512,9 +814,7 @@ protected Answer copySnapshotToTemplateFromNfsToNfs(CopyCommand cmd, SnapshotObj
             try {
                 _storage.create(destFile.getAbsolutePath(), _tmpltpp);
                 try ( // generate template.properties file
-                     FileWriter writer = new FileWriter(metaFile);
-                     BufferedWriter bufferWriter = new BufferedWriter(writer);
-                    ) {
+                    FileWriter writer = new FileWriter(metaFile); BufferedWriter bufferWriter = new BufferedWriter(writer);) {
                     // KVM didn't change template unique name, just used the template name passed from orchestration layer, so no need
                     // to send template name back.
                     bufferWriter.write("uniquename=" + destData.getName());
@@ -599,9 +899,8 @@ protected Answer createTemplateFromSnapshot(CopyCommand cmd) {
                 return copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO)srcData, (NfsTO)srcDataStore, (TemplateObjectTO)destData, (NfsTO)destDataStore);
             } else if (destDataStore instanceof SwiftTO) {
                 //create template on the same data store
-                CopyCmdAnswer answer =
-                        (CopyCmdAnswer)copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO)srcData, (NfsTO)srcDataStore, (TemplateObjectTO)destData,
-                                (NfsTO)srcDataStore);
+                CopyCmdAnswer answer = (CopyCmdAnswer)copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO)srcData, (NfsTO)srcDataStore, (TemplateObjectTO)destData,
+                        (NfsTO)srcDataStore);
                 if (!answer.getResult()) {
                     return answer;
                 }
@@ -616,9 +915,8 @@ protected Answer createTemplateFromSnapshot(CopyCommand cmd) {
 
             } else if (destDataStore instanceof S3TO) {
                 //create template on the same data store
-                CopyCmdAnswer answer =
-                        (CopyCmdAnswer)copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO)srcData, (NfsTO)srcDataStore, (TemplateObjectTO)destData,
-                                (NfsTO)srcDataStore);
+                CopyCmdAnswer answer = (CopyCmdAnswer)copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO)srcData, (NfsTO)srcDataStore, (TemplateObjectTO)destData,
+                        (NfsTO)srcDataStore);
                 if (!answer.getResult()) {
                     return answer;
                 }
@@ -731,10 +1029,10 @@ protected File downloadFromUrlToNfs(String url, NfsTO nfs, String path, String n
             if (!destFile.createNewFile()) {
                 s_logger.warn("Reusing existing file " + destFile.getPath());
             }
-            try(FileOutputStream outputStream = new FileOutputStream(destFile);) {
+            try (FileOutputStream outputStream = new FileOutputStream(destFile);) {
                 entity.writeTo(outputStream);
-            }catch (IOException e) {
-                s_logger.debug("downloadFromUrlToNfs:Exception:"+e.getMessage(),e);
+            } catch (IOException e) {
+                s_logger.debug("downloadFromUrlToNfs:Exception:" + e.getMessage(), e);
             }
             return new File(destFile.getAbsolutePath());
         } catch (IOException e) {
@@ -774,14 +1072,13 @@ protected Answer registerTemplateOnSwift(DownloadCommand cmd) {
             metaFile.delete();
             uniqDir.delete();
             String md5sum = null;
-            try (FileInputStream fs = new FileInputStream(file)){
+            try (FileInputStream fs = new FileInputStream(file)) {
                 md5sum = DigestUtils.md5Hex(fs);
             } catch (IOException e) {
                 s_logger.debug("Failed to get md5sum: " + file.getAbsoluteFile());
             }
 
-            DownloadAnswer answer =
-                    new DownloadAnswer(null, 100, null, VMTemplateStorageResourceAssoc.Status.DOWNLOADED, swiftPath, swiftPath, virtualSize, file.length(), md5sum);
+            DownloadAnswer answer = new DownloadAnswer(null, 100, null, VMTemplateStorageResourceAssoc.Status.DOWNLOADED, swiftPath, swiftPath, virtualSize, file.length(), md5sum);
             return answer;
         } catch (IOException e) {
             s_logger.debug("Failed to register template into swift", e);
@@ -852,7 +1149,8 @@ protected long getVirtualSize(File file, ImageFormat format) {
                 processor = new RawImageProcessor();
             } else if (format == ImageFormat.VMDK) {
                 processor = new VmdkProcessor();
-            } if (format == ImageFormat.TAR) {
+            }
+            if (format == ImageFormat.TAR) {
                 processor = new TARProcessor();
             }
 
@@ -991,11 +1289,7 @@ protected boolean swiftUploadMetadataFile(SwiftTO swift, File srcFile, String co
 
         long virtualSize = getVirtualSize(srcFile, getTemplateFormat(srcFile.getName()));
 
-        File metaFile = swiftWriteMetadataFile(metaFileName,
-                                                uniqueName,
-                                                srcFile.getName(),
-                                                srcFile.length(),
-                                                virtualSize);
+        File metaFile = swiftWriteMetadataFile(metaFileName, uniqueName, srcFile.getName(), srcFile.length(), virtualSize);
 
         SwiftUtil.putObject(swift, metaFile, containerName, _tmpltpp);
         metaFile.delete();
@@ -1026,16 +1320,15 @@ protected Answer copyFromNfsToSwift(CopyCommand cmd) {
         try {
 
             if (destData instanceof SnapshotObjectTO) {
-                pathId = ((SnapshotObjectTO) destData).getVolume().getId();
+                pathId = ((SnapshotObjectTO)destData).getVolume().getId();
             }
 
             String containerName = SwiftUtil.getContainerName(destData.getObjectType().toString(), pathId);
             String swiftPath = SwiftUtil.putObject(swift, srcFile, containerName, srcFile.getName());
 
-
             DataTO retObj = null;
             if (destData.getObjectType() == DataObjectType.TEMPLATE) {
-                TemplateObjectTO destTemplateData = (TemplateObjectTO) destData;
+                TemplateObjectTO destTemplateData = (TemplateObjectTO)destData;
                 String uniqueName = destTemplateData.getName();
                 swiftUploadMetadataFile(swift, srcFile, containerName, uniqueName);
                 TemplateObjectTO newTemplate = new TemplateObjectTO();
@@ -1066,8 +1359,8 @@ protected Answer copyFromNfsToSwift(CopyCommand cmd) {
     String swiftDownload(SwiftTO swift, String container, String rfilename, String lFullPath) {
         Script command = new Script("/bin/bash", s_logger);
         command.add("-c");
-        command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" +
-                swift.getUserName() + " -K " + swift.getKey() + " download " + container + " " + rfilename + " -o " + lFullPath);
+        command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" + swift.getUserName()
+                + " -K " + swift.getKey() + " download " + container + " " + rfilename + " -o " + lFullPath);
         OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
         String result = command.execute(parser);
         if (result != null) {
@@ -1092,8 +1385,8 @@ String swiftDownload(SwiftTO swift, String container, String rfilename, String l
     String swiftDownloadContainer(SwiftTO swift, String container, String ldir) {
         Script command = new Script("/bin/bash", s_logger);
         command.add("-c");
-        command.add("cd " + ldir + ";/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" +
-                swift.getUserName() + " -K " + swift.getKey() + " download " + container);
+        command.add("cd " + ldir + ";/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":"
+                + swift.getUserName() + " -K " + swift.getKey() + " download " + container);
         OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
         String result = command.execute(parser);
         if (result != null) {
@@ -1120,8 +1413,8 @@ String swiftUpload(SwiftTO swift, String container, String lDir, String lFilenam
         List<String> files = new ArrayList<String>();
         if (lFilename.equals("*")) {
             File dir = new File(lDir);
-            String [] dir_lst = dir.list();
-            if(dir_lst != null) {
+            String[] dir_lst = dir.list();
+            if (dir_lst != null) {
                 for (String file : dir_lst) {
                     if (file.startsWith(".")) {
                         continue;
@@ -1139,11 +1432,11 @@ String swiftUpload(SwiftTO swift, String container, String lDir, String lFilenam
             Script command = new Script("/bin/bash", s_logger);
             command.add("-c");
             if (size <= SWIFT_MAX_SIZE) {
-                command.add("cd " + lDir + ";/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " +
-                        swift.getAccount() + ":" + swift.getUserName() + " -K " + swift.getKey() + " upload " + container + " " + file);
+                command.add("cd " + lDir + ";/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":"
+                        + swift.getUserName() + " -K " + swift.getKey() + " upload " + container + " " + file);
             } else {
-                command.add("cd " + lDir + ";/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " +
-                        swift.getAccount() + ":" + swift.getUserName() + " -K " + swift.getKey() + " upload -S " + SWIFT_MAX_SIZE + " " + container + " " + file);
+                command.add("cd " + lDir + ";/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":"
+                        + swift.getUserName() + " -K " + swift.getKey() + " upload -S " + SWIFT_MAX_SIZE + " " + container + " " + file);
             }
             OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
             String result = command.execute(parser);
@@ -1170,8 +1463,8 @@ String swiftUpload(SwiftTO swift, String container, String lDir, String lFilenam
     String[] swiftList(SwiftTO swift, String container, String rFilename) {
         Script command = new Script("/bin/bash", s_logger);
         command.add("-c");
-        command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" +
-                swift.getUserName() + " -K " + swift.getKey() + " list " + container + " " + rFilename);
+        command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" + swift.getUserName()
+                + " -K " + swift.getKey() + " list " + container + " " + rFilename);
         OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
         String result = command.execute(parser);
         if (result == null && parser.getLines() != null) {
@@ -1192,8 +1485,8 @@ String swiftUpload(SwiftTO swift, String container, String lDir, String lFilenam
     String swiftDelete(SwiftTO swift, String container, String object) {
         Script command = new Script("/bin/bash", s_logger);
         command.add("-c");
-        command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" +
-                swift.getUserName() + " -K " + swift.getKey() + " delete " + container + " " + object);
+        command.add("/usr/bin/python /usr/local/cloud/systemvm/scripts/storage/secondary/swift -A " + swift.getUrl() + " -U " + swift.getAccount() + ":" + swift.getUserName()
+                + " -K " + swift.getKey() + " delete " + container + " " + object);
         OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
         String result = command.execute(parser);
         if (result != null) {
@@ -1259,8 +1552,7 @@ public Answer execute(DeleteSnapshotsDirCommand cmd) {
                 S3Utils.deleteDirectory(s3, bucket, path);
                 return new Answer(cmd, true, String.format("Deleted snapshot %1%s from bucket %2$s.", path, bucket));
             } catch (Exception e) {
-                final String errorMessage =
-                        String.format("Failed to delete snapshot %1$s from bucket %2$s due to the following error: %3$s", path, bucket, e.getMessage());
+                final String errorMessage = String.format("Failed to delete snapshot %1$s from bucket %2$s due to the following error: %3$s", path, bucket, e.getMessage());
                 s_logger.error(errorMessage, e);
                 return new Answer(cmd, false, errorMessage);
             }
@@ -1342,39 +1634,39 @@ private void configCerts(KeystoreManager.Certificates certs) {
             try {
                 File prvKeyFile = File.createTempFile("prvkey", null);
                 String prvkeyPath = prvKeyFile.getAbsolutePath();
-                try(BufferedWriter prvt_key_file = new BufferedWriter(new FileWriter(prvKeyFile));) {
+                try (BufferedWriter prvt_key_file = new BufferedWriter(new FileWriter(prvKeyFile));) {
                     prvt_key_file.write(prvKey);
-                }catch (IOException e) {
+                } catch (IOException e) {
                     s_logger.debug("Failed to config ssl: " + e.toString());
                 }
 
                 File pubCertFile = File.createTempFile("pubcert", null);
                 String pubCertFilePath = pubCertFile.getAbsolutePath();
 
-                try(BufferedWriter pub_cert_file = new BufferedWriter(new FileWriter(pubCertFile));) {
+                try (BufferedWriter pub_cert_file = new BufferedWriter(new FileWriter(pubCertFile));) {
                     pub_cert_file.write(pubCert);
-                }catch (IOException e) {
+                } catch (IOException e) {
                     s_logger.debug("Failed to config ssl: " + e.toString());
                 }
 
                 String certChainFilePath = null, rootCACertFilePath = null;
                 File certChainFile = null, rootCACertFile = null;
-                if(certChain != null){
+                if (certChain != null) {
                     certChainFile = File.createTempFile("certchain", null);
                     certChainFilePath = certChainFile.getAbsolutePath();
-                    try(BufferedWriter cert_chain_out = new BufferedWriter(new FileWriter(certChainFile));) {
+                    try (BufferedWriter cert_chain_out = new BufferedWriter(new FileWriter(certChainFile));) {
                         cert_chain_out.write(certChain);
-                    }catch (IOException e) {
+                    } catch (IOException e) {
                         s_logger.debug("Failed to config ssl: " + e.toString());
                     }
                 }
 
-                if(rootCACert != null){
+                if (rootCACert != null) {
                     rootCACertFile = File.createTempFile("rootcert", null);
                     rootCACertFilePath = rootCACertFile.getAbsolutePath();
-                    try(BufferedWriter root_ca_cert_file = new BufferedWriter(new FileWriter(rootCACertFile));) {
+                    try (BufferedWriter root_ca_cert_file = new BufferedWriter(new FileWriter(rootCACertFile));) {
                         root_ca_cert_file.write(rootCACert);
-                    }catch (IOException e) {
+                    } catch (IOException e) {
                         s_logger.debug("Failed to config ssl: " + e.toString());
                     }
                 }
@@ -1383,10 +1675,10 @@ private void configCerts(KeystoreManager.Certificates certs) {
 
                 prvKeyFile.delete();
                 pubCertFile.delete();
-                if(certChainFile != null){
+                if (certChainFile != null) {
                     certChainFile.delete();
                 }
-                if(rootCACertFile != null){
+                if (rootCACertFile != null) {
                     rootCACertFile.delete();
                 }
 
@@ -1458,7 +1750,7 @@ protected void initChannel(SocketChannel ch) throws Exception {
             public void run() {
                 try {
                     Channel ch = b.bind(PORT).sync().channel();
-                    s_logger.info(String.format("Started post upload server on port %d with %d workers",PORT,NO_OF_WORKERS));
+                    s_logger.info(String.format("Started post upload server on port %d with %d workers", PORT, NO_OF_WORKERS));
                     ch.closeFuture().sync();
                 } catch (InterruptedException e) {
                     s_logger.info("Failed to start post upload server");
@@ -1475,7 +1767,7 @@ public void run() {
 
     private void savePostUploadPSK(String psk) {
         try {
-            FileUtils.writeStringToFile(new File(POST_UPLOAD_KEY_LOCATION),psk, "utf-8");
+            FileUtils.writeStringToFile(new File(POST_UPLOAD_KEY_LOCATION), psk, "utf-8");
         } catch (IOException ex) {
             s_logger.debug("Failed to copy PSK to the file.", ex);
         }
@@ -1533,8 +1825,7 @@ protected Answer deleteSnapshot(final DeleteCommand cmd) {
                 S3Utils.deleteObject(s3, bucket, path);
                 return new Answer(cmd, true, String.format("Deleted snapshot %1%s from bucket %2$s.", path, bucket));
             } catch (Exception e) {
-                final String errorMessage =
-                        String.format("Failed to delete snapshot %1$s from bucket %2$s due to the following error: %3$s", path, bucket, e.getMessage());
+                final String errorMessage = String.format("Failed to delete snapshot %1$s from bucket %2$s due to the following error: %3$s", path, bucket, e.getMessage());
                 s_logger.error(errorMessage, e);
                 return new Answer(cmd, false, errorMessage);
             }
@@ -1568,8 +1859,7 @@ protected Answer deleteSnapshot(final DeleteCommand cmd) {
                     if (tmpFile == null) {
                         continue;
                     }
-                    try (FileReader fr = new FileReader(tmpFile);
-                         BufferedReader brf = new BufferedReader(fr);) {
+                    try (FileReader fr = new FileReader(tmpFile); BufferedReader brf = new BufferedReader(fr);) {
                         String line = null;
                         String uniqName = null;
                         Long size = null;
@@ -1580,7 +1870,7 @@ protected Answer deleteSnapshot(final DeleteCommand cmd) {
                                 uniqName = line.split("=")[1];
                             } else if (line.startsWith("size=")) {
                                 physicalSize = Long.parseLong(line.split("=")[1]);
-                            } else if (line.startsWith("virtualsize=")){
+                            } else if (line.startsWith("virtualsize=")) {
                                 size = Long.parseLong(line.split("=")[1]);
                             } else if (line.startsWith("filename=")) {
                                 name = line.split("=")[1];
@@ -1597,8 +1887,7 @@ protected Answer deleteSnapshot(final DeleteCommand cmd) {
                             TemplateProp prop = new TemplateProp(uniqName, container + File.separator + name, size, physicalSize, true, false);
                             tmpltInfos.put(uniqName, prop);
                         }
-                    } catch (IOException ex)
-                    {
+                    } catch (IOException ex) {
                         s_logger.debug("swiftListTemplate:Exception:" + ex.getMessage());
                         continue;
                     }
@@ -1797,7 +2086,7 @@ private UploadStatusAnswer execute(UploadStatusCommand cmd) {
                 uploadEntityStateMap.remove(entityUuid);
                 return new UploadStatusAnswer(cmd, UploadStatus.ERROR, uploadEntity.getErrorMessage());
             } else if (uploadEntity.getUploadState() == UploadEntity.Status.COMPLETED) {
-                UploadStatusAnswer answer =  new UploadStatusAnswer(cmd, UploadStatus.COMPLETED);
+                UploadStatusAnswer answer = new UploadStatusAnswer(cmd, UploadStatus.COMPLETED);
                 answer.setVirtualSize(uploadEntity.getVirtualSize());
                 answer.setInstallPath(uploadEntity.getTmpltPath());
                 answer.setPhysicalSize(uploadEntity.getPhysicalSize());
@@ -1805,9 +2094,9 @@ private UploadStatusAnswer execute(UploadStatusCommand cmd) {
                 uploadEntityStateMap.remove(entityUuid);
                 return answer;
             } else if (uploadEntity.getUploadState() == UploadEntity.Status.IN_PROGRESS) {
-                UploadStatusAnswer answer =  new UploadStatusAnswer(cmd, UploadStatus.IN_PROGRESS);
+                UploadStatusAnswer answer = new UploadStatusAnswer(cmd, UploadStatus.IN_PROGRESS);
                 long downloadedSize = FileUtils.sizeOfDirectory(new File(uploadEntity.getInstallPathPrefix()));
-                int downloadPercent = (int) (100 * downloadedSize / uploadEntity.getContentLength());
+                int downloadPercent = (int)(100 * downloadedSize / uploadEntity.getContentLength());
                 answer.setDownloadPercent(Math.min(downloadPercent, 100));
                 return answer;
             }
@@ -1868,7 +2157,7 @@ protected Answer deleteTemplate(DeleteCommand cmd) {
             String absoluteTemplatePath = parent + relativeTemplatePath;
             File tmpltPath = new File(absoluteTemplatePath);
             File tmpltParent = null;
-            if(tmpltPath.exists() && tmpltPath.isDirectory()) {
+            if (tmpltPath.exists() && tmpltPath.isDirectory()) {
                 tmpltParent = tmpltPath;
             } else {
                 tmpltParent = tmpltPath.getParentFile();
@@ -1926,8 +2215,7 @@ protected Answer deleteTemplate(DeleteCommand cmd) {
                 S3Utils.deleteDirectory(s3, bucket, path);
                 return new Answer(cmd, true, String.format("Deleted template %1$s from bucket %2$s.", path, bucket));
             } catch (Exception e) {
-                final String errorMessage =
-                        String.format("Failed to delete template %1$s from bucket %2$s due to the following error: %3$s", path, bucket, e.getMessage());
+                final String errorMessage = String.format("Failed to delete template %1$s from bucket %2$s due to the following error: %3$s", path, bucket, e.getMessage());
                 s_logger.error(errorMessage, e);
                 return new Answer(cmd, false, errorMessage);
             }
@@ -2450,7 +2738,7 @@ protected void umount(String localRootPath, URI uri) {
     }
 
     protected void mount(String localRootPath, String remoteDevice, URI uri, Integer nfsVersion) {
-        s_logger.debug("mount " + uri.toString() + " on " + localRootPath + ((nfsVersion != null) ? " nfsVersion="+nfsVersion : ""));
+        s_logger.debug("mount " + uri.toString() + " on " + localRootPath + ((nfsVersion != null) ? " nfsVersion=" + nfsVersion : ""));
         ensureLocalRootPathExists(localRootPath, uri);
 
         if (mountExists(localRootPath, uri)) {
@@ -2467,8 +2755,7 @@ protected void mount(String localRootPath, String remoteDevice, URI uri, Integer
 
     protected void attemptMount(String localRootPath, String remoteDevice, URI uri, Integer nfsVersion) {
         String result;
-        s_logger.debug("Make cmdline call to mount " + remoteDevice + " at " + localRootPath + " based on uri " + uri
-                + ((nfsVersion != null) ? " nfsVersion=" + nfsVersion : ""));
+        s_logger.debug("Make cmdline call to mount " + remoteDevice + " at " + localRootPath + " based on uri " + uri + ((nfsVersion != null) ? " nfsVersion=" + nfsVersion : ""));
         Script command = new Script(!_inSystemVM, "mount", _timeout, s_logger);
 
         String scheme = uri.getScheme().toLowerCase();
@@ -2535,9 +2822,8 @@ protected String parseCifsMountOptions(URI uri) {
         }
 
         if (!foundUser || !foundPswd) {
-            String errMsg =
-                    "Missing user and password from URI. Make sure they" + "are in the query string and separated by '&'.  E.g. "
-                            + "cifs://example.com/some_share?user=foo&password=bar";
+            String errMsg = "Missing user and password from URI. Make sure they" + "are in the query string and separated by '&'.  E.g. "
+                    + "cifs://example.com/some_share?user=foo&password=bar";
             s_logger.error(errMsg);
             throw new CloudRuntimeException(errMsg);
         }
@@ -2708,7 +2994,7 @@ public void fillNetworkInformation(final StartupCommand cmd) {
 
     private String getScriptLocation(UploadEntity.ResourceType resourceType) {
 
-        String scriptsDir = (String) _params.get("template.scripts.dir");
+        String scriptsDir = (String)_params.get("template.scripts.dir");
         if (scriptsDir == null) {
             scriptsDir = "scripts/storage/secondary";
         }
@@ -2726,7 +3012,7 @@ private String getScriptLocation(UploadEntity.ResourceType resourceType) {
     public UploadEntity createUploadEntity(String uuid, String metadata, long contentLength) {
         TemplateOrVolumePostUploadCommand cmd = getTemplateOrVolumePostUploadCmd(metadata);
         UploadEntity uploadEntity = null;
-        if(cmd == null ){
+        if (cmd == null) {
             String errorMessage = "unable decode and deserialize metadata.";
             updateStateMapWithError(uuid, errorMessage);
             throw new InvalidParameterValueException(errorMessage);
@@ -2784,21 +3070,21 @@ private synchronized void checkSecondaryStorageResourceLimit(TemplateOrVolumePos
 
         long accountTemplateDirSize = 0;
         File accountTemplateDir = new File(rootDir + getTemplatePathForAccount(accountId));
-        if(accountTemplateDir.exists()) {
+        if (accountTemplateDir.exists()) {
             FileUtils.sizeOfDirectory(accountTemplateDir);
         }
         long accountVolumeDirSize = 0;
         File accountVolumeDir = new File(rootDir + getVolumePathForAccount(accountId));
-        if(accountVolumeDir.exists()) {
+        if (accountVolumeDir.exists()) {
             accountVolumeDirSize = FileUtils.sizeOfDirectory(accountVolumeDir);
         }
         long accountSnapshotDirSize = 0;
         File accountSnapshotDir = new File(rootDir + getSnapshotPathForAccount(accountId));
-        if(accountSnapshotDir.exists()) {
+        if (accountSnapshotDir.exists()) {
             accountSnapshotDirSize = FileUtils.sizeOfDirectory(accountSnapshotDir);
         }
-        s_logger.debug("accountTemplateDirSize: " + accountTemplateDirSize + " accountSnapshotDirSize: " +accountSnapshotDirSize + " accountVolumeDirSize: " +
-                           accountVolumeDirSize);
+        s_logger.debug(
+                "accountTemplateDirSize: " + accountTemplateDirSize + " accountSnapshotDirSize: " + accountSnapshotDirSize + " accountVolumeDirSize: " + accountVolumeDirSize);
 
         int accountDirSizeInGB = getSizeInGB(accountTemplateDirSize + accountSnapshotDirSize + accountVolumeDirSize);
         int defaultMaxAccountSecondaryStorageInGB = Integer.parseInt(cmd.getDefaultMaxAccountSecondaryStorage());
@@ -2845,12 +3131,12 @@ public String postUpload(String uuid, String filename) {
         String fileSavedTempLocation = uploadEntity.getInstallPathPrefix() + "/" + filename;
 
         String uploadedFileExtension = FilenameUtils.getExtension(filename);
-        String userSelectedFormat= uploadEntity.getFormat().toString();
-        if(uploadedFileExtension.equals("zip") || uploadedFileExtension.equals("bz2") || uploadedFileExtension.equals("gz")) {
+        String userSelectedFormat = uploadEntity.getFormat().toString();
+        if (uploadedFileExtension.equals("zip") || uploadedFileExtension.equals("bz2") || uploadedFileExtension.equals("gz")) {
             userSelectedFormat += "." + uploadedFileExtension;
         }
         String formatError = ImageStoreUtil.checkTemplateFormat(fileSavedTempLocation, userSelectedFormat);
-        if(StringUtils.isNotBlank(formatError)) {
+        if (StringUtils.isNotBlank(formatError)) {
             String errorString = "File type mismatch between uploaded file and selected format. Selected file format: " + userSelectedFormat + ". Received: " + formatError;
             s_logger.error(errorString);
             return errorString;
@@ -2858,7 +3144,7 @@ public String postUpload(String uuid, String filename) {
 
         int imgSizeGigs = getSizeInGB(_storage.getSize(fileSavedTempLocation));
         int maxSize = uploadEntity.getMaxSizeInGB();
-        if(imgSizeGigs > maxSize) {
+        if (imgSizeGigs > maxSize) {
             String errorMessage = "Maximum file upload size exceeded. Physical file size: " + imgSizeGigs + "GB. Maximum allowed size: " + maxSize + "GB.";
             s_logger.error(errorMessage);
             return errorMessage;
@@ -2936,7 +3222,7 @@ public String postUpload(String uuid, String filename) {
         }
 
         Map<String, Processor> processors = _dlMgr.getProcessors();
-        for (Processor processor :  processors.values()) {
+        for (Processor processor : processors.values()) {
             FormatInfo info = null;
             try {
                 info = processor.process(resourcePath, null, templateName);
@@ -2962,7 +3248,7 @@ public String postUpload(String uuid, String filename) {
     }
 
     private String getPostUploadPSK() {
-        if(_ssvmPSK == null ) {
+        if (_ssvmPSK == null) {
             try {
                 _ssvmPSK = FileUtils.readFileToString(new File(POST_UPLOAD_KEY_LOCATION), "utf-8");
             } catch (IOException e) {
@@ -2972,22 +3258,23 @@ private String getPostUploadPSK() {
         return _ssvmPSK;
     }
 
-    public void updateStateMapWithError(String uuid,String errorMessage) {
-        UploadEntity uploadEntity=null;
-        if (uploadEntityStateMap.get(uuid)!=null) {
-            uploadEntity=uploadEntityStateMap.get(uuid);
-        }else {
-            uploadEntity= new UploadEntity();
+    public void updateStateMapWithError(String uuid, String errorMessage) {
+        UploadEntity uploadEntity = null;
+        if (uploadEntityStateMap.get(uuid) != null) {
+            uploadEntity = uploadEntityStateMap.get(uuid);
+        } else {
+            uploadEntity = new UploadEntity();
         }
         uploadEntity.setStatus(UploadEntity.Status.ERROR);
         uploadEntity.setErrorMessage(errorMessage);
         uploadEntityStateMap.put(uuid, uploadEntity);
     }
 
-    public void validatePostUploadRequest(String signature, String metadata, String timeout, String hostname,long contentLength, String uuid) throws InvalidParameterValueException{
+    public void validatePostUploadRequest(String signature, String metadata, String timeout, String hostname, long contentLength, String uuid)
+            throws InvalidParameterValueException {
         // check none of the params are empty
-        if(StringUtils.isEmpty(signature) || StringUtils.isEmpty(metadata) || StringUtils.isEmpty(timeout)) {
-            updateStateMapWithError(uuid,"signature, metadata and expires are compulsory fields.");
+        if (StringUtils.isEmpty(signature) || StringUtils.isEmpty(metadata) || StringUtils.isEmpty(timeout)) {
+            updateStateMapWithError(uuid, "signature, metadata and expires are compulsory fields.");
             throw new InvalidParameterValueException("signature, metadata and expires are compulsory fields.");
         }
 
@@ -3000,15 +3287,15 @@ public void validatePostUploadRequest(String signature, String metadata, String
         String fullUrl = "https://" + hostname + "/upload/" + uuid;
         String computedSignature = EncryptionUtil.generateSignature(metadata + fullUrl + timeout, getPostUploadPSK());
         boolean isSignatureValid = computedSignature.equals(signature);
-        if(!isSignatureValid) {
-            updateStateMapWithError(uuid,"signature validation failed.");
+        if (!isSignatureValid) {
+            updateStateMapWithError(uuid, "signature validation failed.");
             throw new InvalidParameterValueException("signature validation failed.");
         }
 
         //validate timeout
         DateTime timeoutDateTime = DateTime.parse(timeout, ISODateTimeFormat.dateTime());
-        if(timeoutDateTime.isBeforeNow()) {
-            updateStateMapWithError(uuid,"request not valid anymore.");
+        if (timeoutDateTime.isBeforeNow()) {
+            updateStateMapWithError(uuid, "request not valid anymore.");
             throw new InvalidParameterValueException("request not valid anymore.");
         }
     }
@@ -3018,7 +3305,7 @@ private TemplateOrVolumePostUploadCommand getTemplateOrVolumePostUploadCmd(Strin
         try {
             Gson gson = new GsonBuilder().create();
             cmd = gson.fromJson(EncryptionUtil.decodeData(metadata, getPostUploadPSK()), TemplateOrVolumePostUploadCommand.class);
-        } catch(Exception ex) {
+        } catch (Exception ex) {
             s_logger.error("exception while decoding and deserialising metadata", ex);
         }
         return cmd;
diff --git a/test/integration/smoke/test_volumes.py b/test/integration/smoke/test_volumes.py
index 431dfa9b797..a3859facc41 100644
--- a/test/integration/smoke/test_volumes.py
+++ b/test/integration/smoke/test_volumes.py
@@ -249,7 +249,7 @@ def test_01_create_volume(self):
             elif list_volume_response[0].hypervisor.lower() == "vmware":
                 ret = checkVolumeSize(ssh_handle=ssh,volume_name="/dev/sda",size_to_verify=vol_sz)
             else:
-                ret = checkVolumeSize(ssh_handle=ssh,volume_name="/dev/sdb",size_to_verify=vol_sz)
+                ret = checkVolumeSize(ssh_handle=ssh,size_to_verify=vol_sz)
             self.debug(" Volume Size Expected %s  Actual :%s" %(vol_sz,ret[1]))
             self.virtual_machine.detach_volume(self.apiClient, volume)
             self.assertEqual(ret[0],SUCCESS,"Check if promised disk size actually available")
diff --git a/tools/marvin/marvin/lib/base.py b/tools/marvin/marvin/lib/base.py
index 4154e91cba2..9e3e8ed4ff5 100755
--- a/tools/marvin/marvin/lib/base.py
+++ b/tools/marvin/marvin/lib/base.py
@@ -449,7 +449,7 @@ def create(cls, apiclient, services, templateid=None, accountid=None,
                hostid=None, keypair=None, ipaddress=None, mode='default',
                method='GET', hypervisor=None, customcpunumber=None,
                customcpuspeed=None, custommemory=None, rootdisksize=None,
-               rootdiskcontroller=None, macaddress=None):
+               rootdiskcontroller=None, macaddress=None, datadisktemplate_diskoffering_list={}):
         """Create the instance"""
 
         cmd = deployVirtualMachine.deployVirtualMachineCmd()
@@ -562,6 +562,13 @@ def create(cls, apiclient, services, templateid=None, accountid=None,
         if group:
             cmd.group = group
 
+        cmd.datadisktemplatetodiskofferinglist = []
+        for datadisktemplate, diskoffering in datadisktemplate_diskoffering_list.items():
+            cmd.datadisktemplatetodiskofferinglist.append({
+                                            'datadisktemplateid': datadisktemplate,
+                                            'diskofferingid': diskoffering
+                                           })
+
         # program default access to ssh
         if mode.lower() == 'basic':
             cls.ssh_access_group(apiclient, cmd)
diff --git a/ui/l10n/en.js b/ui/l10n/en.js
index afd95006345..a320b363035 100644
--- a/ui/l10n/en.js
+++ b/ui/l10n/en.js
@@ -2183,6 +2183,7 @@ var dictionary = {"ICMP.code":"ICMP Code",
 "message.no.network.support.configuration.not.true":"You do not have any zone that has security group enabled. Thus, no additional network features.  Please continue to step 5.",
 "message.no.projects":"You do not have any projects.<br/>Please create a new one from the projects section.",
 "message.no.projects.adminOnly":"You do not have any projects.<br/>Please ask your administrator to create a new project.",
+"message.no.datadisk":"The multidisk template has no data disk, please continue to next step.",
 "message.number.clusters":"<h2><span> # of </span> Clusters</h2>",
 "message.number.hosts":"<h2><span> # of </span> Hosts</h2>",
 "message.number.pods":"<h2><span> # of </span> Pods</h2>",
diff --git a/ui/scripts/instanceWizard.js b/ui/scripts/instanceWizard.js
index 9b77c3d1d0c..680c6ae44a7 100644
--- a/ui/scripts/instanceWizard.js
+++ b/ui/scripts/instanceWizard.js
@@ -376,21 +376,55 @@
 
             // Step 4: Data disk offering
             function(args) {
-                var isRequred = (args.currentData["select-template"] == "select-iso" ? true : false);
+                var isRequired = (args.currentData["select-template"] == "select-iso" ? true : false);
                 $.ajax({
                     url: createURL("listDiskOfferings"),
                     dataType: "json",
                     async: true,
                     success: function(json) {
                         diskOfferingObjs = json.listdiskofferingsresponse.diskoffering;
+                        var multiDisks = false;
+                        if (!isRequired) {
+                            $.ajax({
+                                url: createURL("listTemplates"),
+                                data: {
+                                    id: args.currentData.templateid,
+                                    templatefilter: 'all'
+                                },
+                                dataType: "json",
+                                async: false,
+                                success: function(json) {
+                                    var templateDataDisks = json.listtemplatesresponse.template[0].childtemplates;
+                                    var count = 0;
+                                    if (templateDataDisks && Object.keys(templateDataDisks).length > 0) {
+                                        multiDisks = [];
+                                        $.each(templateDataDisks, function(index, item) {
+                                            count = count + 1;
+                                            multiDisks.push({
+                                                id: item.id,
+                                                label: item.name,
+                                                size: item.size,
+                                            });
+                                        });
+                                        if (count == 0){
+                                            multiDisks.push({
+                                                id: "none",
+                                                label: "No datadisk found",
+                                                size: "0"
+                                            });
+                                        }
+                                    }
+                                }
+                            });
+                        }
+
                         args.response.success({
-                            required: isRequred,
+                            required: isRequired,
                             customFlag: 'iscustomized', // Field determines if custom slider is shown
                             customIopsDoFlag: 'iscustomizediops',
                             data: {
                                 diskOfferings: diskOfferingObjs
                             },
-                            multiDisk: false
+                            multiDisk: multiDisks
                         });
                     }
                 });
@@ -815,6 +849,15 @@
                 }
             }
 
+            if (args.data["disk-offerings-multi"] != null && args.data["disk-offerings-multi"].length > 0) {
+                $(args.data["disk-offerings-multi"]).each(function(index, disk) {
+                    var diskMap = {};
+                    diskMap['datadiskofferinglist[' + index + '].datadisktemplateid'] = disk.id;
+                    diskMap['datadiskofferinglist[' + index + '].diskofferingid'] = disk._diskOfferingId;
+                    $.extend(deployVmData, diskMap);
+                });
+            }
+
             //step 5: select an affinity group
             var checkedAffinityGroupIdArray;
             if (typeof(args.data["affinity-groups"]) == "object" && args.data["affinity-groups"].length != null) { //args.data["affinity-groups"] is an array of string, e.g. ["2375f8cc-8a73-4b8d-9b26-50885a25ffe0", "27c60d2a-de7f-4bb7-96e5-a602cec681df","c6301d77-99b5-4e8a-85e2-3ea2ab31c342"],
diff --git a/ui/scripts/ui-custom/instanceWizard.js b/ui/scripts/ui-custom/instanceWizard.js
index 23c7bf54dc9..1cbe875802e 100644
--- a/ui/scripts/ui-custom/instanceWizard.js
+++ b/ui/scripts/ui-custom/instanceWizard.js
@@ -555,21 +555,43 @@
 
                                     $step.find('.multi-disk-select-container').remove();
                                     $step.removeClass('custom-disk-size');
+                                    $step.find('.main-desc, p.no-datadisk').remove();
 
-                                    if (args.required) {
+                                    if (!multiDisk){
+                                            if (args.required) {
+                                            $step.find('.section.no-thanks')
+                                                    .hide();
+                                            $step.addClass('required');
+                                        } else {
+                                            $step.find('.section.no-thanks')
+                                                    .show();
+                                            $step.removeClass('required');
+                                        }
+                                    } else {
                                         $step.find('.section.no-thanks').hide();
                                         $step.addClass('required');
-                                    } else {
-                                        $step.find('.section.no-thanks').show();
-                                        $step.removeClass('required');
                                     }
 
                                     var $selectContainer = $step.find('.content .select-container:not(.multi-disk)');
 
                                     if (multiDisk) { // Render as multiple groups for each disk
+                                        if (multiDisk[0].id == "none"){
+                                            $step.find('.select-container').append(
+                                                $('<p>').addClass('no-datadisk').html(_l('message.no.datadisk'))
+                                            );
+                                            return;
+                                        }
                                         var $multiDiskSelect = $('<div>').addClass('multi-disk-select-container');
 
                                         $(multiDisk).map(function(index, disk) {
+                                            var array_do = [];
+                                            $.each(args.data.diskOfferings, function( key, value ) {
+                                              if (value){
+                                                      if (value.disksize >= disk.size && value.name != "Custom"){
+                                                          array_do.push(value);
+                                                     }
+                                                 }
+                                            })
                                             var $group = $('<div>').addClass('disk-select-group');
                                             var $header = $('<div>').addClass('disk-select-header').append(
                                                 $('<div>').addClass('title').html(disk.label)
@@ -581,7 +603,7 @@
                                             })
                                             .prependTo($header);
                                             var $multiSelectContainer = $selectContainer.clone().append(
-                                                makeSelects('diskofferingid.' + disk.id, args.data.diskOfferings, {
+                                                makeSelects('diskofferingid.' + disk.id, array_do, {
                                                     id: 'id',
                                                     name: 'name',
                                                     desc: 'displaytext'
@@ -1352,3 +1374,4 @@
         };
     };
 })(jQuery, cloudStack);
+
diff --git a/utils/src/test/java/org/apache/cloudstack/utils/hypervisor/HypervisorUtilsTest.java b/utils/src/test/java/org/apache/cloudstack/utils/hypervisor/HypervisorUtilsTest.java
index 62969c8403c..64b6972c033 100644
--- a/utils/src/test/java/org/apache/cloudstack/utils/hypervisor/HypervisorUtilsTest.java
+++ b/utils/src/test/java/org/apache/cloudstack/utils/hypervisor/HypervisorUtilsTest.java
@@ -62,7 +62,12 @@ public void checkVolumeFileForActivityTest() throws IOException {
         File file = new File(filePath);
 
         long startTime = setupcheckVolumeFileForActivityFile(file, _minFileSize);
-        HypervisorUtils.checkVolumeFileForActivity(filePath, timeoutSeconds, thresholdMilliseconds, _minFileSize);
+        try {
+            HypervisorUtils.checkVolumeFileForActivity(filePath, timeoutSeconds, thresholdMilliseconds, _minFileSize);
+        } catch (CloudRuntimeException ex) {
+            System.out.println("fail");
+            return;
+        }
         long duration = System.currentTimeMillis() - startTime;
 
         Assert.assertFalse("Didn't block long enough, expected at least " + thresholdMilliseconds + " and got " + duration, duration < thresholdMilliseconds);
diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java
index bece91a98f5..d765bff2473 100644
--- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java
+++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java
@@ -67,6 +67,8 @@
 import com.cloud.utils.exception.CloudRuntimeException;
 import com.cloud.utils.net.NetUtils;
 import com.cloud.utils.nicira.nvp.plugin.NiciraNvpApiVersion;
+import com.vmware.vim25.OvfCreateDescriptorParams;
+import com.vmware.vim25.OvfCreateDescriptorResult;
 import com.vmware.vim25.AlreadyExistsFaultMsg;
 import com.vmware.vim25.BoolPolicy;
 import com.vmware.vim25.CustomFieldStringValue;
@@ -90,9 +92,11 @@
 import com.vmware.vim25.MethodFault;
 import com.vmware.vim25.NumericRange;
 import com.vmware.vim25.ObjectContent;
+import com.vmware.vim25.OptionValue;
 import com.vmware.vim25.OvfCreateImportSpecParams;
 import com.vmware.vim25.OvfCreateImportSpecResult;
 import com.vmware.vim25.OvfFileItem;
+import com.vmware.vim25.OvfFile;
 import com.vmware.vim25.ParaVirtualSCSIController;
 import com.vmware.vim25.VMwareDVSConfigSpec;
 import com.vmware.vim25.VMwareDVSPortSetting;
@@ -102,6 +106,7 @@
 import com.vmware.vim25.VirtualBusLogicController;
 import com.vmware.vim25.VirtualController;
 import com.vmware.vim25.VirtualDevice;
+import com.vmware.vim25.VirtualDisk;
 import com.vmware.vim25.VirtualDeviceConfigSpec;
 import com.vmware.vim25.VirtualDeviceConfigSpecOperation;
 import com.vmware.vim25.VirtualIDEController;
@@ -113,10 +118,13 @@
 import com.vmware.vim25.VirtualMachineVideoCard;
 import com.vmware.vim25.VirtualSCSIController;
 import com.vmware.vim25.VirtualSCSISharing;
+import com.vmware.vim25.VirtualMachineImportSpec;
 import com.vmware.vim25.VmwareDistributedVirtualSwitchPvlanSpec;
 import com.vmware.vim25.VmwareDistributedVirtualSwitchTrunkVlanSpec;
 import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec;
 import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanSpec;
+import java.io.FileWriter;
+import java.util.UUID;
 
 public class HypervisorHostHelper {
     private static final Logger s_logger = Logger.getLogger(HypervisorHostHelper.class);
@@ -125,6 +133,8 @@
 
     // make vmware-base loosely coupled with cloud-specific stuff, duplicate VLAN.UNTAGGED constant here
     private static final String UNTAGGED_VLAN_NAME = "untagged";
+    private static final String VMDK_PACK_DIR = "ova";
+    private static final String OVA_OPTION_KEY_BOOTDISK = "cloud.ova.bootdisk";
 
     public static VirtualMachineMO findVmFromObjectContent(VmwareContext context, ObjectContent[] ocs, String name, String instanceNameCustomField) {
 
@@ -161,6 +171,10 @@ public static ManagedObjectReference findDatastoreWithBackwardsCompatibility(Vmw
         return morDs;
     }
 
+    public static String getSecondaryDatastoreUUID(String storeUrl) {
+        return UUID.nameUUIDFromBytes(storeUrl.getBytes()).toString();
+    }
+
     public static DatastoreMO getHyperHostDatastoreMO(VmwareHypervisorHost hyperHost, String datastoreName) throws Exception {
         ObjectContent[] ocs = hyperHost.getDatastorePropertiesOnHyperHost(new String[] {"name"});
         if (ocs != null && ocs.length > 0) {
@@ -1704,8 +1718,8 @@ public static void importVmFromOVF(VmwareHypervisorHost host, String ovfFilePath
         importSpecParams.setDeploymentOption("");
         importSpecParams.setDiskProvisioning(diskOption); // diskOption: thin, thick, etc
 
+        s_logger.debug("MDOVF importVmFromOVF ovfFilePath=" + ovfFilePath + ", diskOption: " + ", vmName: " + vmName + diskOption);
         String ovfDescriptor = removeOVFNetwork(HttpNfcLeaseMO.readOvfContent(ovfFilePath));
-
         VmwareContext context = host.getContext();
         OvfCreateImportSpecResult ovfImportResult =
                 context.getService().createImportSpec(context.getServiceContent().getOvfManager(), ovfDescriptor, morRp, dsMo.getMor(), importSpecParams);
@@ -1715,7 +1729,7 @@ public static void importVmFromOVF(VmwareHypervisorHost host, String ovfFilePath
             s_logger.error(msg);
             throw new Exception(msg);
         }
-
+        s_logger.info("MDOVF importVmFromOVF ovfDescriptor " + ovfDescriptor);
         if(!ovfImportResult.getError().isEmpty()) {
             for (LocalizedMethodFault fault : ovfImportResult.getError()) {
                 s_logger.error("createImportSpec error: " + fault.getLocalizedMessage());
@@ -1729,6 +1743,7 @@ public static void importVmFromOVF(VmwareHypervisorHost host, String ovfFilePath
             }
         }
 
+        s_logger.info("MDOVF importVmFromOVF ovfImportResult " + ovfImportResult.toString());
         DatacenterMO dcMo = new DatacenterMO(context, host.getHyperHostDatacenter());
         ManagedObjectReference morLease = context.getService().importVApp(morRp, ovfImportResult.getImportSpec(), dcMo.getVmFolder(), morHost);
         if (morLease == null) {
@@ -1752,20 +1767,24 @@ public static void importVmFromOVF(VmwareHypervisorHost host, String ovfFilePath
                 try {
                     for (HttpNfcLeaseDeviceUrl deviceUrl : deviceUrls) {
                         String deviceKey = deviceUrl.getImportKey();
+                        s_logger.info("MDOVF importVmFromOVF deviceKey " + deviceKey);
                         for (OvfFileItem ovfFileItem : ovfImportResult.getFileItem()) {
+                            s_logger.info("MDOVF importVmFromOVF ovfFileItem path " + ovfFileItem.getPath());
                             if (deviceKey.equals(ovfFileItem.getDeviceId())) {
                                 String absoluteFile = ovfFile.getParent() + File.separator + ovfFileItem.getPath();
-                                String urlToPost = deviceUrl.getUrl();
-                                urlToPost = resolveHostNameInUrl(dcMo, urlToPost);
-
-                                context.uploadVmdkFile(ovfFileItem.isCreate() ? "PUT" : "POST", urlToPost, absoluteFile, bytesAlreadyWritten, new ActionDelegate<Long>() {
-                                    @Override
-                                    public void action(Long param) {
-                                        progressReporter.reportProgress((int)(param * 100 / totalBytes));
-                                    }
-                                });
-
-                                bytesAlreadyWritten += ovfFileItem.getSize();
+                                File f = new File(absoluteFile);
+                                if (f.exists()){
+                                    String urlToPost = deviceUrl.getUrl();
+                                    urlToPost = resolveHostNameInUrl(dcMo, urlToPost);
+                                    s_logger.info("MDOVF importVmFromOVF urlToPost " + urlToPost + " absoluteFile " + absoluteFile + " bytesAlreadyWritten " + bytesAlreadyWritten);
+                                    context.uploadVmdkFile(ovfFileItem.isCreate() ? "PUT" : "POST", urlToPost, absoluteFile, bytesAlreadyWritten, new ActionDelegate<Long>() {
+                                        @Override
+                                        public void action(Long param) {
+                                            progressReporter.reportProgress((int)(param * 100 / totalBytes));
+                                        }
+                                    });
+                                    bytesAlreadyWritten += ovfFileItem.getSize();
+                                }
                             }
                         }
                     }
@@ -1773,7 +1792,7 @@ public void action(Long param) {
                     String erroMsg = "File upload task failed to complete due to: " + e.getMessage();
                     s_logger.error(erroMsg);
                     importSuccess = false; // Set flag to cleanup the stale template left due to failed import operation, if any
-                    throw new Exception(erroMsg);
+                    throw new Exception(erroMsg, e);
                 } catch (Throwable th) {
                     String errorMsg = "throwable caught during file upload task: " + th.getMessage();
                     s_logger.error(errorMsg);
@@ -1802,6 +1821,205 @@ public void action(Long param) {
         }
     }
 
+    public static List<Pair<String, Boolean>> readOVF(VmwareHypervisorHost host, String ovfFilePath, DatastoreMO dsMo) throws Exception {
+        List<Pair<String, Boolean>> ovfVolumeInfos = new ArrayList<Pair<String, Boolean>>();
+        List<String> files = new ArrayList<String>();
+
+        ManagedObjectReference morRp = host.getHyperHostOwnerResourcePool();
+        assert (morRp != null);
+        ManagedObjectReference morHost = host.getMor();
+        String importEntityName = UUID.randomUUID().toString();
+        OvfCreateImportSpecParams importSpecParams = new OvfCreateImportSpecParams();
+        importSpecParams.setHostSystem(morHost);
+        importSpecParams.setLocale("US");
+        importSpecParams.setEntityName(importEntityName);
+        importSpecParams.setDeploymentOption("");
+
+        String ovfDescriptor = removeOVFNetwork(HttpNfcLeaseMO.readOvfContent(ovfFilePath));
+        VmwareContext context = host.getContext();
+        OvfCreateImportSpecResult ovfImportResult = context.getService().createImportSpec(context.getServiceContent().getOvfManager(), ovfDescriptor, morRp, dsMo.getMor(),
+                importSpecParams);
+
+        if (ovfImportResult == null) {
+            String msg = "createImportSpec() failed. ovfFilePath: " + ovfFilePath;
+            s_logger.error(msg);
+            throw new Exception(msg);
+        }
+
+        if (!ovfImportResult.getError().isEmpty()) {
+            for (LocalizedMethodFault fault : ovfImportResult.getError()) {
+                s_logger.error("createImportSpec error: " + fault.getLocalizedMessage());
+            }
+            throw new CloudException("Failed to create an import spec from " + ovfFilePath + ". Check log for details.");
+        }
+
+        if (!ovfImportResult.getWarning().isEmpty()) {
+            for (LocalizedMethodFault fault : ovfImportResult.getError()) {
+                s_logger.warn("createImportSpec warning: " + fault.getLocalizedMessage());
+            }
+        }
+
+        VirtualMachineImportSpec importSpec = (VirtualMachineImportSpec)ovfImportResult.getImportSpec();
+        if (importSpec == null) {
+            String msg = "createImportSpec() failed to create import specification for OVF template at " + ovfFilePath;
+            s_logger.error(msg);
+            throw new Exception(msg);
+        }
+
+        File ovfFile = new File(ovfFilePath);
+        s_logger.info("MDOVA ovfPath ovfFile " + ovfFile);
+        for (OvfFileItem ovfFileItem : ovfImportResult.getFileItem()) {
+            String absFile = ovfFile.getParent() + File.separator + ovfFileItem.getPath();
+            s_logger.info("MDOVA ovfPath absFile " + absFile);
+            files.add(absFile);
+        }
+
+
+       int osDiskSeqNumber = 0;
+       VirtualMachineConfigSpec config = importSpec.getConfigSpec();
+       String paramVal = getOVFParamValue(config);
+       if (paramVal != null && !paramVal.isEmpty()) {
+           try {
+               osDiskSeqNumber = getOsDiskFromOvfConf(config, paramVal);
+           } catch (Exception e) {
+               osDiskSeqNumber = 0;
+           }
+       }
+
+        int diskCount = 0;
+        int deviceCount = 0;
+        List<VirtualDeviceConfigSpec> deviceConfigList = config.getDeviceChange();
+        for (VirtualDeviceConfigSpec deviceSpec : deviceConfigList) {
+            Boolean osDisk = false;
+            VirtualDevice device = deviceSpec.getDevice();
+            if (device instanceof VirtualDisk) {
+                if ((osDiskSeqNumber == 0 && diskCount == 0) || osDiskSeqNumber == deviceCount) {
+                    osDisk = true;
+                }
+                s_logger.info("MDOVA ovfPath diskCount " + diskCount);
+                Pair<String, Boolean> ovfVolumeInfo = new Pair<String, Boolean>(files.get(diskCount), osDisk);
+                ovfVolumeInfos.add(ovfVolumeInfo);
+                diskCount++;
+            }
+            deviceCount++;
+        }
+        return ovfVolumeInfos;
+    }
+
+    public static void createOvfFile(VmwareHypervisorHost host, String diskFileName, String ovfName, String datastorePath, String templatePath, long diskCapacity, long fileSize,
+            ManagedObjectReference morDs) throws Exception {
+        VmwareContext context = host.getContext();
+        ManagedObjectReference morOvf = context.getServiceContent().getOvfManager();
+        VirtualMachineMO workerVmMo = HypervisorHostHelper.createWorkerVM(host, new DatastoreMO(context, morDs), ovfName);
+        if (workerVmMo == null)
+            throw new Exception("Unable to find just-created worker VM");
+
+        String[] disks = {datastorePath + File.separator + diskFileName};
+        try {
+            VirtualMachineConfigSpec vmConfigSpec = new VirtualMachineConfigSpec();
+            VirtualDeviceConfigSpec deviceConfigSpec = new VirtualDeviceConfigSpec();
+
+            // Reconfigure worker VM with datadisk
+            VirtualDevice device = VmwareHelper.prepareDiskDevice(workerVmMo, null, -1, disks, morDs, -1, 1);
+            deviceConfigSpec.setDevice(device);
+            deviceConfigSpec.setOperation(VirtualDeviceConfigSpecOperation.ADD);
+            vmConfigSpec.getDeviceChange().add(deviceConfigSpec);
+            workerVmMo.configureVm(vmConfigSpec);
+
+            // Write OVF descriptor file
+            OvfCreateDescriptorParams ovfDescParams = new OvfCreateDescriptorParams();
+            String deviceId = File.separator + workerVmMo.getMor().getValue() + File.separator + "VirtualIDEController0:0";
+            OvfFile ovfFile = new OvfFile();
+            ovfFile.setPath(diskFileName);
+            ovfFile.setDeviceId(deviceId);
+            ovfFile.setSize(fileSize);
+            ovfFile.setCapacity(diskCapacity);
+            ovfDescParams.getOvfFiles().add(ovfFile);
+            OvfCreateDescriptorResult ovfCreateDescriptorResult = context.getService().createDescriptor(morOvf, workerVmMo.getMor(), ovfDescParams);
+
+            String ovfPath = templatePath + File.separator + ovfName + ".ovf";
+            s_logger.info("MDOVA createOvfFile ovfPath " + ovfPath);
+            try {
+                FileWriter out = new FileWriter(ovfPath);
+                out.write(ovfCreateDescriptorResult.getOvfDescriptor());
+                out.close();
+            } catch (Exception e) {
+                throw e;
+            }
+        } finally {
+            workerVmMo.detachAllDisks();
+            workerVmMo.destroy();
+        }
+    }
+
    /**
     * Maps an OVF boot-disk device location (e.g. "scsi0:1" or "ide1:0") to the sequence
     * number of that disk among the VirtualDisk devices in the import spec.
     *
     * NOTE(review): assumes deviceLocation has the form "&lt;controller&gt;&lt;n&gt;:&lt;unit&gt;";
     * malformed input throws NumberFormatException/ArrayIndexOutOfBoundsException, which the
     * caller (readOVF) catches and treats as "first disk is the OS disk" — confirm that is
     * the intended contract.
     *
     * @param config         import spec config whose device changes are scanned
     * @param deviceLocation controller/unit location string from the OVF extra config
     * @return index of the matching disk (counting disks only); if no disk matches, the
     *         total number of disks seen is returned
     */
    public static int getOsDiskFromOvfConf(VirtualMachineConfigSpec config, String deviceLocation) {
        List<VirtualDeviceConfigSpec> deviceConfigList = config.getDeviceChange();
        int controllerKey = 0;
        int deviceSeqNumber = 0;
        int controllerNumber = 0;
        int deviceNodeNumber = 0;
        int controllerCount = 0;
        String[] virtualNodeInfo = deviceLocation.split(":");

        s_logger.info("MDOVA getOsDiskFromOvfConf deviceLocation " + deviceLocation);
        if (deviceLocation.startsWith("scsi")) {

           controllerNumber = Integer.parseInt(virtualNodeInfo[0].substring(4)); // get substring excluding prefix scsi
           deviceNodeNumber = Integer.parseInt(virtualNodeInfo[1]);

           // Locate the Nth SCSI controller by order of appearance in the device list, not by
           // bus number (the bus-number check is deliberately commented out below).
           for (VirtualDeviceConfigSpec deviceConfig : deviceConfigList) {
               VirtualDevice device = deviceConfig.getDevice();
               if (device instanceof VirtualSCSIController) {
                   if (controllerNumber == controllerCount) { //((VirtualSCSIController)device).getBusNumber()) {
                       controllerKey = device.getKey();
                       break;
                   }
                   controllerCount++;
               }
           }
       } else {
           // NOTE(review): any non-"scsi" location falls through here; substring(3) assumes an
           // "ide" prefix — confirm callers only ever pass scsi/ide locations.
           controllerNumber = Integer.parseInt(virtualNodeInfo[0].substring(3)); // get substring excluding prefix ide
           deviceNodeNumber = Integer.parseInt(virtualNodeInfo[1]);
           controllerCount = 0;

           for (VirtualDeviceConfigSpec deviceConfig : deviceConfigList) {
               VirtualDevice device = deviceConfig.getDevice();
               if (device instanceof VirtualIDEController) {
                   if (controllerNumber == controllerCount) { //((VirtualIDEController)device).getBusNumber()) {
                       // Only 2 IDE controllers supported and they will have bus numbers 0 and 1
                       controllerKey = device.getKey();
                       break;
                   }
                   controllerCount++;
               }
           }
       }
       // Get devices on this controller at specific device node.
       // NOTE(review): getControllerKey()/getUnitNumber() are auto-unboxed here; a disk with
       // either unset would NPE — confirm disks in an import spec always carry both.
       for (VirtualDeviceConfigSpec deviceConfig : deviceConfigList) {
           VirtualDevice device = deviceConfig.getDevice();
           if (device instanceof VirtualDisk) {
               if (controllerKey == device.getControllerKey() && deviceNodeNumber == device.getUnitNumber()) {
                   break;
               }
               deviceSeqNumber++;
           }
       }
       return deviceSeqNumber;
   }
+
+   public static String getOVFParamValue(VirtualMachineConfigSpec config) {
+       String paramVal = "";
+       List<OptionValue> options = config.getExtraConfig();
+       for (OptionValue option : options) {
+           if (OVA_OPTION_KEY_BOOTDISK.equalsIgnoreCase(option.getKey())) {
+               paramVal = (String)option.getValue();
+               break;
+           }
+       }
+       return paramVal;
+   }
+
+
     public static String getScsiController(Pair<String, String> controllerInfo, String recommendedController) {
         String rootDiskController = controllerInfo.first();
         String dataDiskController = controllerInfo.second();


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

Mime
View raw message