cloudstack-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From GitBox <...@apache.org>
Subject [GitHub] rhtyd commented on a change in pull request #2146: CLOUDSTACK-4757: Support OVA files with multiple disks for templates
Date Thu, 01 Jan 1970 00:00:00 GMT
rhtyd commented on a change in pull request #2146: CLOUDSTACK-4757: Support OVA files with
multiple disks for templates
URL: https://github.com/apache/cloudstack/pull/2146#discussion_r160731088
 
 

 ##########
 File path: api/src/com/cloud/agent/api/storage/OVFHelper.java
 ##########
 @@ -0,0 +1,333 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.agent.api.storage;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+
+import org.apache.log4j.Logger;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+import org.xml.sax.SAXException;
+
+import com.cloud.agent.api.to.DatadiskTO;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+public class OVFHelper {
+    private static final Logger s_logger = Logger.getLogger(OVFHelper.class);
+
+    public List<DatadiskTO> getOVFVolumeInfo(final String ovfFilePath) {
+        if (ovfFilePath == null || ovfFilePath.isEmpty()) {
+            return null;
+        }
+        ArrayList<OVFFile> vf = new ArrayList<OVFFile>();
+        ArrayList<OVFDisk> vd = new ArrayList<OVFDisk>();
+
+        File ovfFile = new File(ovfFilePath);
+        try {
+            final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(new
File(ovfFilePath));
+            NodeList disks = doc.getElementsByTagName("Disk");
+            NodeList files = doc.getElementsByTagName("File");
+            NodeList items = doc.getElementsByTagName("Item");
+            boolean toggle = true;
+            for (int j = 0; j < files.getLength(); j++) {
+                Element file = (Element)files.item(j);
+                OVFFile of = new OVFFile();
+                of._href = file.getAttribute("ovf:href");
+                if (of._href.endsWith("vmdk") || of._href.endsWith("iso")) {
+                    s_logger.info("MDOVA getOVFVolumeInfo File href = " + of._href);
+                    of._id = file.getAttribute("ovf:id");
+                    s_logger.info("MDOVA getOVFVolumeInfo File Id = " + of._id);
+                    String size = file.getAttribute("ovf:size");
+                    if (size != null && !size.isEmpty()) {
+                        of._size = Long.parseLong(size);
+                    } else {
+                        String dataDiskPath = ovfFile.getParent() + File.separator + of._href;
+                        File this_file = new File(dataDiskPath);
+                        of._size = this_file.length();
+                    }
+                    of._iso = of._href.endsWith("iso");
+                    if (toggle && !of._iso) {
+                        of._bootable = true;
+                        toggle = !toggle;
+                    }
+                    vf.add(of);
+                }
+            }
+            for (int i = 0; i < disks.getLength(); i++) {
+                Element disk = (Element)disks.item(i);
+                OVFDisk od = new OVFDisk();
+                String virtualSize = disk.getAttribute("ovf:capacity");
+                if (virtualSize == null || virtualSize.isEmpty()) {
+                    od._capacity = 0L;
+                } else {
+                    od._capacity = Long.parseLong(virtualSize);
+                }
+                String allocationUnits = disk.getAttribute("ovf:capacityAllocationUnits");
+                od._diskId = disk.getAttribute("ovf:diskId");
+                s_logger.info("MDOVA getOVFVolumeInfo Disk ovf:diskId  = " + od._diskId);
+                od._fileRef = disk.getAttribute("ovf:fileRef");
+                s_logger.info("MDOVA getOVFVolumeInfo Disk ovf:fileRef  = " + od._fileRef);
+                od._populatedSize = Long.parseLong(disk.getAttribute("ovf:populatedSize")
== null ? "0" : disk.getAttribute("ovf:populatedSize"));
+                s_logger.info("MDOVA getOVFVolumeInfo Disk _populatedSize  = " + od._populatedSize);
+
+                if ((od._capacity != 0) && (allocationUnits != null)) {
+
+                    long units = 1;
+                    if (allocationUnits.equalsIgnoreCase("KB") || allocationUnits.equalsIgnoreCase("KiloBytes")
|| allocationUnits.equalsIgnoreCase("byte * 2^10")) {
+                        units = 1024;
+                    } else if (allocationUnits.equalsIgnoreCase("MB") || allocationUnits.equalsIgnoreCase("MegaBytes")
|| allocationUnits.equalsIgnoreCase("byte * 2^20")) {
+                        units = 1024 * 1024;
+                    } else if (allocationUnits.equalsIgnoreCase("GB") || allocationUnits.equalsIgnoreCase("GigaBytes")
|| allocationUnits.equalsIgnoreCase("byte * 2^30")) {
+                        units = 1024 * 1024 * 1024;
+                    }
+                    od._capacity = od._capacity * units;
+                    s_logger.info("MDOVA getOVFVolumeInfo Disk _capacity  = " + od._capacity);
+                }
+                od._controller = getControllerType(items, od._diskId);
+                vd.add(od);
+            }
+
+        } catch (SAXException | IOException | ParserConfigurationException e) {
+            s_logger.error("Unexpected exception caught while parsing ovf file:" + ovfFilePath,
e);
+            throw new CloudRuntimeException(e);
+        }
+
+        List<DatadiskTO> disksTO = new ArrayList<DatadiskTO>();
+        for (OVFFile of : vf) {
+            if (of._id == null || of._id.isEmpty()){
+                s_logger.error("The ovf file info is incomplete file info");
+                throw new CloudRuntimeException("The ovf file info has incomplete file info");
+            }
+            OVFDisk cdisk = getDisk(of._id, vd);
+            if (cdisk == null && !of._iso){
+                s_logger.error("The ovf file info has incomplete disk info");
+                throw new CloudRuntimeException("The ovf file info has incomplete disk info");
+            }
+            Long capacity = cdisk == null ? of._size : cdisk._capacity;
+            String controller = cdisk == null ? "" : cdisk._controller._name;
+            String controllerSubType = cdisk == null ? "" : cdisk._controller._subType;
+            String dataDiskPath = ovfFile.getParent() + File.separator + of._href;
+            s_logger.info("MDOVA getOVFVolumeInfo diskName = " + of._href + ", dataDiskPath
= " + dataDiskPath);
+            File f = new File(dataDiskPath);
+            if (!f.exists() || f.isDirectory()) {
+                s_logger.error("One of the attached disk or iso does not exists " + dataDiskPath);
+                throw new CloudRuntimeException("One of the attached disk or iso as stated
on OVF does not exists " + dataDiskPath);
+            }
+            disksTO.add(new DatadiskTO(dataDiskPath, capacity, of._size, of._id, of._iso,
of._bootable, controller, controllerSubType));
+        }
+        //check if first disk is an iso move it to the end
+        DatadiskTO fd = disksTO.get(0);
+        if (fd.isIso()) {
+            disksTO.remove(0);
+            disksTO.add(fd);
+        }
+        return disksTO;
+    }
+
+    private OVFDiskController getControllerType(final NodeList itemList, final String diskId)
{
 
 Review comment:
  @rafaelweingartner I think we explored but did not find any open-source OVF reading/writing/editing
library. This implements a subset of a general OVF-manipulating logic to support multidisk+iso
ova. I think we can refactor this as a next step and explore options in the future.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

Mime
View raw message