cloudstack-commits mailing list archives

From mlsoren...@apache.org
Subject [2/3] CLOUDSTACK-6191 Add support for specifying volume provisioning type (thin, sparse, fat) in disk/compute offerings.
Date Thu, 10 Apr 2014 15:28:59 GMT
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
index b13fe65..fb35d45 100644
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
@@ -21,6 +21,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
+import java.util.HashMap;
+
 
 import org.apache.commons.codec.binary.Base64;
 import org.apache.log4j.Logger;
@@ -55,6 +57,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtStoragePoolXMLParser;
 import com.cloud.hypervisor.kvm.resource.LibvirtStorageVolumeDef;
 import com.cloud.hypervisor.kvm.resource.LibvirtStorageVolumeDef.volFormat;
 import com.cloud.hypervisor.kvm.resource.LibvirtStorageVolumeXMLParser;
+import com.cloud.storage.Storage;
 import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.storage.StorageLayer;
 import com.cloud.utils.exception.CloudRuntimeException;
@@ -614,16 +617,103 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
     }
 
     @Override
-    public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, PhysicalDiskFormat format, long size) {
-        LibvirtStoragePool libvirtPool = (LibvirtStoragePool)pool;
+    public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool,
+            PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) {
+
+        switch (pool.getType()){
+            case RBD:
+                return createPhysicalDiskOnRBD(name, pool, format, provisioningType, size);
+            case NetworkFilesystem:
+            case Filesystem:
+                switch (format){
+                    case QCOW2:
+                        return createPhysicalDiskByQemuImg(name, pool, format, provisioningType, size);
+                    case RAW:
+                        return createPhysicalDiskByQemuImg(name, pool, format, provisioningType, size);
+                    case DIR:
+                        return createPhysicalDiskByLibVirt(name, pool, format, provisioningType, size);
+                    case TAR:
+                        return createPhysicalDiskByLibVirt(name, pool, format, provisioningType, size);
+                    default:
+                        throw new CloudRuntimeException("Unexpected disk format specified.");
+                }
+            default:
+                return createPhysicalDiskByLibVirt(name, pool, format, provisioningType, size);
+        }
+    }
+
+    private KVMPhysicalDisk createPhysicalDiskByLibVirt(String name, KVMStoragePool pool,
+                                                        PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) {
+        LibvirtStoragePool libvirtPool = (LibvirtStoragePool) pool;
         StoragePool virtPool = libvirtPool.getPool();
-        LibvirtStorageVolumeDef.volFormat libvirtformat = null;
+        LibvirtStorageVolumeDef.volFormat libvirtformat = LibvirtStorageVolumeDef.volFormat.getFormat(format);
 
         String volPath = null;
         String volName = null;
         long volAllocation = 0;
         long volCapacity = 0;
 
+        LibvirtStorageVolumeDef volDef = new LibvirtStorageVolumeDef(name,
+                size, libvirtformat, null, null);
+        s_logger.debug(volDef.toString());
+        try {
+            StorageVol vol = virtPool.storageVolCreateXML(volDef.toString(), 0);
+            volPath = vol.getPath();
+            volName = vol.getName();
+            volAllocation = vol.getInfo().allocation;
+            volCapacity = vol.getInfo().capacity;
+        } catch (LibvirtException e) {
+            throw new CloudRuntimeException(e.toString());
+        }
+
+        KVMPhysicalDisk disk = new KVMPhysicalDisk(volPath, volName, pool);
+        disk.setFormat(format);
+        disk.setSize(volAllocation);
+        disk.setVirtualSize(volCapacity);
+        return disk;
+    }
+
+
+    private KVMPhysicalDisk createPhysicalDiskByQemuImg(String name, KVMStoragePool pool,
+                                                    PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) {
+        String volPath = pool.getLocalPath() + "/" + name;
+        String volName = name;
+        long volAllocation = 0;
+        long volCapacity = 0;
+
+        final int timeout = 0;
+
+        QemuImgFile destFile = new QemuImgFile(volPath);
+        destFile.setFormat(format);
+        destFile.setSize(size);
+        QemuImg qemu = new QemuImg(timeout);
+        Map<String, String> options = new HashMap<String, String>();
+        if (pool.getType() == StoragePoolType.NetworkFilesystem){
+            options.put("preallocation", QemuImg.PreallocationType.getPreallocationType(provisioningType).toString());
+        }
+
+        try{
+            qemu.create(destFile, options);
+            Map<String, String> info = qemu.info(destFile);
+            volAllocation = Long.parseLong(info.get(new String("actual-size")));
+            volCapacity = Long.parseLong(info.get(new String("virtual-size")));
+        } catch (QemuImgException e) {
+            s_logger.error("Failed to create " + volPath +
+                    " due to a failed executing of qemu-img: " + e.getMessage());
+        }
+
+        KVMPhysicalDisk disk = new KVMPhysicalDisk(volPath, volName, pool);
+        disk.setFormat(format);
+        disk.setSize(volAllocation);
+        disk.setVirtualSize(volCapacity);
+        return disk;
+    }
+
+    private KVMPhysicalDisk createPhysicalDiskOnRBD(String name, KVMStoragePool pool,
+                                               PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) {
+        LibvirtStoragePool libvirtPool = (LibvirtStoragePool) pool;
+        String volPath = null;
+
         /**
          * To have RBD function properly we want RBD images of format 2
          * libvirt currently defaults to format 1
@@ -631,63 +721,34 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
          * For that reason we use the native RBD bindings to create the
          * RBD image until libvirt creates RBD format 2 by default
          */
-        if (pool.getType() == StoragePoolType.RBD) {
-            format = PhysicalDiskFormat.RAW;
-
-            try {
-                s_logger.info("Creating RBD image " + pool.getSourceDir() + "/" + name + " with size " + size);
-
-                Rados r = new Rados(pool.getAuthUserName());
-                r.confSet("mon_host", pool.getSourceHost() + ":" + pool.getSourcePort());
-                r.confSet("key", pool.getAuthSecret());
-                r.confSet("client_mount_timeout", "30");
-                r.connect();
-                s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host"));
+        format = PhysicalDiskFormat.RAW;
 
-                IoCTX io = r.ioCtxCreate(pool.getSourceDir());
-                Rbd rbd = new Rbd(io);
-                rbd.create(name, size, rbdFeatures, rbdOrder);
-
-                r.ioCtxDestroy(io);
-            } catch (RadosException e) {
-                throw new CloudRuntimeException(e.toString());
-            } catch (RbdException e) {
-                throw new CloudRuntimeException(e.toString());
-            }
+        try {
+            s_logger.info("Creating RBD image " + pool.getSourceDir() + "/" + name + " with size " + size);
 
-            volPath = pool.getSourceDir() + "/" + name;
-            volName = name;
-            volCapacity = size;
-            volAllocation = size;
-        } else {
+            Rados r = new Rados(pool.getAuthUserName());
+            r.confSet("mon_host", pool.getSourceHost() + ":" + pool.getSourcePort());
+            r.confSet("key", pool.getAuthSecret());
+            r.confSet("client_mount_timeout", "30");
+            r.connect();
+            s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host"));
 
-            if (format == PhysicalDiskFormat.QCOW2) {
-                libvirtformat = LibvirtStorageVolumeDef.volFormat.QCOW2;
-            } else if (format == PhysicalDiskFormat.RAW) {
-                libvirtformat = LibvirtStorageVolumeDef.volFormat.RAW;
-            } else if (format == PhysicalDiskFormat.DIR) {
-                libvirtformat = LibvirtStorageVolumeDef.volFormat.DIR;
-            } else if (format == PhysicalDiskFormat.TAR) {
-                libvirtformat = LibvirtStorageVolumeDef.volFormat.TAR;
-            }
+            IoCTX io = r.ioCtxCreate(pool.getSourceDir());
+            Rbd rbd = new Rbd(io);
+            rbd.create(name, size, this.rbdFeatures, this.rbdOrder);
 
-            LibvirtStorageVolumeDef volDef = new LibvirtStorageVolumeDef(name, size, libvirtformat, null, null);
-            s_logger.debug(volDef.toString());
-            try {
-                StorageVol vol = virtPool.storageVolCreateXML(volDef.toString(), 0);
-                volPath = vol.getPath();
-                volName = vol.getName();
-                volAllocation = vol.getInfo().allocation;
-                volCapacity = vol.getInfo().capacity;
-            } catch (LibvirtException e) {
-                throw new CloudRuntimeException(e.toString());
-            }
+            r.ioCtxDestroy(io);
+        } catch (RadosException e) {
+            throw new CloudRuntimeException(e.toString());
+        } catch (RbdException e) {
+            throw new CloudRuntimeException(e.toString());
         }
 
-        KVMPhysicalDisk disk = new KVMPhysicalDisk(volPath, volName, pool);
+        volPath = pool.getSourceDir() + "/" + name;
+        KVMPhysicalDisk disk = new KVMPhysicalDisk(volPath, name, pool);
         disk.setFormat(format);
-        disk.setSize(volAllocation);
-        disk.setVirtualSize(volCapacity);
+        disk.setSize(size);
+        disk.setVirtualSize(size);
         return disk;
     }
 
@@ -806,38 +867,44 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
      * If it has been created on Primary Storage, it will be copied on the Primary Storage
      */
     @Override
-    public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) {
+    public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template,
+            String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout) {
 
-        String newUuid = name;
-        KVMStoragePool srcPool = template.getPool();
         KVMPhysicalDisk disk = null;
 
-        /*
-            With RBD you can't run qemu-img convert with an existing RBD image as destination
-            qemu-img will exit with the error that the destination already exists.
-            So for RBD we don't create the image, but let qemu-img do that for us.
-
-            We then create a KVMPhysicalDisk object that we can return
-         */
-        try {
-            if (destPool.getType() != StoragePoolType.RBD) {
-                disk = destPool.createPhysicalDisk(newUuid, format, template.getVirtualSize());
+        if (destPool.getType() == StoragePoolType.RBD) {
+            disk = createDiskFromTemplateOnRBD(template, name, format, provisioningType, size, destPool, timeout);
+        } else {
+            try {
+                String newUuid = name;
+                disk = destPool.createPhysicalDisk(newUuid, format, provisioningType, template.getVirtualSize());
                 if (template.getFormat() == PhysicalDiskFormat.TAR) {
-                    Script.runSimpleBashScript("tar -x -f " + template.getPath() + " -C " + disk.getPath(), timeout);
+                    Script.runSimpleBashScript("tar -x -f " + template.getPath() + " -C " + disk.getPath(), timeout); // TO BE FIXED to aware provisioningType
                 } else if (template.getFormat() == PhysicalDiskFormat.DIR) {
                     Script.runSimpleBashScript("mkdir -p " + disk.getPath());
                     Script.runSimpleBashScript("chmod 755 " + disk.getPath());
-                    Script.runSimpleBashScript("cp -p -r " + template.getPath() + "/* " + disk.getPath(), timeout);
+                    Script.runSimpleBashScript("cp -p -r " + template.getPath() + "/* " + disk.getPath(), timeout); // TO BE FIXED to aware provisioningType
                 } else if (format == PhysicalDiskFormat.QCOW2) {
-                    QemuImgFile backingFile = new QemuImgFile(template.getPath(), template.getFormat());
-                    QemuImgFile destFile = new QemuImgFile(disk.getPath());
+                    QemuImg qemu = new QemuImg(timeout);
+                    QemuImgFile destFile = new QemuImgFile(disk.getPath(), format);
                     if (size > template.getVirtualSize()) {
                         destFile.setSize(size);
                     } else {
                         destFile.setSize(template.getVirtualSize());
                     }
-                    QemuImg qemu = new QemuImg(timeout);
-                    qemu.create(destFile, backingFile);
+                    Map<String, String> options = new HashMap<String, String>();
+                    options.put("preallocation", QemuImg.PreallocationType.getPreallocationType(provisioningType).toString());
+                    switch(provisioningType){
+                        case THIN:
+                            QemuImgFile backingFile = new QemuImgFile(template.getPath(), template.getFormat());
+                            qemu.create(destFile, backingFile, options);
+                            break;
+                        case SPARSE:
+                        case FAT:
+                            QemuImgFile srcFile = new QemuImgFile(template.getPath(), template.getFormat());
+                            qemu.convert(srcFile, destFile, options);
+                            break;
+                    }
                 } else if (format == PhysicalDiskFormat.RAW) {
                     QemuImgFile sourceFile = new QemuImgFile(template.getPath(), template.getFormat());
                     QemuImgFile destFile = new QemuImgFile(disk.getPath(), PhysicalDiskFormat.RAW);
@@ -847,143 +914,167 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
                         destFile.setSize(template.getVirtualSize());
                     }
                     QemuImg qemu = new QemuImg(timeout);
-                    qemu.convert(sourceFile, destFile);
-                }
-            } else {
-                format = PhysicalDiskFormat.RAW;
-                disk = new KVMPhysicalDisk(destPool.getSourceDir() + "/" + newUuid, newUuid, destPool);
-                disk.setFormat(format);
-                if (size > template.getVirtualSize()) {
-                    disk.setSize(size);
-                    disk.setVirtualSize(size);
-                } else {
-                    // leave these as they were if size isn't applicable
-                    disk.setSize(template.getVirtualSize());
-                    disk.setVirtualSize(disk.getSize());
+                    Map<String, String> options = new HashMap<String, String>();
+                    qemu.convert(sourceFile, destFile, options);
                 }
+            } catch (QemuImgException e) {
+                s_logger.error("Failed to create " + disk.getPath() +
+                        " due to a failed executing of qemu-img: " + e.getMessage());
+            }
+        }
 
-                QemuImg qemu = new QemuImg(timeout);
-                QemuImgFile srcFile;
-                QemuImgFile destFile =
-                        new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(destPool.getSourceHost(), destPool.getSourcePort(), destPool.getAuthUserName(),
-                                destPool.getAuthSecret(), disk.getPath()));
-                destFile.setFormat(format);
-                if (size > template.getVirtualSize()) {
-                    destFile.setSize(size);
-                } else {
-                    destFile.setSize(template.getVirtualSize());
-                }
+        if (disk == null) {
+            throw new CloudRuntimeException("Failed to create disk from template " + template.getName());
+        }
 
-                if (srcPool.getType() != StoragePoolType.RBD) {
-                    srcFile = new QemuImgFile(template.getPath(), template.getFormat());
-                    qemu.convert(srcFile, destFile);
-                } else {
+        return disk;
+    }
 
-                    /**
-                     * We have to find out if the source file is in the same RBD pool and has
-                     * RBD format 2 before we can do a layering/clone operation on the RBD image
-                     *
-                     * This will be the case when the template is already on Primary Storage and
-                     * we want to copy it
-                     */
+    private KVMPhysicalDisk createDiskFromTemplateOnRBD(KVMPhysicalDisk template,
+            String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout){
 
-                    try {
-                        if ((srcPool.getSourceHost().equals(destPool.getSourceHost())) && (srcPool.getSourceDir().equals(destPool.getSourceDir()))) {
+        /*
+            With RBD you can't run qemu-img convert with an existing RBD image as destination
+            qemu-img will exit with the error that the destination already exists.
+            So for RBD we don't create the image, but let qemu-img do that for us.
+
+            We then create a KVMPhysicalDisk object that we can return
+        */
+
+        KVMStoragePool srcPool = template.getPool();
+        KVMPhysicalDisk disk = null;
+        String newUuid = name;
+
+        format = PhysicalDiskFormat.RAW;
+        disk = new KVMPhysicalDisk(destPool.getSourceDir() + "/" + newUuid, newUuid, destPool);
+        disk.setFormat(format);
+        if (size > template.getVirtualSize()) {
+            disk.setSize(size);
+            disk.setVirtualSize(size);
+        } else {
+            // leave these as they were if size isn't applicable
+            disk.setSize(template.getVirtualSize());
+            disk.setVirtualSize(disk.getSize());
+        }
+
+
+        QemuImg qemu = new QemuImg(timeout);
+        QemuImgFile srcFile;
+        QemuImgFile destFile = new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(destPool.getSourceHost(),
+                destPool.getSourcePort(),
+                destPool.getAuthUserName(),
+                destPool.getAuthSecret(),
+                disk.getPath()));
+        destFile.setFormat(format);
+
+
+        if (srcPool.getType() != StoragePoolType.RBD) {
+            srcFile = new QemuImgFile(template.getPath(), template.getFormat());
+            try{
+                qemu.convert(srcFile, destFile);
+            } catch (QemuImgException e) {
+                s_logger.error("Failed to create " + disk.getPath() +
+                        " due to a failed executing of qemu-img: " + e.getMessage());
+            }
+        } else {
+
+            /**
+             * We have to find out if the source file is in the same RBD pool and has
+             * RBD format 2 before we can do a layering/clone operation on the RBD image
+             *
+             * This will be the case when the template is already on Primary Storage and
+             * we want to copy it
+             */
+
+            try {
+                if ((srcPool.getSourceHost().equals(destPool.getSourceHost())) && (srcPool.getSourceDir().equals(destPool.getSourceDir()))) {
                             /* We are on the same Ceph cluster, but we require RBD format 2 on the source image */
-                            s_logger.debug("Trying to perform a RBD clone (layering) since we are operating in the same storage pool");
+                    s_logger.debug("Trying to perform a RBD clone (layering) since we are operating in the same storage pool");
 
-                            Rados r = new Rados(srcPool.getAuthUserName());
-                            r.confSet("mon_host", srcPool.getSourceHost() + ":" + srcPool.getSourcePort());
-                            r.confSet("key", srcPool.getAuthSecret());
-                            r.confSet("client_mount_timeout", "30");
-                            r.connect();
-                            s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host"));
+                    Rados r = new Rados(srcPool.getAuthUserName());
+                    r.confSet("mon_host", srcPool.getSourceHost() + ":" + srcPool.getSourcePort());
+                    r.confSet("key", srcPool.getAuthSecret());
+                    r.confSet("client_mount_timeout", "30");
+                    r.connect();
+                    s_logger.debug("Succesfully connected to Ceph cluster at " + r.confGet("mon_host"));
 
-                            IoCTX io = r.ioCtxCreate(srcPool.getSourceDir());
-                            Rbd rbd = new Rbd(io);
-                            RbdImage srcImage = rbd.open(template.getName());
+                    IoCTX io = r.ioCtxCreate(srcPool.getSourceDir());
+                    Rbd rbd = new Rbd(io);
+                    RbdImage srcImage = rbd.open(template.getName());
 
-                            if (srcImage.isOldFormat()) {
+                    if (srcImage.isOldFormat()) {
                                 /* The source image is RBD format 1, we have to do a regular copy */
-                                s_logger.debug("The source image " + srcPool.getSourceDir() + "/" + template.getName() +
-                                        " is RBD format 1. We have to perform a regular copy (" + disk.getVirtualSize() + " bytes)");
+                        s_logger.debug("The source image " + srcPool.getSourceDir() + "/" + template.getName() +
+                                " is RBD format 1. We have to perform a regular copy (" + disk.getVirtualSize() + " bytes)");
 
-                                rbd.create(disk.getName(), disk.getVirtualSize(), rbdFeatures, rbdOrder);
-                                RbdImage destImage = rbd.open(disk.getName());
+                        rbd.create(disk.getName(), disk.getVirtualSize(), rbdFeatures, rbdOrder);
+                        RbdImage destImage = rbd.open(disk.getName());
 
-                                s_logger.debug("Starting to copy " + srcImage.getName() + " to " + destImage.getName() + " in Ceph pool " + srcPool.getSourceDir());
-                                rbd.copy(srcImage, destImage);
+                        s_logger.debug("Starting to copy " + srcImage.getName() +  " to " + destImage.getName() + " in Ceph pool " + srcPool.getSourceDir());
+                        rbd.copy(srcImage, destImage);
 
-                                s_logger.debug("Finished copying " + srcImage.getName() + " to " + destImage.getName() + " in Ceph pool " + srcPool.getSourceDir());
-                                rbd.close(destImage);
-                            } else {
-                                s_logger.debug("The source image " + srcPool.getSourceDir() + "/" + template.getName() +
-                                        " is RBD format 2. We will perform a RBD clone using snapshot " + rbdTemplateSnapName);
+                        s_logger.debug("Finished copying " + srcImage.getName() +  " to " + destImage.getName() + " in Ceph pool " + srcPool.getSourceDir());
+                        rbd.close(destImage);
+                    } else {
+                        s_logger.debug("The source image " + srcPool.getSourceDir() + "/" + template.getName()
+                                + " is RBD format 2. We will perform a RBD clone using snapshot "
+                                + this.rbdTemplateSnapName);
                                 /* The source image is format 2, we can do a RBD snapshot+clone (layering) */
-                                rbd.clone(template.getName(), rbdTemplateSnapName, io, disk.getName(), rbdFeatures, rbdOrder);
-                                s_logger.debug("Succesfully cloned " + template.getName() + "@" + rbdTemplateSnapName + " to " + disk.getName());
-                            }
+                        rbd.clone(template.getName(), this.rbdTemplateSnapName, io, disk.getName(), this.rbdFeatures, this.rbdOrder);
+                        s_logger.debug("Succesfully cloned " + template.getName() + "@" + this.rbdTemplateSnapName + " to " + disk.getName());
+                    }
 
-                            rbd.close(srcImage);
-                            r.ioCtxDestroy(io);
-                        } else {
+                    rbd.close(srcImage);
+                    r.ioCtxDestroy(io);
+                } else {
                             /* The source pool or host is not the same Ceph cluster, we do a simple copy with Qemu-Img */
-                            s_logger.debug("Both the source and destination are RBD, but not the same Ceph cluster. Performing a copy");
+                    s_logger.debug("Both the source and destination are RBD, but not the same Ceph cluster. Performing a copy");
 
-                            Rados rSrc = new Rados(srcPool.getAuthUserName());
-                            rSrc.confSet("mon_host", srcPool.getSourceHost() + ":" + srcPool.getSourcePort());
-                            rSrc.confSet("key", srcPool.getAuthSecret());
-                            rSrc.confSet("client_mount_timeout", "30");
-                            rSrc.connect();
-                            s_logger.debug("Succesfully connected to source Ceph cluster at " + rSrc.confGet("mon_host"));
+                    Rados rSrc = new Rados(srcPool.getAuthUserName());
+                    rSrc.confSet("mon_host", srcPool.getSourceHost() + ":" + srcPool.getSourcePort());
+                    rSrc.confSet("key", srcPool.getAuthSecret());
+                    rSrc.confSet("client_mount_timeout", "30");
+                    rSrc.connect();
+                    s_logger.debug("Succesfully connected to source Ceph cluster at " + rSrc.confGet("mon_host"));
 
-                            Rados rDest = new Rados(destPool.getAuthUserName());
-                            rDest.confSet("mon_host", destPool.getSourceHost() + ":" + destPool.getSourcePort());
-                            rDest.confSet("key", destPool.getAuthSecret());
-                            rDest.confSet("client_mount_timeout", "30");
-                            rDest.connect();
-                            s_logger.debug("Succesfully connected to source Ceph cluster at " + rDest.confGet("mon_host"));
+                    Rados rDest = new Rados(destPool.getAuthUserName());
+                    rDest.confSet("mon_host", destPool.getSourceHost() + ":" + destPool.getSourcePort());
+                    rDest.confSet("key", destPool.getAuthSecret());
+                    rDest.confSet("client_mount_timeout", "30");
+                    rDest.connect();
+                    s_logger.debug("Succesfully connected to source Ceph cluster at " + rDest.confGet("mon_host"));
 
-                            IoCTX sIO = rSrc.ioCtxCreate(srcPool.getSourceDir());
-                            Rbd sRbd = new Rbd(sIO);
+                    IoCTX sIO = rSrc.ioCtxCreate(srcPool.getSourceDir());
+                    Rbd sRbd = new Rbd(sIO);
 
-                            IoCTX dIO = rDest.ioCtxCreate(destPool.getSourceDir());
-                            Rbd dRbd = new Rbd(dIO);
+                    IoCTX dIO = rDest.ioCtxCreate(destPool.getSourceDir());
+                    Rbd dRbd = new Rbd(dIO);
 
-                            s_logger.debug("Creating " + disk.getName() + " on the destination cluster " + rDest.confGet("mon_host") + " in pool " +
-                                    destPool.getSourceDir());
-                            dRbd.create(disk.getName(), disk.getVirtualSize(), rbdFeatures, rbdOrder);
+                    s_logger.debug("Creating " + disk.getName() + " on the destination cluster " + rDest.confGet("mon_host") + " in pool " +
+                            destPool.getSourceDir());
+                    dRbd.create(disk.getName(), disk.getVirtualSize(), rbdFeatures, rbdOrder);
 
-                            RbdImage srcImage = sRbd.open(template.getName());
-                            RbdImage destImage = dRbd.open(disk.getName());
+                    RbdImage srcImage = sRbd.open(template.getName());
+                    RbdImage destImage = dRbd.open(disk.getName());
 
-                            s_logger.debug("Copying " + template.getName() + " from Ceph cluster " + rSrc.confGet("mon_host") + " to " + disk.getName() + " on cluster " +
-                                    rDest.confGet("mon_host"));
-                            sRbd.copy(srcImage, destImage);
+                    s_logger.debug("Copying " + template.getName() + " from Ceph cluster " + rSrc.confGet("mon_host") + " to " + disk.getName()
+                            + " on cluster " + rDest.confGet("mon_host"));
+                    sRbd.copy(srcImage, destImage);
 
-                            sRbd.close(srcImage);
-                            dRbd.close(destImage);
+                    sRbd.close(srcImage);
+                    dRbd.close(destImage);
 
-                            rSrc.ioCtxDestroy(sIO);
-                            rDest.ioCtxDestroy(dIO);
-                        }
-                    } catch (RadosException e) {
-                        s_logger.error("Failed to perform a RADOS action on the Ceph cluster, the error was: " + e.getMessage());
-                        disk = null;
-                    } catch (RbdException e) {
-                        s_logger.error("Failed to perform a RBD action on the Ceph cluster, the error was: " + e.getMessage());
-                        disk = null;
-                    }
+                    rSrc.ioCtxDestroy(sIO);
+                    rDest.ioCtxDestroy(dIO);
                 }
+            } catch (RadosException e) {
+                s_logger.error("Failed to perform a RADOS action on the Ceph cluster, the error was: " + e.getMessage());
+                disk = null;
+            } catch (RbdException e) {
+                s_logger.error("Failed to perform a RBD action on the Ceph cluster, the error was: " + e.getMessage());
+                disk = null;
             }
-        } catch (QemuImgException e) {
-            s_logger.error("Failed to create " + disk.getPath() + " due to a failed executing of qemu-img: " + e.getMessage());
-        }
-
-        if (disk == null) {
-            throw new CloudRuntimeException("Failed to create disk from template " + template.getName());
         }
-
         return disk;
     }
 
@@ -1037,13 +1128,13 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
         s_logger.debug("copyPhysicalDisk: disk size:" + disk.getSize() + ", virtualsize:" + disk.getVirtualSize()+" format:"+disk.getFormat());
         if (destPool.getType() != StoragePoolType.RBD) {
             if (disk.getFormat() == PhysicalDiskFormat.TAR) {
-                newDisk = destPool.createPhysicalDisk(name, PhysicalDiskFormat.DIR, disk.getVirtualSize());
+                newDisk = destPool.createPhysicalDisk(name, PhysicalDiskFormat.DIR, Storage.ProvisioningType.THIN, disk.getVirtualSize());
             } else {
                 /* If the source device is on a RBD storage pool force the new disk to the same format (RAW) */
                 if (srcPool.getType() != StoragePoolType.RBD) {
-                    newDisk = destPool.createPhysicalDisk(name, disk.getVirtualSize());
+                    newDisk = destPool.createPhysicalDisk(name, Storage.ProvisioningType.THIN, disk.getVirtualSize());
                 } else {
-                    newDisk = destPool.createPhysicalDisk(name, sourceFormat, disk.getVirtualSize());
+                    newDisk = destPool.createPhysicalDisk(name, sourceFormat, Storage.ProvisioningType.THIN, disk.getVirtualSize());
                 }
             }
         } else {
@@ -1071,7 +1162,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
                 srcFile = new QemuImgFile(sourcePath, sourceFormat);
                 try {
                     Map<String, String> info = qemu.info(srcFile);
-                    String backingFile = info.get(new String("backing_file"));
+                    String backingFile = info.get(new String("backing-file"));
                     // qcow2 templates can just be copied into place
                     if (sourceFormat.equals(destFormat) && backingFile == null && sourcePath.endsWith(".qcow2")) {
                         String result = Script.runSimpleBashScript("cp -f " + sourcePath + " " + destPath, timeout);

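A caller-side sketch of the qemu-img path added above may help readers of the patch; it assumes an NFS-backed pool that is already mounted, and the path and size are illustrative only, not taken from the commit. It shows how an offering's provisioning type becomes the single "preallocation" option handed to qemu-img (thin maps to off, sparse to metadata, fat to full):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.cloudstack.utils.qemu.QemuImg;
    import org.apache.cloudstack.utils.qemu.QemuImgFile;
    import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;

    import com.cloud.storage.Storage;

    public class QcowPreallocationSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical volume path on an NFS primary storage mount.
            QemuImgFile destFile = new QemuImgFile("/mnt/primary/0a1b2c3d-sketch");
            destFile.setFormat(PhysicalDiskFormat.QCOW2);
            destFile.setSize(10L * 1024 * 1024 * 1024); // 10 GB virtual size

            // SPARSE maps to preallocation=metadata via the new PreallocationType enum.
            Map<String, String> options = new HashMap<String, String>();
            options.put("preallocation",
                    QemuImg.PreallocationType.getPreallocationType(
                            Storage.ProvisioningType.SPARSE).toString());

            // Roughly: qemu-img create -f qcow2 -o preallocation=metadata <path> 10G
            new QemuImg(0).create(destFile, options);
        }
    }

The RBD branch bypasses qemu-img and keeps creating format-2 images through the rados/rbd bindings, as before.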
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java
index 605d154..7678f1d 100644
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java
@@ -25,6 +25,7 @@ import org.libvirt.StoragePool;
 
 import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
 
+import com.cloud.storage.Storage;
 import com.cloud.storage.Storage.StoragePoolType;
 import com.cloud.utils.exception.CloudRuntimeException;
 
@@ -113,13 +114,16 @@ public class LibvirtStoragePool implements KVMStoragePool {
     }
 
     @Override
-    public KVMPhysicalDisk createPhysicalDisk(String name, PhysicalDiskFormat format, long size) {
-        return this._storageAdaptor.createPhysicalDisk(name, this, format, size);
+    public KVMPhysicalDisk createPhysicalDisk(String name,
+            PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) {
+        return this._storageAdaptor
+                .createPhysicalDisk(name, this, format, provisioningType, size);
     }
 
     @Override
-    public KVMPhysicalDisk createPhysicalDisk(String name, long size) {
-        return this._storageAdaptor.createPhysicalDisk(name, this, this.getDefaultFormat(), size);
+    public KVMPhysicalDisk createPhysicalDisk(String name, Storage.ProvisioningType provisioningType, long size) {
+        return this._storageAdaptor.createPhysicalDisk(name, this,
+                this.getDefaultFormat(), provisioningType, size);
     }
 
     @Override

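A short usage sketch of the widened pool API; the helper class and method names are hypothetical, and "pool" stands for any already-registered KVMStoragePool:

    import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;

    import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
    import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
    import com.cloud.storage.Storage;

    public class PoolProvisioningSketch {

        // Explicit format plus provisioning type.
        static KVMPhysicalDisk createThinQcow2(KVMStoragePool pool, String name) {
            return pool.createPhysicalDisk(name,
                    PhysicalDiskFormat.QCOW2,
                    Storage.ProvisioningType.THIN,
                    20L * 1024 * 1024 * 1024); // 20 GB virtual size
        }

        // Second overload: the pool falls back to its default format but still
        // honors the requested provisioning type.
        static KVMPhysicalDisk createFatDefaultFormat(KVMStoragePool pool, String name) {
            return pool.createPhysicalDisk(name,
                    Storage.ProvisioningType.FAT,
                    20L * 1024 * 1024 * 1024);
        }
    }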
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java
index c751aab..ff14148 100644
--- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java
+++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java
@@ -21,6 +21,7 @@ import java.util.Map;
 
 import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
 
+import com.cloud.storage.Storage;
 import com.cloud.storage.Storage.StoragePoolType;
 
 public interface StorageAdaptor {
@@ -35,7 +36,8 @@ public interface StorageAdaptor {
 
     public boolean deleteStoragePool(String uuid);
 
-    public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, PhysicalDiskFormat format, long size);
+    public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool,
+            PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size);
 
     // given disk path (per database) and pool, prepare disk on host
     public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map<String, String> details);
@@ -49,7 +51,9 @@ public interface StorageAdaptor {
 
     public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool);
 
-    public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout);
+    public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template,
+            String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size,
+            KVMStoragePool destPool, int timeout);
 
     public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool);
 

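Out-of-tree StorageAdaptor implementations have to adopt the widened signatures as well; the skeleton below is a sketch only (class name and bodies are placeholders, declared abstract so the unchanged interface methods can be omitted here):

    import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;

    import com.cloud.hypervisor.kvm.storage.KVMPhysicalDisk;
    import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
    import com.cloud.hypervisor.kvm.storage.StorageAdaptor;
    import com.cloud.storage.Storage;

    public abstract class ProvisioningAwareAdaptorSketch implements StorageAdaptor {

        @Override
        public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool,
                PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) {
            // A real adaptor would honor provisioningType here, for example by
            // mapping it to a qemu-img preallocation option or a backend-specific flag.
            throw new UnsupportedOperationException("sketch only");
        }

        @Override
        public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name,
                PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size,
                KVMStoragePool destPool, int timeout) {
            throw new UnsupportedOperationException("sketch only");
        }
    }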
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java
index 4bec375..56ed607 100644
--- a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java
+++ b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java
@@ -16,11 +16,16 @@
 // under the License.
 package org.apache.cloudstack.utils.qemu;
 
-import java.util.HashMap;
 import java.util.Map;
+import com.cloud.storage.Storage;
 
-import com.cloud.utils.script.OutputInterpreter;
 import com.cloud.utils.script.Script;
+import com.cloud.utils.script.OutputInterpreter;
+import com.google.gson.Gson;
+import com.google.gson.reflect.TypeToken;
+import org.apache.commons.lang.NotImplementedException;
+
+import java.lang.reflect.Type;
 
 public class QemuImg {
 
@@ -43,6 +48,35 @@ public class QemuImg {
         }
     }
 
+    public static enum PreallocationType {
+        Off("off"),
+        Metadata("metadata"),
+        Full("full");
+
+        private final String preallocationType;
+
+        private PreallocationType(String preallocationType){
+            this.preallocationType = preallocationType;
+        }
+
+        public String toString(){
+            return this.preallocationType;
+        }
+
+        public static PreallocationType getPreallocationType(Storage.ProvisioningType provisioningType){
+            switch (provisioningType){
+                case THIN:
+                    return PreallocationType.Off;
+                case SPARSE:
+                    return PreallocationType.Metadata;
+                case FAT:
+                    return PreallocationType.Full;
+                default:
+                    throw new NotImplementedException();
+            }
+        }
+    }
+
     public QemuImg(int timeout) {
         this.timeout = timeout;
     }
@@ -251,9 +285,9 @@ public class QemuImg {
      * Qemu-img returns human readable output, but this method does it's best
      * to turn that into machine readeable data.
      *
-     * Spaces in keys are replaced by underscores (_).
-     * Sizes (virtual_size and disk_size) are returned in bytes
-     * Paths (image and backing_file) are the absolute path to the file
+     * Spaces in keys are replaced by hyphen-minus (-).
+     * Sizes (virtual-size and disk-size) are returned in bytes
+     * Paths (image and backing-file) are the absolute path to the file
      *
      * @param file
      *            A QemuImgFile object containing the file to get the information from
@@ -262,6 +296,8 @@ public class QemuImg {
     public Map<String, String> info(QemuImgFile file) throws QemuImgException {
         Script s = new Script(_qemuImgPath);
         s.add("info");
+        s.add("--output");
+        s.add("json");
         s.add(file.getFileName());
         OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
         String result = s.execute(parser);
@@ -269,24 +305,9 @@ public class QemuImg {
             throw new QemuImgException(result);
         }
 
-        HashMap<String, String> info = new HashMap<String, String>();
-        String[] outputBuffer = parser.getLines().trim().split("\n");
-        for (int i = 0; i < outputBuffer.length; i++) {
-            String[] lineBuffer = outputBuffer[i].split(":", 2);
-            if (lineBuffer.length == 2) {
-                String key = lineBuffer[0].trim().replace(" ", "_");
-                String value = null;
-
-                if (key.equals("virtual_size")) {
-                    value = lineBuffer[1].trim().replaceAll("^.*\\(([0-9]+).*$", "$1");
-                } else {
-                    value = lineBuffer[1].trim();
-                }
-
-                info.put(key, value);
-            }
-        }
-        return info;
+        Type stringStringMap = new TypeToken<Map<String, String>>(){}.getType();
+        Gson gson = new Gson();
+        return gson.fromJson(parser.getLines(), stringStringMap);
     }
 
     /* List, apply, create or delete snapshots in image */

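Because info() now shells out to "qemu-img info --output json" and feeds the result straight through Gson, keys come back hyphenated. A small sketch of the new parsing, using a made-up JSON payload in place of real qemu-img output:

    import java.lang.reflect.Type;
    import java.util.Map;

    import com.google.gson.Gson;
    import com.google.gson.reflect.TypeToken;

    public class QemuImgJsonSketch {
        public static void main(String[] args) {
            // Hypothetical "qemu-img info --output json" payload; values are made up.
            String json = "{\"virtual-size\": 10737418240, \"format\": \"qcow2\","
                    + " \"actual-size\": 197120, \"filename\": \"/tmp/example.qcow2\"}";

            Type mapType = new TypeToken<Map<String, String>>(){}.getType();
            Map<String, String> info = new Gson().fromJson(json, mapType);

            // Callers now read hyphenated keys (virtual-size, actual-size, backing-file)
            // instead of the old underscored ones.
            long virtualSize = Long.parseLong(info.get("virtual-size"));
            System.out.println("virtual-size in bytes: " + virtualSize);
        }
    }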
http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/plugins/hypervisors/kvm/test/org/apache/cloudstack/utils/qemu/QemuImgTest.java
----------------------------------------------------------------------
diff --git a/plugins/hypervisors/kvm/test/org/apache/cloudstack/utils/qemu/QemuImgTest.java b/plugins/hypervisors/kvm/test/org/apache/cloudstack/utils/qemu/QemuImgTest.java
index 8bdff4d..9059f8c 100644
--- a/plugins/hypervisors/kvm/test/org/apache/cloudstack/utils/qemu/QemuImgTest.java
+++ b/plugins/hypervisors/kvm/test/org/apache/cloudstack/utils/qemu/QemuImgTest.java
@@ -17,9 +17,11 @@
 package org.apache.cloudstack.utils.qemu;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 import java.io.File;
+import com.cloud.utils.script.Script;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.UUID;
@@ -29,6 +31,7 @@ import org.junit.Test;
 
 import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
 
+
 @Ignore
 public class QemuImgTest {
 
@@ -48,7 +51,7 @@ public class QemuImgTest {
             fail("We didn't get any information back from qemu-img");
         }
 
-        Long infoSize = Long.parseLong(info.get(new String("virtual_size")));
+        Long infoSize = Long.parseLong(info.get(new String("virtual-size")));
         assertEquals(Long.valueOf(size), Long.valueOf(infoSize));
 
         String infoPath = info.get(new String("image"));
@@ -75,13 +78,13 @@ public class QemuImgTest {
         qemu.create(file, options);
         Map<String, String> info = qemu.info(file);
 
-        Long infoSize = Long.parseLong(info.get(new String("virtual_size")));
+        Long infoSize = Long.parseLong(info.get(new String("virtual-size")));
         assertEquals(Long.valueOf(size), Long.valueOf(infoSize));
 
         String infoPath = info.get(new String("image"));
         assertEquals(filename, infoPath);
 
-        String infoClusterSize = info.get(new String("cluster_size"));
+        String infoClusterSize = info.get(new String("cluster-size"));
         assertEquals(clusterSize, infoClusterSize);
 
         File f = new File(filename);
@@ -90,6 +93,31 @@ public class QemuImgTest {
     }
 
     @Test
+    public void testCreateSparseVolume() throws QemuImgException {
+        String filename = "/tmp/" + UUID.randomUUID() + ".qcow2";
+
+        /* 10TB virtual_size */
+        long size = 10995116277760l;
+        QemuImgFile file = new QemuImgFile(filename, size, PhysicalDiskFormat.QCOW2);
+        String preallocation = "metadata";
+        Map<String, String> options = new HashMap<String, String>();
+
+        options.put("preallocation", preallocation);
+
+        QemuImg qemu = new QemuImg(0);
+        qemu.create(file, options);
+
+        String allocatedSize = Script.runSimpleBashScript(String.format("ls -alhs %s | awk '{print $1}'", filename));
+        String declaredSize  = Script.runSimpleBashScript(String.format("ls -alhs %s | awk '{print $6}'", filename));
+
+        assertFalse(allocatedSize.equals(declaredSize));
+
+        File f = new File(filename);
+        f.delete();
+
+    }
+
+    @Test
     public void testCreateAndResize() throws QemuImgException {
         String filename = "/tmp/" + UUID.randomUUID() + ".qcow2";
 
@@ -107,7 +135,7 @@ public class QemuImgTest {
                 fail("We didn't get any information back from qemu-img");
             }
 
-            Long infoSize = Long.parseLong(info.get(new String("virtual_size")));
+            Long infoSize = Long.parseLong(info.get(new String("virtual-size")));
             assertEquals(Long.valueOf(endSize), Long.valueOf(infoSize));
         } catch (QemuImgException e) {
             fail(e.getMessage());
@@ -136,7 +164,7 @@ public class QemuImgTest {
                 fail("We didn't get any information back from qemu-img");
             }
 
-            Long infoSize = Long.parseLong(info.get(new String("virtual_size")));
+            Long infoSize = Long.parseLong(info.get(new String("virtual-size")));
             assertEquals(Long.valueOf(startSize + increment), Long.valueOf(infoSize));
         } catch (QemuImgException e) {
             fail(e.getMessage());
@@ -164,7 +192,7 @@ public class QemuImgTest {
                 fail("We didn't get any information back from qemu-img");
             }
 
-            Long infoSize = Long.parseLong(info.get(new String("virtual_size")));
+            Long infoSize = Long.parseLong(info.get(new String("virtual-size")));
             assertEquals(Long.valueOf(startSize + increment), Long.valueOf(infoSize));
         } catch (QemuImgException e) {
             fail(e.getMessage());
@@ -227,7 +255,7 @@ public class QemuImgTest {
             fail("We didn't get any information back from qemu-img");
         }
 
-        String backingFile = info.get(new String("backing_file"));
+        String backingFile = info.get(new String("backing-file"));
         if (backingFile == null) {
             fail("The second file does not have a property backing_file! Create failed?");
         }
@@ -275,10 +303,10 @@ public class QemuImgTest {
 
         Map<String, String> info = qemu.info(destFile);
 
-        PhysicalDiskFormat infoFormat = PhysicalDiskFormat.valueOf(info.get(new String("file_format")).toUpperCase());
+        PhysicalDiskFormat infoFormat = PhysicalDiskFormat.valueOf(info.get(new String("format")).toUpperCase());
         assertEquals(destFormat, infoFormat);
 
-        Long infoSize = Long.parseLong(info.get(new String("virtual_size")));
+        Long infoSize = Long.parseLong(info.get(new String("virtual-size")));
         assertEquals(Long.valueOf(srcSize), Long.valueOf(infoSize));
 
         File sf = new File(srcFileName);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java
----------------------------------------------------------------------
diff --git a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java
index 2a7bcac..0415bae 100644
--- a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java
+++ b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java
@@ -83,6 +83,7 @@ import com.cloud.offering.ServiceOffering;
 import com.cloud.offerings.dao.NetworkOfferingDao;
 import com.cloud.service.ServiceOfferingVO;
 import com.cloud.service.dao.ServiceOfferingDao;
+import com.cloud.storage.Storage;
 import com.cloud.user.Account;
 import com.cloud.user.AccountService;
 import com.cloud.utils.NumbersUtil;
@@ -294,8 +295,9 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast
         _elasticLbVmRamSize = NumbersUtil.parseInt(configs.get(Config.ElasticLoadBalancerVmMemory.key()), DEFAULT_ELB_VM_RAMSIZE);
         _elasticLbvmCpuMHz = NumbersUtil.parseInt(configs.get(Config.ElasticLoadBalancerVmCpuMhz.key()), DEFAULT_ELB_VM_CPU_MHZ);
         _elasticLbvmNumCpu = NumbersUtil.parseInt(configs.get(Config.ElasticLoadBalancerVmNumVcpu.key()), 1);
-        _elasticLbVmOffering = new ServiceOfferingVO("System Offering For Elastic LB VM", _elasticLbvmNumCpu, _elasticLbVmRamSize, _elasticLbvmCpuMHz, 0, 0, true, null,
-                useLocalStorage, true, null, true, VirtualMachine.Type.ElasticLoadBalancerVm, true);
+        _elasticLbVmOffering = new ServiceOfferingVO("System Offering For Elastic LB VM", _elasticLbvmNumCpu,
+                _elasticLbVmRamSize, _elasticLbvmCpuMHz, 0, 0, true, null, Storage.ProvisioningType.THIN, useLocalStorage,
+                true, null, true, VirtualMachine.Type.ElasticLoadBalancerVm, true);
         _elasticLbVmOffering.setUniqueName(ServiceOffering.elbVmDefaultOffUniqueName);
         _elasticLbVmOffering = _serviceOfferingDao.persistSystemServiceOffering(_elasticLbVmOffering);
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java
----------------------------------------------------------------------
diff --git a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java
index 89707c9..a794390 100644
--- a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java
+++ b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java
@@ -92,6 +92,7 @@ import com.cloud.service.ServiceOfferingVO;
 import com.cloud.service.dao.ServiceOfferingDao;
 import com.cloud.storage.VMTemplateVO;
 import com.cloud.storage.dao.VMTemplateDao;
+import com.cloud.storage.Storage;
 import com.cloud.user.Account;
 import com.cloud.user.AccountManager;
 import com.cloud.user.User;
@@ -379,7 +380,8 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In
             boolean useLocalStorage = Boolean.parseBoolean(configs.get(Config.SystemVMUseLocalStorage.key()));
             ServiceOfferingVO newOff =
                 new ServiceOfferingVO("System Offering For Internal LB VM", 1, InternalLoadBalancerVMManager.DEFAULT_INTERNALLB_VM_RAMSIZE,
-                    InternalLoadBalancerVMManager.DEFAULT_INTERNALLB_VM_CPU_MHZ, null, null, true, null, useLocalStorage, true, null, true,
+                    InternalLoadBalancerVMManager.DEFAULT_INTERNALLB_VM_CPU_MHZ, null, null, true, null,
+                    Storage.ProvisioningType.THIN, useLocalStorage, true, null, true,
                     VirtualMachine.Type.InternalLoadBalancerVm, true);
             newOff.setUniqueName(ServiceOffering.internalLbVmDefaultOffUniqueName);
             newOff = _serviceOfferingDao.persistSystemServiceOffering(newOff);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbvmmgr/InternalLBVMManagerTest.java
----------------------------------------------------------------------
diff --git a/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbvmmgr/InternalLBVMManagerTest.java b/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbvmmgr/InternalLBVMManagerTest.java
index f418586..375ba5e 100644
--- a/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbvmmgr/InternalLBVMManagerTest.java
+++ b/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbvmmgr/InternalLBVMManagerTest.java
@@ -24,6 +24,7 @@ import java.util.List;
 
 import javax.inject.Inject;
 
+import com.cloud.storage.Storage;
 import junit.framework.TestCase;
 
 import org.junit.Before;
@@ -117,8 +118,8 @@ public class InternalLBVMManagerTest extends TestCase {
     public void setUp() {
         //mock system offering creation as it's used by configure() method called by initComponentsLifeCycle
         Mockito.when(_accountMgr.getAccount(1L)).thenReturn(new AccountVO());
-        ServiceOfferingVO off =
-            new ServiceOfferingVO("alena", 1, 1, 1, 1, 1, false, "alena", false, false, null, false, VirtualMachine.Type.InternalLoadBalancerVm, false);
+        ServiceOfferingVO off = new ServiceOfferingVO("alena", 1, 1,
+                1, 1, 1, false, "alena", Storage.ProvisioningType.THIN, false, false, null, false, VirtualMachine.Type.InternalLoadBalancerVm, false);
         off = setId(off, 1);
         Mockito.when(_svcOffDao.persistSystemServiceOffering(Matchers.any(ServiceOfferingVO.class))).thenReturn(off);
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbvmmgr/InternalLBVMServiceTest.java
----------------------------------------------------------------------
diff --git a/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbvmmgr/InternalLBVMServiceTest.java b/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbvmmgr/InternalLBVMServiceTest.java
index 62ca09b..11a0bed 100644
--- a/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbvmmgr/InternalLBVMServiceTest.java
+++ b/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbvmmgr/InternalLBVMServiceTest.java
@@ -20,6 +20,7 @@ import java.lang.reflect.Field;
 
 import javax.inject.Inject;
 
+import com.cloud.storage.Storage;
 import junit.framework.TestCase;
 
 import org.junit.After;
@@ -87,8 +88,8 @@ public class InternalLBVMServiceTest extends TestCase {
     public void setUp() {
         //mock system offering creation as it's used by configure() method called by initComponentsLifeCycle
         Mockito.when(_accountMgr.getAccount(1L)).thenReturn(new AccountVO());
-        ServiceOfferingVO off =
-            new ServiceOfferingVO("alena", 1, 1, 1, 1, 1, false, "alena", false, false, null, false, VirtualMachine.Type.InternalLoadBalancerVm, false);
+        ServiceOfferingVO off = new ServiceOfferingVO("alena", 1, 1,
+                1, 1, 1, false, "alena", Storage.ProvisioningType.THIN, false, false, null, false, VirtualMachine.Type.InternalLoadBalancerVm, false);
         off = setId(off, 1);
         Mockito.when(_svcOffDao.persistSystemServiceOffering(Matchers.any(ServiceOfferingVO.class))).thenReturn(off);
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java
index bf28c03..a99f19d 100644
--- a/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java
+++ b/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java
@@ -59,6 +59,7 @@ public class DiskOfferingJoinDaoImpl extends GenericDaoBase<DiskOfferingJoinVO,
         diskOfferingResponse.setId(offering.getUuid());
         diskOfferingResponse.setName(offering.getName());
         diskOfferingResponse.setDisplayText(offering.getDisplayText());
+        diskOfferingResponse.setProvisioningType(offering.getProvisioningType().toString());
         diskOfferingResponse.setCreated(offering.getCreated());
         diskOfferingResponse.setDiskSize(offering.getDiskSize() / (1024 * 1024 * 1024));
         diskOfferingResponse.setMinIops(offering.getMinIops());

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java
index 7fcc5c6..4b4c73b 100644
--- a/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java
+++ b/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java
@@ -59,6 +59,7 @@ public class ServiceOfferingJoinDaoImpl extends GenericDaoBase<ServiceOfferingJo
         offeringResponse.setDefaultUse(offering.isDefaultUse());
         offeringResponse.setSystemVmType(offering.getSystemVmType());
         offeringResponse.setDisplayText(offering.getDisplayText());
+        offeringResponse.setProvisioningType(offering.getProvisioningType().toString());
         offeringResponse.setCpuNumber(offering.getCpu());
         offeringResponse.setCpuSpeed(offering.getSpeed());
         offeringResponse.setMemory(offering.getRamSize());

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
index 8a0431b..04e99cf 100644
--- a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
+++ b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java
@@ -99,6 +99,8 @@ public class VolumeJoinDaoImpl extends GenericDaoBase<VolumeJoinVO, Long> implem
             volResponse.setVirtualMachineDisplayName(volume.getVmDisplayName());
         }
 
+        volResponse.setProvisioningType(volume.getProvisioningType().toString());
+
         // Show the virtual size of the volume
         volResponse.setSize(volume.getSize());
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java b/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java
index e3e0a9e..1a1c787 100644
--- a/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java
+++ b/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java
@@ -23,6 +23,7 @@ import javax.persistence.Entity;
 import javax.persistence.Id;
 import javax.persistence.Table;
 
+import com.cloud.storage.Storage;
 import org.apache.cloudstack.api.Identity;
 import org.apache.cloudstack.api.InternalIdentity;
 
@@ -46,6 +47,9 @@ public class DiskOfferingJoinVO extends BaseViewVO implements InternalIdentity,
     @Column(name = "display_text")
     private String displayText;
 
+    @Column(name = "provisioning_type")
+    Storage.ProvisioningType provisioningType;
+
     @Column(name = "disk_size")
     long diskSize;
 
@@ -136,6 +140,10 @@ public class DiskOfferingJoinVO extends BaseViewVO implements InternalIdentity,
         return displayText;
     }
 
+    public Storage.ProvisioningType getProvisioningType(){
+        return provisioningType;
+    }
+
     public long getDiskSize() {
         return diskSize;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java b/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java
index dbeb530..f16ba83 100644
--- a/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java
+++ b/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java
@@ -23,6 +23,8 @@ import javax.persistence.Entity;
 import javax.persistence.Id;
 import javax.persistence.Table;
 
+import com.cloud.storage.Storage;
+
 import org.apache.cloudstack.api.Identity;
 import org.apache.cloudstack.api.InternalIdentity;
 
@@ -45,6 +47,9 @@ public class ServiceOfferingJoinVO extends BaseViewVO implements InternalIdentit
     @Column(name = "display_text")
     private String displayText;
 
+    @Column(name = "provisioning_type")
+    Storage.ProvisioningType provisioningType;
+
     @Column(name = "tags", length = 4096)
     String tags;
 
@@ -156,6 +161,10 @@ public class ServiceOfferingJoinVO extends BaseViewVO implements InternalIdentit
         return displayText;
     }
 
+    public Storage.ProvisioningType getProvisioningType(){
+        return provisioningType;
+    }
+
     public String getTags() {
         return tags;
     }

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/server/src/com/cloud/api/query/vo/VolumeJoinVO.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/api/query/vo/VolumeJoinVO.java b/server/src/com/cloud/api/query/vo/VolumeJoinVO.java
index 8165d68..cbc9efe 100644
--- a/server/src/com/cloud/api/query/vo/VolumeJoinVO.java
+++ b/server/src/com/cloud/api/query/vo/VolumeJoinVO.java
@@ -56,6 +56,10 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity {
     @Enumerated(EnumType.STRING)
     Volume.Type volumeType;
 
+    @Column(name = "provisioning_type")
+    @Enumerated(EnumType.STRING)
+    Storage.ProvisioningType provisioningType;
+
     @Column(name = "size")
     long size;
 
@@ -292,6 +296,10 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity {
         return volumeType;
     }
 
+    public Storage.ProvisioningType getProvisioningType(){
+        return provisioningType;
+    }
+
     public long getSize() {
         return size;
     }
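
The response-builder hunks above (DiskOfferingJoinDaoImpl, ServiceOfferingJoinDaoImpl, VolumeJoinDaoImpl) and the three view objects follow one pattern: each view gains a provisioning_type column mapped onto Storage.ProvisioningType, and the API responses expose it as a string via setProvisioningType(...toString()). VolumeJoinVO marks the field @Enumerated(EnumType.STRING) explicitly; the trimmed sketch below does the same for illustration only, and the entity name, table name, and id column are placeholders rather than anything taken from this patch.

    import javax.persistence.Column;
    import javax.persistence.Entity;
    import javax.persistence.EnumType;
    import javax.persistence.Enumerated;
    import javax.persistence.Id;
    import javax.persistence.Table;

    import com.cloud.storage.Storage;

    // Placeholder entity showing only the provisioning-type mapping used by the JoinVOs above.
    @Entity
    @Table(name = "example_offering_view")
    public class ExampleOfferingJoinVO {
        @Id
        @Column(name = "id")
        private long id;

        // With EnumType.STRING the enum is persisted by its name rather than its ordinal,
        // so the view's provisioning_type column stays human-readable.
        @Column(name = "provisioning_type")
        @Enumerated(EnumType.STRING)
        private Storage.ProvisioningType provisioningType;

        public Storage.ProvisioningType getProvisioningType() {
            return provisioningType;
        }
    }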

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
index 30afd7b..6aca4a8 100755
--- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
+++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java
@@ -175,6 +175,7 @@ import com.cloud.service.ServiceOfferingDetailsVO;
 import com.cloud.service.ServiceOfferingVO;
 import com.cloud.service.dao.ServiceOfferingDao;
 import com.cloud.service.dao.ServiceOfferingDetailsDao;
+import com.cloud.storage.Storage.ProvisioningType;
 import com.cloud.storage.DiskOfferingVO;
 import com.cloud.storage.dao.DiskOfferingDao;
 import com.cloud.test.IPRangeConfig;
@@ -2018,19 +2019,24 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
             }
         }
 
-        return createServiceOffering(userId, cmd.getIsSystem(), vmType, cmd.getServiceOfferingName(), cpuNumber, memory, cpuSpeed, cmd.getDisplayText(), localStorageRequired,
-                offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(), cmd.getNetworkRate(), cmd.getDeploymentPlanner(), cmd.getDetails(),
-                cmd.isCustomizedIops(), cmd.getMinIops(), cmd.getMaxIops(), cmd.getBytesReadRate(), cmd.getBytesWriteRate(), cmd.getIopsReadRate(), cmd.getIopsWriteRate(),
-                cmd.getHypervisorSnapshotReserve());
+        return createServiceOffering(userId, cmd.getIsSystem(), vmType, cmd.getServiceOfferingName(), cpuNumber, memory, cpuSpeed, cmd.getDisplayText(),
+                cmd.getProvisioningType(), localStorageRequired, offerHA, limitCpuUse, volatileVm, cmd.getTags(), cmd.getDomainId(), cmd.getHostTag(),
+                cmd.getNetworkRate(), cmd.getDeploymentPlanner(), cmd.getDetails(), cmd.isCustomizedIops(), cmd.getMinIops(), cmd.getMaxIops(),
+                cmd.getBytesReadRate(), cmd.getBytesWriteRate(), cmd.getIopsReadRate(), cmd.getIopsWriteRate(), cmd.getHypervisorSnapshotReserve());
     }
 
-    protected ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vmType, String name, Integer cpu, Integer ramSize, Integer speed,
-            String displayText, boolean localStorageRequired, boolean offerHA, boolean limitResourceUse, boolean volatileVm, String tags, Long domainId, String hostTag,
+    protected ServiceOfferingVO createServiceOffering(long userId, boolean isSystem, VirtualMachine.Type vmType,
+            String name, Integer cpu, Integer ramSize, Integer speed, String displayText, String provisioningType, boolean localStorageRequired,
+            boolean offerHA, boolean limitResourceUse, boolean volatileVm,  String tags, Long domainId, String hostTag,
             Integer networkRate, String deploymentPlanner, Map<String, String> details, Boolean isCustomizedIops, Long minIops, Long maxIops,
             Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate, Integer hypervisorSnapshotReserve) {
+
+        ProvisioningType typedProvisioningType = ProvisioningType.getProvisioningType(provisioningType);
+
         tags = StringUtils.cleanupTags(tags);
-        ServiceOfferingVO offering = new ServiceOfferingVO(name, cpu, ramSize, speed, networkRate, null, offerHA, limitResourceUse, volatileVm, displayText, localStorageRequired,
-                false, tags, isSystem, vmType, domainId, hostTag, deploymentPlanner);
+        ServiceOfferingVO offering = new ServiceOfferingVO(name, cpu, ramSize, speed, networkRate, null, offerHA,
+                limitResourceUse, volatileVm, displayText, typedProvisioningType, localStorageRequired, false, tags, isSystem, vmType,
+                domainId, hostTag, deploymentPlanner);
 
         if (isCustomizedIops != null) {
             bytesReadRate = null;
@@ -2200,8 +2206,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
         }
     }
 
-    protected DiskOfferingVO createDiskOffering(Long domainId, String name, String description, Long numGibibytes, String tags, boolean isCustomized, boolean localStorageRequired,
-            boolean isDisplayOfferingEnabled, Boolean isCustomizedIops, Long minIops, Long maxIops, Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate,
+    protected DiskOfferingVO createDiskOffering(Long domainId, String name, String description, String provisioningType,
+            Long numGibibytes, String tags, boolean isCustomized, boolean localStorageRequired,
+            boolean isDisplayOfferingEnabled, Boolean isCustomizedIops, Long minIops, Long maxIops,
+            Long bytesReadRate, Long bytesWriteRate, Long iopsReadRate, Long iopsWriteRate,
             Integer hypervisorSnapshotReserve) {
         long diskSize = 0;// special case for custom disk offerings
         if (numGibibytes != null && (numGibibytes <= 0)) {
@@ -2209,6 +2217,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
         } else if (numGibibytes != null && (numGibibytes > _maxVolumeSizeInGb)) {
             throw new InvalidParameterValueException("The maximum size for a disk is " + _maxVolumeSizeInGb + " Gb.");
         }
+        ProvisioningType typedProvisioningType = ProvisioningType.getProvisioningType(provisioningType);
 
         if (numGibibytes != null) {
             diskSize = numGibibytes * 1024 * 1024 * 1024;
@@ -2251,7 +2260,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
         }
 
         tags = StringUtils.cleanupTags(tags);
-        DiskOfferingVO newDiskOffering = new DiskOfferingVO(domainId, name, description, diskSize, tags, isCustomized, isCustomizedIops, minIops, maxIops);
+        DiskOfferingVO newDiskOffering = new DiskOfferingVO(domainId, name, description, typedProvisioningType, diskSize, tags, isCustomized,
+                isCustomizedIops, minIops, maxIops);
         newDiskOffering.setUseLocalStorage(localStorageRequired);
         newDiskOffering.setDisplayOffering(isDisplayOfferingEnabled);
 
@@ -2285,6 +2295,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
     public DiskOffering createDiskOffering(CreateDiskOfferingCmd cmd) {
         String name = cmd.getOfferingName();
         String description = cmd.getDisplayText();
+        String provisioningType = cmd.getProvisioningType();
         Long numGibibytes = cmd.getDiskSize();
         boolean isDisplayOfferingEnabled = cmd.getDisplayOffering() != null ? cmd.getDisplayOffering() : true;
         boolean isCustomized = cmd.isCustomized() != null ? cmd.isCustomized() : false; // false
@@ -2320,7 +2331,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
         Long iopsWriteRate = cmd.getIopsWriteRate();
         Integer hypervisorSnapshotReserve = cmd.getHypervisorSnapshotReserve();
 
-        return createDiskOffering(domainId, name, description, numGibibytes, tags, isCustomized, localStorageRequired, isDisplayOfferingEnabled, isCustomizedIops, minIops,
+        return createDiskOffering(domainId, name, description, provisioningType, numGibibytes, tags, isCustomized,
+                localStorageRequired, isDisplayOfferingEnabled, isCustomizedIops, minIops,
                 maxIops, bytesReadRate, bytesWriteRate, iopsReadRate, iopsWriteRate, hypervisorSnapshotReserve);
     }
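
In the ConfigurationManagerImpl changes above, the command objects still deliver the provisioning type as a plain string (cmd.getProvisioningType()), and both createServiceOffering and createDiskOffering convert it with ProvisioningType.getProvisioningType(...) before handing the typed value to the VO constructors. The converter itself is defined in com.cloud.storage.Storage elsewhere in this commit; the sketch below is only an assumed shape for that lookup, including the lowercase labels and the THIN fallback, not code taken from the patch.

    // Assumed sketch of the string-to-enum conversion used above; the real enum lives in
    // com.cloud.storage.Storage, and its labels and default here are assumptions.
    public enum ProvisioningType {
        THIN("thin"), SPARSE("sparse"), FAT("fat");

        private final String provisionType;

        ProvisioningType(String provisionType) {
            this.provisionType = provisionType;
        }

        @Override
        public String toString() {
            return provisionType;
        }

        // Maps an API value such as "sparse" onto SPARSE; null falls back to THIN (assumption).
        public static ProvisioningType getProvisioningType(String provisioningType) {
            if (provisioningType == null) {
                return THIN;
            }
            for (ProvisioningType type : values()) {
                if (type.provisionType.equalsIgnoreCase(provisioningType)) {
                    return type;
                }
            }
            throw new IllegalArgumentException("Unexpected provisioning type: " + provisioningType);
        }
    }

With a lookup like this, an offering created with provisioning type "sparse" reaches the DiskOfferingVO and ServiceOfferingVO constructors above as ProvisioningType.SPARSE.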
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java
index 0512096..14d782a 100755
--- a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java
+++ b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java
@@ -31,6 +31,7 @@ import javax.inject.Inject;
 import javax.naming.ConfigurationException;
 
 import org.apache.cloudstack.config.ApiServiceConfiguration;
+
 import org.apache.cloudstack.context.CallContext;
 import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService;
 import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
@@ -100,6 +101,7 @@ import com.cloud.resource.ServerResource;
 import com.cloud.resource.UnableDeleteHostException;
 import com.cloud.service.ServiceOfferingVO;
 import com.cloud.service.dao.ServiceOfferingDao;
+import com.cloud.storage.Storage;
 import com.cloud.storage.StoragePoolStatus;
 import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
 import com.cloud.storage.VMTemplateVO;
@@ -1267,7 +1269,8 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy
             int ramSize = NumbersUtil.parseInt(_configDao.getValue("console.ram.size"), DEFAULT_PROXY_VM_RAMSIZE);
             int cpuFreq = NumbersUtil.parseInt(_configDao.getValue("console.cpu.mhz"), DEFAULT_PROXY_VM_CPUMHZ);
             _serviceOffering =
-                new ServiceOfferingVO("System Offering For Console Proxy", 1, ramSize, cpuFreq, 0, 0, false, null, useLocalStorage, true, null, true,
+                new ServiceOfferingVO("System Offering For Console Proxy", 1, ramSize, cpuFreq, 0, 0, false, null,
+                    Storage.ProvisioningType.THIN, useLocalStorage, true, null, true,
                     VirtualMachine.Type.ConsoleProxy, true);
             _serviceOffering.setUniqueName(ServiceOffering.consoleProxyDefaultOffUniqueName);
             _serviceOffering = _offeringDao.persistSystemServiceOffering(_serviceOffering);

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
index 3cd3e80..bbec5f7 100755
--- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
+++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java
@@ -226,6 +226,7 @@ import com.cloud.resource.ResourceManager;
 import com.cloud.server.ConfigurationServer;
 import com.cloud.service.ServiceOfferingVO;
 import com.cloud.service.dao.ServiceOfferingDao;
+import com.cloud.storage.Storage.ProvisioningType;
 import com.cloud.storage.VMTemplateVO;
 import com.cloud.storage.Volume;
 import com.cloud.storage.VolumeVO;
@@ -741,9 +742,8 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V
         _agentMgr.registerForHostEvents(new SshKeysDistriMonitor(_agentMgr, _hostDao, _configDao), true, false, false);
 
         final boolean useLocalStorage = Boolean.parseBoolean(configs.get(Config.SystemVMUseLocalStorage.key()));
-        _offering =
-                new ServiceOfferingVO("System Offering For Software Router", 1, _routerRamSize, _routerCpuMHz, null, null, true, null, useLocalStorage, true, null, true,
-                        VirtualMachine.Type.DomainRouter, true);
+        _offering = new ServiceOfferingVO("System Offering For Software Router", 1, _routerRamSize, _routerCpuMHz, null,
+                null, true, null, ProvisioningType.THIN, useLocalStorage, true, null, true, VirtualMachine.Type.DomainRouter, true);
         _offering.setUniqueName(ServiceOffering.routerDefaultOffUniqueName);
         _offering = _serviceOfferingDao.persistSystemServiceOffering(_offering);
 

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/server/src/com/cloud/server/ConfigurationServerImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/server/ConfigurationServerImpl.java b/server/src/com/cloud/server/ConfigurationServerImpl.java
index 13135b0..40f69aa 100755
--- a/server/src/com/cloud/server/ConfigurationServerImpl.java
+++ b/server/src/com/cloud/server/ConfigurationServerImpl.java
@@ -96,6 +96,7 @@ import com.cloud.offerings.dao.NetworkOfferingServiceMapDao;
 import com.cloud.service.ServiceOfferingVO;
 import com.cloud.service.dao.ServiceOfferingDao;
 import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.Storage.ProvisioningType;
 import com.cloud.storage.dao.DiskOfferingDao;
 import com.cloud.test.IPRangeConfig;
 import com.cloud.user.Account;
@@ -218,14 +219,14 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
             s_logger.debug("ConfigurationServer made secondary storage copy use realhostip.");
 
             // Save default service offerings
-            createServiceOffering(User.UID_SYSTEM, "Small Instance", 1, 512, 500, "Small Instance", false, false, null);
-            createServiceOffering(User.UID_SYSTEM, "Medium Instance", 1, 1024, 1000, "Medium Instance", false, false, null);
+            createServiceOffering(User.UID_SYSTEM, "Small Instance", 1, 512, 500, "Small Instance", ProvisioningType.THIN, false, false, null);
+            createServiceOffering(User.UID_SYSTEM, "Medium Instance", 1, 1024, 1000, "Medium Instance", ProvisioningType.THIN, false, false, null);
             // Save default disk offerings
-            createdefaultDiskOffering(null, "Small", "Small Disk, 5 GB", 5, null, false, false);
-            createdefaultDiskOffering(null, "Medium", "Medium Disk, 20 GB", 20, null, false, false);
-            createdefaultDiskOffering(null, "Large", "Large Disk, 100 GB", 100, null, false, false);
-            createdefaultDiskOffering(null, "Large", "Large Disk, 100 GB", 100, null, false, false);
-            createdefaultDiskOffering(null, "Custom", "Custom Disk", 0, null, true, false);
+            createdefaultDiskOffering(null, "Small", "Small Disk, 5 GB", ProvisioningType.THIN, 5, null, false, false);
+            createdefaultDiskOffering(null, "Medium", "Medium Disk, 20 GB", ProvisioningType.THIN, 20, null, false, false);
+            createdefaultDiskOffering(null, "Large", "Large Disk, 100 GB", ProvisioningType.THIN, 100, null, false, false);
+            createdefaultDiskOffering(null, "Large", "Large Disk, 100 GB", ProvisioningType.THIN, 100, null, false, false);
+            createdefaultDiskOffering(null, "Custom", "Custom Disk", ProvisioningType.THIN, 0, null, true, false);
 
             // Save the mount parent to the configuration table
             String mountParent = getMountParent();
@@ -1026,24 +1027,24 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio
         return pod;
     }
 
-    private DiskOfferingVO createdefaultDiskOffering(Long domainId, String name, String description, int numGibibytes, String tags, boolean isCustomized,
-            boolean isSystemUse) {
+    private DiskOfferingVO createdefaultDiskOffering(Long domainId, String name, String description, ProvisioningType provisioningType,
+            int numGibibytes, String tags, boolean isCustomized, boolean isSystemUse) {
         long diskSize = numGibibytes;
         diskSize = diskSize * 1024 * 1024 * 1024;
         tags = cleanupTags(tags);
 
-        DiskOfferingVO newDiskOffering = new DiskOfferingVO(domainId, name, description, diskSize, tags, isCustomized, null, null, null);
+        DiskOfferingVO newDiskOffering = new DiskOfferingVO(domainId, name, description, provisioningType, diskSize, tags, isCustomized, null, null, null);
         newDiskOffering.setUniqueName("Cloud.Com-" + name);
         newDiskOffering.setSystemUse(isSystemUse);
         newDiskOffering = _diskOfferingDao.persistDeafultDiskOffering(newDiskOffering);
         return newDiskOffering;
     }
 
-    private ServiceOfferingVO createServiceOffering(long userId, String name, int cpu, int ramSize, int speed, String displayText, boolean localStorageRequired,
-            boolean offerHA, String tags) {
+    private ServiceOfferingVO createServiceOffering(long userId, String name, int cpu, int ramSize, int speed, String displayText,
+            ProvisioningType provisioningType, boolean localStorageRequired, boolean offerHA, String tags) {
         tags = cleanupTags(tags);
         ServiceOfferingVO offering =
-                new ServiceOfferingVO(name, cpu, ramSize, speed, null, null, offerHA, displayText, localStorageRequired, false, tags, false, null, false);
+                new ServiceOfferingVO(name, cpu, ramSize, speed, null, null, offerHA, displayText, provisioningType, localStorageRequired, false, tags, false, null, false);
         offering.setUniqueName("Cloud.Com-" + name);
         offering = _serviceOfferingDao.persistSystemServiceOffering(offering);
         return offering;

http://git-wip-us.apache.org/repos/asf/cloudstack/blob/11f5bdd7/server/src/com/cloud/storage/VolumeApiServiceImpl.java
----------------------------------------------------------------------
diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
index c0741c5..7a0517f 100644
--- a/server/src/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java
@@ -330,10 +330,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
         return Transaction.execute(new TransactionCallback<VolumeVO>() {
             @Override
             public VolumeVO doInTransaction(TransactionStatus status) {
-        VolumeVO volume = new VolumeVO(volumeName, zoneId, -1, -1, -1, new Long(-1), null, null, 0, Volume.Type.DATADISK);
-        volume.setPoolId(null);
-        volume.setDataCenterId(zoneId);
-        volume.setPodId(null);
+                VolumeVO volume = new VolumeVO(volumeName, zoneId, -1, -1, -1, new Long(-1), null, null, Storage.ProvisioningType.THIN, 0, Volume.Type.DATADISK);
+                volume.setPoolId(null);
+                volume.setDataCenterId(zoneId);
+                volume.setPodId(null);
                 // to prevent a nullpointer deref I put the system account id here when no owner is given.
                 // TODO Decide if this is valid or whether  throwing a CloudRuntimeException is more appropriate
                 volume.setAccountId((owner == null) ? Account.ACCOUNT_ID_SYSTEM : owner.getAccountId());
@@ -391,6 +391,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
         Long zoneId = cmd.getZoneId();
         Long diskOfferingId = null;
         DiskOfferingVO diskOffering = null;
+        Storage.ProvisioningType provisioningType;
         Long size = null;
         Long minIops = null;
         Long maxIops = null;
@@ -477,6 +478,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
                 }
             }
 
+            provisioningType = diskOffering.getProvisioningType();
+
             if (!validateVolumeSizeRange(size)) {// convert size from mb to gb
                 // for validation
                 throw new InvalidParameterValueException("Invalid size for custom volume creation: " + size + " ,max volume size is:" + _maxVolumeSizeInGb);
@@ -502,6 +505,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
             size = snapshotCheck.getSize(); // ; disk offering is used for tags
             // purposes
 
+            provisioningType = diskOffering.getProvisioningType();
+
             // one step operation - create volume in VM's cluster and attach it
             // to the VM
             Long vmId = cmd.getVirtualMachineId();
@@ -545,54 +550,55 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic
             userSpecifiedName = getRandomVolumeName();
         }
 
-        VolumeVO volume = commitVolume(cmd, caller, ownerId, displayVolume, zoneId, diskOfferingId, size, minIops, maxIops, parentVolume, userSpecifiedName,
-                _uuidMgr.generateUuid(Volume.class, cmd.getCustomId()));
+        VolumeVO volume = commitVolume(cmd, caller, ownerId, displayVolume, zoneId, diskOfferingId, provisioningType, size,
+                minIops, maxIops, parentVolume, userSpecifiedName, _uuidMgr.generateUuid(Volume.class, cmd.getCustomId()));
 
         return volume;
     }
 
-    private VolumeVO commitVolume(final CreateVolumeCmd cmd, final Account caller, final long ownerId, final Boolean displayVolume, final Long zoneId,
-            final Long diskOfferingId, final Long size, final Long minIops, final Long maxIops, final VolumeVO parentVolume, final String userSpecifiedName, final String uuid) {
+    private VolumeVO commitVolume(final CreateVolumeCmd cmd, final Account caller, final long ownerId, final Boolean displayVolume,
+            final Long zoneId, final Long diskOfferingId, final Storage.ProvisioningType provisioningType, final Long size, final Long minIops, final Long maxIops, final VolumeVO parentVolume,
+            final String userSpecifiedName, final String uuid) {
         return Transaction.execute(new TransactionCallback<VolumeVO>() {
             @Override
             public VolumeVO doInTransaction(TransactionStatus status) {
-        VolumeVO volume = new VolumeVO(userSpecifiedName, -1, -1, -1, -1, new Long(-1), null, null, 0, Volume.Type.DATADISK);
-        volume.setPoolId(null);
+                VolumeVO volume = new VolumeVO(userSpecifiedName, -1, -1, -1, -1, new Long(-1), null, null, provisioningType, 0, Volume.Type.DATADISK);
+                volume.setPoolId(null);
                 volume.setUuid(uuid);
-        volume.setDataCenterId(zoneId);
-        volume.setPodId(null);
-        volume.setAccountId(ownerId);
-        volume.setDomainId(((caller == null) ? Domain.ROOT_DOMAIN : caller.getDomainId()));
-        volume.setDiskOfferingId(diskOfferingId);
-        volume.setSize(size);
-        volume.setMinIops(minIops);
-        volume.setMaxIops(maxIops);
-        volume.setInstanceId(null);
-        volume.setUpdated(new Date());
-        volume.setDomainId((caller == null) ? Domain.ROOT_DOMAIN : caller.getDomainId());
+                volume.setDataCenterId(zoneId);
+                volume.setPodId(null);
+                volume.setAccountId(ownerId);
+                volume.setDomainId(((caller == null) ? Domain.ROOT_DOMAIN : caller.getDomainId()));
+                volume.setDiskOfferingId(diskOfferingId);
+                volume.setSize(size);
+                volume.setMinIops(minIops);
+                volume.setMaxIops(maxIops);
+                volume.setInstanceId(null);
+                volume.setUpdated(new Date());
+                volume.setDomainId((caller == null) ? Domain.ROOT_DOMAIN : caller.getDomainId());
                 volume.setDisplayVolume(displayVolume);
-        if (parentVolume != null) {
-            volume.setTemplateId(parentVolume.getTemplateId());
-            volume.setFormat(parentVolume.getFormat());
-        } else {
-            volume.setTemplateId(null);
-        }
+                if (parentVolume != null) {
+                    volume.setTemplateId(parentVolume.getTemplateId());
+                    volume.setFormat(parentVolume.getFormat());
+                } else {
+                    volume.setTemplateId(null);
+                }
 
-        volume = _volsDao.persist(volume);
-        if (cmd.getSnapshotId() == null && displayVolume) {
-            // for volume created from snapshot, create usage event after volume creation
-            UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(),
-                    diskOfferingId, null, size, Volume.class.getName(), volume.getUuid(), displayVolume);
-        }
+                volume = _volsDao.persist(volume);
+                if (cmd.getSnapshotId() == null && displayVolume) {
+                    // for volume created from snapshot, create usage event after volume creation
+                    UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, volume.getAccountId(), volume.getDataCenterId(), volume.getId(), volume.getName(),
+                            diskOfferingId, null, size, Volume.class.getName(), volume.getUuid(), displayVolume);
+                }
 
-        CallContext.current().setEventDetails("Volume Id: " + volume.getId());
+                CallContext.current().setEventDetails("Volume Id: " + volume.getId());
 
-        // Increment resource count during allocation; if actual creation fails,
-        // decrement it
+                // Increment resource count during allocation; if actual creation fails,
+                // decrement it
                 _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.volume, displayVolume);
                 _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.primary_storage, displayVolume, new Long(volume.getSize()));
-        return volume;
-    }
+                return volume;
+            }
         });
     }
 

