[PATCH] Added support for qcow2 format type
by Sharad Mishra
# HG changeset patch
# User Sharad Mishra <snmishra(a)us.ibm.com>
# Date 1293079091 28800
# Node ID 353104beb7b459f715f64bd88feb899bfce4458f
# Parent 35396e5b805193c024b6d4f065136d7b57a0b03d
Added support for qcow2 format type.
Previously, only the 'raw' storage volume format was supported by libvirt-cim.
This patch adds support for qcow2.
Signed-off-by: Sharad Mishra <snmishra(a)us.ibm.com>
diff -r 35396e5b8051 -r 353104beb7b4 libxkutil/pool_parsing.h
--- a/libxkutil/pool_parsing.h Tue Dec 14 13:59:51 2010 -0800
+++ b/libxkutil/pool_parsing.h Wed Dec 22 20:38:11 2010 -0800
@@ -66,7 +66,8 @@
struct storage_vol {
enum {VOL_FORMAT_UNKNOWN,
- VOL_FORMAT_RAW} format_type;
+ VOL_FORMAT_RAW,
+ VOL_FORMAT_QCOW2} format_type;
char *vol_name;
char *path;
uint16_t alloc;
diff -r 35396e5b8051 -r 353104beb7b4 libxkutil/xmlgen.c
--- a/libxkutil/xmlgen.c Tue Dec 14 13:59:51 2010 -0800
+++ b/libxkutil/xmlgen.c Wed Dec 22 20:38:11 2010 -0800
@@ -1134,6 +1134,8 @@
switch (type) {
case VOL_FORMAT_RAW:
return "raw";
+ case VOL_FORMAT_QCOW2:
+ return "qcow2";
default:
CU_DEBUG("Unsupported storage volume type");
}
13 years, 11 months
[PATCH] [TEST] Verify that disk can be dynamically modified
by Sharad Mishra
# HG changeset patch
# User Sharad Mishra <snmishra(a)us.ibm.com>
# Date 1293659957 28800
# Node ID 52487783c9e1ae0967aa6f2c72269a85290c3f22
# Parent c08c7fb1eb78c209fd6af1fc40b9858b0027061b
[TEST] Verify that disk can be dynamically modified.
This test case will create a new VM (define and start)
and add a virtio disk and then modify it.
Signed-off-by: Sharad Mishra <snmishra(a)us.ibm.com>
diff -r c08c7fb1eb78 -r 52487783c9e1 suites/libvirt-cim/cimtest/VirtualSystemManagementService/30_dynamic_disk_mod.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/30_dynamic_disk_mod.py Wed Dec 29 13:59:17 2010 -0800
@@ -0,0 +1,88 @@
+#!/usr/bin/python
+#
+# Copyright 2010 IBM Corp.
+#
+# Authors:
+# Sharad Mishra <snmishra(a)us.ibm.com>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+import sys
+import pywbem
+from pywbem.cim_obj import CIMInstanceName
+from XenKvmLib import vsms
+from XenKvmLib import vxml
+from XenKvmLib.classes import get_typed_class
+from CimTest.Globals import logger
+from XenKvmLib.const import do_main
+from CimTest.ReturnCodes import FAIL, PASS
+from XenKvmLib import vsms_util
+
+sup_types = ['Xen', 'KVM', 'XenFV']
+default_dom = 'rstest_domain'
+
@do_main(sup_types)
def main():
    """Define and start a guest, hot-add a virtio disk, then modify it.

    Returns PASS on success, FAIL otherwise.
    """
    options = main.options

    # virtio target name for KVM; Xen guests use the xvd* namespace
    if options.virt == 'KVM':
        nddev = 'vda'
    else:
        nddev = 'xvdb'

    service = vsms.get_vsms_class(options.virt)(options.ip)
    cxml = vxml.get_class(options.virt)(default_dom)
    classname = get_typed_class(options.virt, 'VirtualSystemSettingData')
    inst_id = '%s:%s' % (options.virt, default_dom)
    vssd_ref = CIMInstanceName(classname, keybindings = {
                'InstanceID' : inst_id,
                'CreationClassName' : classname})
    dasd = vsms.get_dasd_class(options.virt)(dev=nddev,
                                             source=cxml.secondary_disk_path,
                                             name=default_dom)
    disk_attr = { 'nddev' : nddev,
                  'src_path' : cxml.secondary_disk_path
                }

    # Remove any stale definition left over from a previous run.
    cxml.undefine(options.ip)
    cxml = vxml.get_class(options.virt)(default_dom)
    ret = cxml.cim_define(options.ip)
    if not ret:
        logger.error("Failed to define the dom: %s", default_dom)
        return FAIL

    ret = cxml.start(options.ip)
    if not ret:
        logger.error("Failed to start the dom: %s", default_dom)
        cxml.undefine(options.ip)
        return FAIL

    status = FAIL
    try:
        status = vsms_util.add_disk_res(options.ip, service, cxml, vssd_ref,
                                        dasd, disk_attr)
        if status == PASS:
            dasd = vsms.get_dasd_class(options.virt)(dev='vdc',
                                         instanceid='rstest_domain/vda',
                                         source='/home/rss.iso',
                                         name=default_dom)

            service = vsms.get_vsms_class(options.virt)(options.ip)
            output = service.ModifyResourceSettings(ResourceSettings = [str(dasd)])
            # BUG fix: the original discarded 'output', so the test passed
            # even when the modify call failed.  Element [0] is presumed to
            # be the method return code (0 == completed) -- TODO confirm
            # against the vsms wrapper's InvokeMethod return shape.
            if output[0] != 0:
                logger.error("ModifyResourceSettings failed: %s", output)
                status = FAIL
    finally:
        # BUG fix: the original left the guest running and defined after
        # the test completed.
        cxml.destroy(options.ip)
        cxml.undefine(options.ip)

    return status

if __name__ == "__main__":
    sys.exit(main())
+
13 years, 11 months
[PATCH] [TEST] Updated the test to create qcow2 storage volumes
by Sharad Mishra
# HG changeset patch
# User Sharad Mishra <snmishra(a)us.ibm.com>
# Date 1293219718 28800
# Node ID c08c7fb1eb78c209fd6af1fc40b9858b0027061b
# Parent d88da81a62f6c4cd6bfc0310f360e4d77863d9f4
[TEST] Updated the test to create qcow2 storage volumes.
This test has been updated to create not just raw storage volumes, but also qcow2.
Signed-off-by: Sharad Mishra <snmishra(a)us.ibm.com>
diff -r d88da81a62f6 -r c08c7fb1eb78 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py
--- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py Wed Dec 22 18:37:01 2010 -0800
+++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py Fri Dec 24 11:41:58 2010 -0800
@@ -42,6 +42,7 @@
from XenKvmLib.classes import get_typed_class, inst_to_mof
from XenKvmLib.common_util import destroy_diskpool
from XenKvmLib.pool import create_pool, undefine_diskpool, DIR_POOL
+from pywbem.cim_types import Uint64
pool_attr = { 'Path' : _image_dir }
vol_name = "cimtest-vol.img"
@@ -62,7 +63,7 @@
return PASS, rasd
-def get_stovol_settings(server, virt, dp_id, pool_name):
+def get_stovol_settings(server, virt, dp_id, pool_name, format):
status, dp_rasds = get_template_rasd_from_sdc(virt, server, dp_id)
if status != PASS:
logger.error("Failed to get the StorageVol RASD's")
@@ -79,7 +80,8 @@
if not pool_name in dpool_rasd['PoolID']:
return None
-
+ dpool_rasd['FormatType'] = Uint64(format)
+
stovol_settings = inst_to_mof(dpool_rasd)
return stovol_settings
@@ -197,67 +199,71 @@
# vol creation, we can extend dp_types to include netfs etc
dp_types = { "DISK_POOL_DIR" : DIR_POOL }
- for pool_name, pool_type in dp_types.iteritems():
- status = FAIL
- res = [FAIL]
- found = 0
- clean_pool=True
- try:
- if pool_type == DIR_POOL:
- pool_name = default_pool_name
- clean_pool=False
- else:
- status = create_pool(server, virt, pool_name, pool_attr,
- mode_type=pool_type, pool_type="DiskPool")
+ format_types = [1, 2]
- if status != PASS:
- logger.error("Failed to create pool '%s'", pool_name)
- return status
+ for fs in format_types:
+ for pool_name, pool_type in dp_types.iteritems():
+ status = FAIL
+ res = [FAIL]
+ found = 0
+ clean_pool=True
+ try:
+ if pool_type == DIR_POOL:
+ pool_name = default_pool_name
+ clean_pool=False
+ else:
+ status = create_pool(server, virt, pool_name, pool_attr,
+ mode_type=pool_type, pool_type="DiskPool")
- dp_inst_id = "%s/%s" % (dp_cn, pool_name)
- stovol_settings = get_stovol_settings(server, virt,
- dp_inst_id, pool_name)
- if stovol_settings == None:
- raise Exception("Failed to get the defualt StorageVolRASD info")
+ if status != PASS:
+ logger.error("Failed to create pool '%s'", pool_name)
+ return status
- disk_pool_inst = get_diskpool(server, virt, dp_cn, dp_inst_id)
- if disk_pool_inst == None:
- raise Exception("DiskPool instance for '%s' not found!" \
- % pool_name)
+ dp_inst_id = "%s/%s" % (dp_cn, pool_name)
+ stovol_settings = get_stovol_settings(server, virt,
+ dp_inst_id, pool_name, fs)
+ if stovol_settings == None:
+ raise Exception("Failed to get the defualt StorageVolRASD info")
+
+ disk_pool_inst = get_diskpool(server, virt, dp_cn, dp_inst_id)
+ if disk_pool_inst == None:
+ raise Exception("DiskPool instance for '%s' not found!" \
+ % pool_name)
- rpcs = get_typed_class(virt, "ResourcePoolConfigurationService")
- rpcs_conn = eval("rpcs_service." + rpcs)(server)
- res = rpcs_conn.CreateResourceInPool(Settings=stovol_settings,
- Pool=disk_pool_inst)
- if res[0] != PASS:
- raise Exception("Failed to create the Vol %s" % vol_name)
+ rpcs = get_typed_class(virt, "ResourcePoolConfigurationService")
+ rpcs_conn = eval("rpcs_service." + rpcs)(server)
+ res = rpcs_conn.CreateResourceInPool(Settings=stovol_settings,
+ Pool=disk_pool_inst)
+ if res[0] != PASS:
+ raise Exception("Failed to create the Vol %s" % vol_name)
- if res[1]['Resource']['InstanceID'] != exp_vol_path and \
- cim_rev >= libvirt_stovol_instance_id:
- raise Exception("Incorrect InstanceID")
- else:
- status = PASS
+ if res[1]['Resource']['InstanceID'] != exp_vol_path and \
+ cim_rev >= libvirt_stovol_instance_id:
+ raise Exception("Incorrect InstanceID")
+ else:
+ status = PASS
- found = verify_vol(server, virt, pool_name, exp_vol_path, found)
- stovol_status = verify_template_rasd_exists(virt, server,
- dp_inst_id,
- exp_vol_path)
+ found = verify_vol(server, virt, pool_name, exp_vol_path, found)
+ stovol_status = verify_template_rasd_exists(virt, server,
+ dp_inst_id,
+ exp_vol_path)
- ret = cleanup_pool_vol(server, virt, pool_name,
- clean_pool, exp_vol_path)
- if res[0] == PASS and found == 1 and \
- ret == PASS and stovol_status == PASS and \
- status == PASS:
- status = PASS
- else:
- return FAIL
+ ret = cleanup_pool_vol(server, virt, pool_name,
+ clean_pool, exp_vol_path)
+ if res[0] == PASS and found == 1 and \
+ ret == PASS and stovol_status == PASS and \
+ status == PASS:
+ status = PASS
+ else:
+ return FAIL
- except Exception, details:
- logger.error("Exception details: %s", details)
- cleanup_pool_vol(server, virt, pool_name,
- clean_pool, exp_vol_path)
- status = FAIL
+ except Exception, details:
+ logger.error("Exception details: %s", details)
+ cleanup_pool_vol(server, virt, pool_name,
+ clean_pool, exp_vol_path)
+ status = FAIL
return status
+
if __name__ == "__main__":
sys.exit(main())
13 years, 11 months
[PATCH] [TEST] New test to verify cdrom media change
by Sharad Mishra
# HG changeset patch
# User Sharad Mishra <snmishra(a)us.ibm.com>
# Date 1293071821 28800
# Node ID d88da81a62f6c4cd6bfc0310f360e4d77863d9f4
# Parent dac5cb514b9a7668b7717ac07bf02b9fcdf3a78d
[TEST] New test to verify cdrom media change.
Libvirt-cim now allows live cdrom media change.
This test checks that functionality.
Signed-off-by: Sharad Mishra <snmishra(a)us.ibm.com>
diff -r dac5cb514b9a -r d88da81a62f6 suites/libvirt-cim/cimtest/VirtualSystemManagementService/29_cdrom_media_change.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/29_cdrom_media_change.py Wed Dec 22 18:37:01 2010 -0800
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+#
+# Copyright 2010 IBM Corp.
+#
+# Authors:
+# Sharad Mishra <snmishra(a)us.ibm.com>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# Purpose:
+#    Verify that the cdrom media of a running guest can be changed
+#
+# Steps:
+# 1) Define and start a guest with cdrom drive.
+# 2) Modify cdrom drive to point to another source.
+# 3) Verify guest is now pointing to new locations.
+#
+
+import sys
+import os
+from CimTest.Globals import logger
+from CimTest.ReturnCodes import FAIL, PASS, SKIP
+from XenKvmLib.classes import get_typed_class, inst_to_mof
+from XenKvmLib.rasd import get_rasd_templates
+from XenKvmLib.const import do_main, get_provider_version, \
+ KVM_disk_path, KVM_secondary_disk_path, \
+ default_pool_name
+from XenKvmLib.vxml import get_class
+from XenKvmLib import vxml
+from XenKvmLib.common_util import parse_instance_id
+from XenKvmLib.enumclass import EnumInstances
+from XenKvmLib import vsms
+from XenKvmLib import vsms_util
+from XenKvmLib.vsms import VIRT_DISK_TYPE_CDROM
+
+sup_types = ['KVM']
+test_dom = 'cddom'
+cdrom_rev = 1056
+
def get_rasd_list(ip, virt, addr):
    """Return the default cdrom DiskRASD template with its source set to addr.

    Returns None when no templates are available or no default cdrom
    template is found.
    """
    drasd_cn = get_typed_class(virt, "DiskResourceAllocationSettingData")
    instanceid = "DiskPool/%s" % default_pool_name

    rasds = get_rasd_templates(ip, virt, instanceid)
    if len(rasds) < 1:
        # BUG fix: the original logged the undefined name 'pool_id' here,
        # which raised NameError instead of reporting the empty result.
        logger.info("No RASD templates returned for %s", instanceid)
        return None

    for rasd in rasds:
        if rasd.classname != drasd_cn:
            continue
        if rasd['EmulatedType'] == VIRT_DISK_TYPE_CDROM and \
           "Default" in rasd['InstanceID']:
            rasd['source'] = addr
            rasd['Address'] = addr
            return rasd

    # BUG fix: the original fell through and returned whatever template the
    # loop variable last held, handing the caller an arbitrary RASD when no
    # default cdrom template existed.
    return None
+
def change_cdrom_media(ip, virt, rasd, addr):
    """Point the guest's cdrom device at the new source addr via
    vsms_util.mod_disk_res.  Returns PASS or FAIL.
    """
    service = vsms.get_vsms_class(virt)(ip)
    cxml = vxml.get_class(virt)(test_dom)
    # Build the InstanceID from test_dom and the template's device name
    # instead of hard-coding "cddom/hdc".  With the default template device
    # ("hdc", presumably -- confirm against the template) this is identical,
    # but it stays correct if test_dom or the template device changes.
    dasd = vsms.get_dasd_class(virt)(dev=rasd['VirtualDevice'],
                                     source=addr,
                                     instanceid='%s/%s' % \
                                         (test_dom, rasd['VirtualDevice']),
                                     name=test_dom)

    # The dead "status = FAIL" pre-assignment from the original is dropped.
    return vsms_util.mod_disk_res(ip, service, cxml, dasd, addr)
+
+def verify_cdrom_update(ip, virt, addr, guest_name):
+ inst = None
+
+ try:
+ drasd_cn = get_typed_class(virt, 'DiskResourceAllocationSettingData')
+ enum_list = EnumInstances(ip, drasd_cn)
+
+ if enum_list < 1:
+ raise Exception("No %s instances returned" % drasd_cn)
+
+ for rasd in enum_list:
+ guest, dev, status = parse_instance_id(rasd.InstanceID)
+ if status != PASS:
+ raise Exception("Unable to parse InstanceID: %s" % \
+ rasd.InstanceID)
+
+ if guest == guest_name:
+ inst = rasd
+ break
+
+ if inst is None or inst.Address != addr:
+ raise Exception("Expected Address to be of %s" % \
+ KVM_secondary_disk_path)
+
+ if inst.EmulatedType != VIRT_DISK_TYPE_CDROM:
+ raise Exception("Expected device to be of %d type" % \
+ VIRT_DISK_TYPE_FLOPPY)
+
+ except Exception, details:
+ logger.error(details)
+ return FAIL
+
+ return PASS
+
+@do_main(sup_types)
+def main():
+ options = main.options
+
+ status = FAIL
+
+ curr_cim_rev, changeset = get_provider_version(options.virt, options.ip)
+ if curr_cim_rev < cdrom_rev:
+ logger.error("cdrom media change support is available in rev >= %s", cdrom_rev)
+ return SKIP
+
+ cxml = get_class(options.virt)(test_dom)
+
+ addr = KVM_disk_path
+
+ guest_defined = False
+ guest_running = False
+
+ try:
+ rasd = get_rasd_list(options.ip, options.virt, addr)
+ rasd_list = {}
+ rasd_list[rasd.classname] = inst_to_mof(rasd)
+ if len(rasd_list) < 1:
+ raise Exception("Unable to get template RASDs for %s" % test_dom)
+
+ cxml.set_res_settings(rasd_list)
+ ret = cxml.cim_define(options.ip)
+ if not ret:
+ raise Exception("Unable to define %s" % test_dom)
+
+ guest_defined = True
+
+ ret = cxml.cim_start(options.ip)
+ if ret:
+ raise Exception("Unable to start %s" % test_dom)
+
+ guest_running = True
+
+ status = change_cdrom_media(options.ip, options.virt, rasd, KVM_secondary_disk_path)
+ if status != PASS:
+ raise Exception("Failed cdrom media change for %s" % test_dom)
+
+ status = verify_cdrom_update(options.ip, options.virt, KVM_secondary_disk_path, test_dom)
+ if status != PASS:
+ raise Exception("Failed to verify cdrom media change for %s" % test_dom)
+
+ except Exception, details:
+ logger.error(details)
+ status = FAIL
+
+ if guest_running == True:
+ cxml.destroy(options.ip)
+
+ if guest_defined == True:
+ cxml.undefine(options.ip)
+
+ return status
+
+if __name__ == "__main__":
+ sys.exit(main())
+
+
diff -r dac5cb514b9a -r d88da81a62f6 suites/libvirt-cim/lib/XenKvmLib/vsms.py
--- a/suites/libvirt-cim/lib/XenKvmLib/vsms.py Thu Oct 07 01:06:53 2010 -0400
+++ b/suites/libvirt-cim/lib/XenKvmLib/vsms.py Wed Dec 22 18:37:01 2010 -0800
@@ -146,13 +146,15 @@
# classes to define RASD parameters
class CIM_DiskResourceAllocationSettingData(CIMClassMOF):
- def __init__(self, dev, source, name, emu_type=None):
+ def __init__(self, dev, source, name, instanceid=None, emu_type=None):
self.ResourceType = RASD_TYPE_DISK
if emu_type != None:
self.EmulatedType = emu_type
if dev != None:
self.VirtualDevice = dev
self.InstanceID = '%s/%s' % (name, dev)
+ if instanceid != None:
+ self.InstanceID = instanceid
if source != None:
self.Address = source
13 years, 11 months