libvirt-cim-bounces@redhat.com wrote on 2008-06-20 04:42:55:
> # HG changeset patch
> # User Kaitlin Rupert <karupert@us.ibm.com>
> # Date 1213907999 25200
> # Node ID 6bbf46ffb58f0de81ff2606c5c27284d3b997ed2
> # Parent 1ea2eaa7349fb3c16281c4071b252104c7fde5c2
> [TEST] #2 Fix VSMS 08_modifyresource.py on Xen and KVM.
>
> This still fails on XenFV because of a provider bug. It looks like
> the ModifyResource call strips the <emulator> tag from a XenFV
> guest, so the test is unable to start the guest.
>
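For reference, a quick way to confirm the XenFV <emulator> issue is to
compare the guest XML before and after the modify call. A rough sketch,
not part of the patch (check_emulator_tag is just an illustrative helper,
and it shells out to virsh rather than going through the provider):

import subprocess
import xml.etree.ElementTree as ET

def check_emulator_tag(dom):
    """Return the <devices>/<emulator> text, or None if it is missing."""
    xml_desc = subprocess.check_output(["virsh", "dumpxml", dom])
    root = ET.fromstring(xml_desc)
    emulator = root.find("./devices/emulator")
    return emulator.text if emulator is not None else None

# before = check_emulator_tag("rstest_domain")
# ... ModifyResourceSettings() call ...
# after = check_emulator_tag("rstest_domain")
# A non-None 'before' and a None 'after' would confirm the stripped tag.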
> Updates to this test:
> -Call create_netpool_conf() to create a new network pool to use in
> modify calls.
> -Create network RASD instead of bridge
> -Replaced modify calls with functions from vsms_util
> -Added support for modifying a running guest (mem and vcpu only)
> -Remove XFAIL
>
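On the vsms_util change: each mod_*_res helper essentially bundles the
ModifyResourceSettings() call with the dumpxml verification that used to
be open-coded in this test. Roughly (a sketch only; the real signatures
are in XenKvmLib/vsms_util.py and may differ):

# PASS/FAIL come from CimTest.ReturnCodes.
def mod_mem_res(server, service, cxml, masd, nmem):
    # Ask the provider to modify the memory RASD...
    service.ModifyResourceSettings(ResourceSettings=[str(masd)])
    # ...then re-dump the guest XML and verify libvirt saw the change.
    cxml.dumpxml(server)
    mem = cxml.xml_get_mem()
    if mem != '%i' % (nmem * 1024):  # libvirt reports memory in KiB
        return FAIL
    return PASS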
> Updates from patch 1 to patch 2:
> -After undefining the guests in the loop, get a new XML object so the
> guest is defined using a default XML (not a modified one).
> -Instead of changing vcpus from 1 to 3, start out with a 2 vcpu guest
> and change it down to 1. This ensures that the guest is an SMP guest
> to start with.
> -Instead of changing memory from 128 to 78, change it from 128 to 256;
> 78 MB isn't much memory for a guest to run.
>
> Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>
>
> diff -r 1ea2eaa7349f -r 6bbf46ffb58f suites/libvirt-cim/cimtest/VirtualSystemManagementService/08_modifyresource.py
> --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/08_modifyresource.py	Tue Jun 10 18:26:16 2008 -0700
> +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/08_modifyresource.py	Thu Jun 19 13:39:59 2008 -0700
> @@ -26,78 +26,97 @@
>  import pywbem
>  from pywbem.cim_obj import CIMInstanceName
>  from VirtLib import utils
> +from VirtLib.live import network_by_bridge
>  from XenKvmLib import vsms
>  from XenKvmLib import vxml
>  from CimTest.Globals import logger
>  from CimTest.Globals import do_main
> -from CimTest.ReturnCodes import PASS, FAIL, XFAIL_RC
> +from CimTest.ReturnCodes import PASS, FAIL
> +from XenKvmLib.common_util import create_netpool_conf, destroy_netpool
> +from XenKvmLib import vsms_util
>
>  sup_types = ['Xen', 'KVM', 'XenFV']
>  default_dom = 'rstest_domain'
> -ntype = 'bridge'
> -ncpu = 3
> -nmem = 78
> +ntype = 'network'
> +cpu = 2
> +ncpu = 1
> +nmem = 256
>
> -bug_cpu = '90079'
> +def cleanup_env(ip, virt, cxml, net_name):
> +    destroy_netpool(ip, virt, net_name)
> +    cxml.destroy(ip)
> +    cxml.undefine(ip)
>
>  @do_main(sup_types)
>  def main():
>      options = main.options
>
>      service = vsms.get_vsms_class(options.virt)(options.ip)
> -    cxml = vxml.get_class(options.virt)(default_dom)
> +    cxml = vxml.get_class(options.virt)(default_dom, vcpus=cpu)
>      ndpath = cxml.secondary_disk_path
>      dasd = vsms.get_dasd_class(options.virt)(dev=cxml.xml_get_disk_dev(),
>                                               source=ndpath,
>                                               name=default_dom)
> +
> +    status, net_name = create_netpool_conf(options.ip, options.virt,
> +                                           use_existing=False)
> +    if status != PASS:
> +        logger.error('Unable to find a network pool')
> +        return FAIL
> +
>      nasd = vsms.get_nasd_class(options.virt)(type=ntype,
>                                               mac=cxml.xml_get_net_mac(),
> -                                             name=default_dom)
> +                                             name=default_dom,
> +                                             virt_net=net_name)
>      masd = vsms.get_masd_class(options.virt)(megabytes=nmem,
>                                               name=default_dom)
>      pasd = vsms.get_pasd_class(options.virt)(vcpu=ncpu, name=default_dom)
>
>      status = FAIL
> -    rc = 0
> -    try:
> -        cxml.define(options.ip)
> -        # Modify disk setting
> -        service.ModifyResourceSettings(ResourceSettings = [str(dasd)])
> -        cxml.dumpxml(options.ip)
> -        dpath = cxml.xml_get_disk_source()
> -        if dpath != ndpath:
> -            raise Exception('Error changing rs for disk path')
> -        logger.info('good status for disk path')
> -        # Modify net setting
> -        service.ModifyResourceSettings(ResourceSettings = [str(nasd)])
> -        cxml.dumpxml(options.ip)
> -        type = cxml.xml_get_net_type()
> -        if type != ntype:
> -            raise Exception('Error changing rs for net mac')
> -        logger.info('good status for net mac')
> -        # Modify memory resource setting
> -        service.ModifyResourceSettings(ResourceSettings=[str(masd)])
> -        cxml.dumpxml(options.ip)
> -        mem = cxml.xml_get_mem()
> -        if mem != '%i' % (nmem * 1024):
> -            raise Exception('Error changing rs for mem')
> -        logger.info('good status for mem')
> -        # Modify cpu setting
> -        service.ModifyResourceSettings(ResourceSettings = [str(pasd)])
> -        cxml.dumpxml(options.ip)
> -        cpu = cxml.xml_get_vcpu()
> -        if cpu != '%i' % ncpu:
> -            rc = -1
> -            raise Exception('Error changing rs for vcpu')
> -        logger.info('good status for vcpu')
> -        status = PASS
> -    except Exception, details:
> -        logger.error('Error invoking ModifyRS')
> -        logger.error(details)
> -        return FAIL
> +
> +    if options.virt == "KVM":
> +        test_cases = ["define"]
> +    else:
> +        test_cases = ["define", "start"]
>
> -    cxml.undefine(options.ip)
> -    if rc == -1:
> -        return XFAIL_RC(bug_cpu)
> +    for case in test_cases:
> +        #Each time through, define guest using a default XML
> +        cxml.undefine(options.ip)
> +        cxml = vxml.get_class(options.virt)(default_dom, vcpus=cpu)
> +        ret = cxml.define(options.ip)
> +        if not ret:
> +            logger.error("Failed to define the dom: %s", default_dom)
> +            cleanup_env(options.ip, options.virt, cxml, net_name)
> +            return FAIL
> +        if case == "start":
> +            ret = cxml.start(options.ip)
> +            if not ret:
> +                logger.error("Failed to start the dom: %s", default_dom)
> +                cleanup_env(options.ip, options.virt, cxml, net_name)
> +                return FAIL
> +
> +        status = vsms_util.mod_vcpu_res(options.ip, service, cxml, pasd,
> +                                        ncpu, options.virt)
> +        if status != PASS:
> +            break
> +
> +        status = vsms_util.mod_mem_res(options.ip, service, cxml, masd, nmem)
> +        if status != PASS:
> +            break
> +
> +        #Unable to modify net and disk devices while guest is running
> +        if case == "start":
> +            break
> +
> +        status = vsms_util.mod_disk_res(options.ip, service, cxml, dasd,
> +                                        ndpath)
> +        if status != PASS:
> +            break
> +
> +        status = vsms_util.mod_net_res(options.ip, service, options.virt,
> +                                       cxml, nasd, ntype, net_name)
> +        if status != PASS:
> +            break
> +
> +    cleanup_env(options.ip, options.virt, cxml, net_name)
>
>      return status
>
>
+1 from me.
> _______________________________________________
> Libvirt-cim mailing list
> Libvirt-cim@redhat.com
> https://www.redhat.com/mailman/listinfo/libvirt-cim