[PATCH] [TEST] #3 Update HostSystem.04 for XenFV/KVM/LXC support

# HG changeset patch
# User Guolian Yun <yunguol@cn.ibm.com>
# Date 1217471504 25200
# Node ID d095c79471e6b96828b560b6639f5945e86ba4f2
# Parent fc92abb3ae7cab32dec6718412be1c0d88874a4a
[TEST] #3 Update HostSystem.04 for XenFV/KVM/LXC support

Updates from 2 to 3:
Clean up the diskpool/netpool/guest before returning

Signed-off-by: Guolian Yun <yunguol@cn.ibm.com>

diff -r fc92abb3ae7c -r d095c79471e6 suites/libvirt-cim/cimtest/HostSystem/04_hs_to_EAPF.py
--- a/suites/libvirt-cim/cimtest/HostSystem/04_hs_to_EAPF.py	Wed Jul 30 06:55:31 2008 -0700
+++ b/suites/libvirt-cim/cimtest/HostSystem/04_hs_to_EAPF.py	Wed Jul 30 19:31:44 2008 -0700
@@ -52,17 +52,17 @@ from CimTest.Globals import logger, CIM_
 from CimTest.Globals import logger, CIM_ERROR_ASSOCIATORNAMES, \
     CIM_ERROR_ASSOCIATORS
 from CimTest.Globals import do_main
+from XenKvmLib.vxml import XenXML, KVMXML, get_class
 from XenKvmLib.assoc import AssociatorNames, Associators
 from XenKvmLib.common_util import get_host_info
+from XenKvmLib.classes import get_typed_class
 from CimTest.ReturnCodes import PASS, FAIL, SKIP
-from XenKvmLib.test_xml import testxml_bridge
-from XenKvmLib.test_doms import test_domain_function, destroy_and_undefine_all
-from VirtLib.live import network_by_bridge
+from XenKvmLib.test_doms import destroy_and_undefine_all
 from XenKvmLib.logicaldevices import verify_device_values
 from XenKvmLib.common_util import cleanup_restore, test_dpath, \
-    create_diskpool_file
+    create_diskpool_file, create_netpool_conf, destroy_netpool
 
-sup_types = ['Xen']
+sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
 
 test_dom = "CrossClass_GuestDom"
 test_mac = "00:11:22:33:44:aa"
@@ -72,30 +72,6 @@ diskid = "%s/%s" % ("DiskPool", test_dpa
 diskid = "%s/%s" % ("DiskPool", test_dpath)
 memid = "%s/%s" % ("MemoryPool", 0)
 procid = "%s/%s" % ("ProcessorPool", 0)
-
-def setup_env(server):
-    destroy_and_undefine_all(server)
-    test_xml, bridge = testxml_bridge(test_dom, mem = test_mem,
-                                      vcpus = test_vcpus, mac = test_mac,
-                                      disk = test_disk, server = server)
-    if bridge == None:
-        logger.error("Unable to find virtual bridge")
-        return SKIP
-
-    if test_xml == None:
-        logger.error("Guest xml was not created properly")
-        return FAIL
-
-    virt_network = network_by_bridge(bridge, server)
-    if virt_network == None:
-        logger.error("No virtual network found for bridge %s", bridge)
-        return SKIP
-
-    ret = test_domain_function(test_xml, server, cmd = "create")
-    if not ret:
-        logger.error("Failed to create the dom: %s", test_dom)
-        return FAIL
-    return PASS
 
 def print_err(err, detail, cn):
     logger.error(err % cn)
@@ -112,35 +88,43 @@ def pool_init_list(pool_assoc):
         in_pllist[classname_keyvalue] = instid
     return in_pllist
 
-def eapf_list():
+def eapf_list(server, virt="Xen"):
+    disk_inst = get_typed_class(virt, "LogicalDisk")
+    proc_inst = get_typed_class(virt, "Processor")
+    net_inst = get_typed_class(virt, "NetworkPort")
+    mem_inst = get_typed_class(virt, "Memory")
+
     disk = {
             'SystemName' : test_dom,
-            'CreationClassName' : "Xen_LogicalDisk",
-            'DeviceID' : "%s/%s" % (test_dom,test_disk),
+            'CreationClassName' : disk_inst,
+            'DeviceID' : "%s/%s" % (test_dom, test_disk),
             'Name' : test_disk
            }
     proc = {
            'SystemName' : test_dom,
-           'CreationClassName' : "Xen_Processor",
+           'CreationClassName' : proc_inst,
            'DeviceID' : "%s/%s" % (test_dom,0)
           }
     net = {
           'SystemName' : test_dom,
-          'CreationClassName' : "Xen_NetworkPort",
+          'CreationClassName' : net_inst,
          'DeviceID' : "%s/%s" % (test_dom, test_mac),
          'NetworkAddresses' : test_mac
         }
     mem = {
           'SystemName' : test_dom,
-          'CreationClassName' : "Xen_Memory",
+          'CreationClassName' : mem_inst,
           'DeviceID' : "%s/%s" % (test_dom, "mem"),
           'NumberOfBlocks' : test_mem * 1024
          }
-    eaf_values = { "Xen_Processor" : proc,
-                   "Xen_LogicalDisk" : disk,
-                   "Xen_NetworkPort" : net,
-                   "Xen_Memory" : mem
-                 }
+    if virt == "LXC":
+        eaf_values = { mem_inst : mem}
+    else:
+        eaf_values = { proc_inst : proc,
+                       disk_inst : disk,
+                       net_inst : net,
+                       mem_inst : mem
+                     }
     return eaf_values
 
 def get_inst_for_dom(assoc_val):
@@ -152,13 +136,14 @@ def get_inst_for_dom(assoc_val):
 
     return dom_list
 
-def get_assocname_info(server, cn, an, qcn, hostname):
+def get_assocname_info(server, cn, an, qcn, hostname, virt="Xen"):
     status = PASS
     assoc_info = []
     try:
         assoc_info = AssociatorNames(server, an, cn,
+                                     virt,
                                      CreationClassName=cn,
                                      Name = hostname)
         if len(assoc_info) < 1:
@@ -172,7 +157,6 @@ def get_assocname_info(server, cn, an, q
 
     if status != PASS:
         cleanup_restore(server, virt='Xen')
-        test_domain_function(test_dom, server, "destroy")
 
     return status, assoc_info
 
@@ -183,24 +167,24 @@ def check_len(an, assoc_list_info, qcn,
         return FAIL
     return PASS
 
-def verify_eafp_values(server, in_pllist):
+def verify_eafp_values(server, in_pllist, virt="Xen"):
     # Looping through the in_pllist to get association for various pools.
     status = PASS
-    an = "Xen_ElementAllocatedFromPool"
+    an = get_typed_class(virt, "ElementAllocatedFromPool")
     exp_len = 1
     qcn = "Logical Devices"
-    eafp_values = eapf_list()
+    eafp_values = eapf_list(server, virt)
     for cn, instid in sorted(in_pllist.items()):
         try:
-            assoc_info = Associators(server, an, cn, InstanceID = instid)
+            assoc_info = Associators(server, an, cn, virt, InstanceID = instid)
             inst_list = get_inst_for_dom(assoc_info)
             status = check_len(an, inst_list, qcn, exp_len)
             if status != PASS:
                 break
             assoc_eafp_info = inst_list[0]
             CCName = assoc_eafp_info['CreationClassName']
-            status = verify_device_values(assoc_eafp_info, CCName,
-                                          eafp_values, virt='Xen')
+            status = verify_device_values(assoc_eafp_info,
+                                          eafp_values, virt)
             if status != PASS:
                 return status
@@ -216,41 +200,69 @@ def main():
 def main():
     options= main.options
     server = options.ip
-    virt=options.virt
+    if options.virt == "XenFV":
+        virt = "Xen"
+    else:
+        virt=options.virt
 
     # Get the host info
-    status, host_name, classname = get_host_info(server)
+    status, host_name, classname = get_host_info(server, virt)
     if status != PASS:
         return status
 
+    destroy_and_undefine_all(server)
+    virt_type = get_class(virt)
+    if virt == 'LXC':
+        vsxml = virt_type(test_dom)
+    else:
+        vsxml = virt_type(test_dom, vcpus = test_vcpus, mac = test_mac,
+                          disk = test_disk)
+    ret = vsxml.define(server)
+    if not ret:
+        logger.error("Failed to define the dom: '%s'", test_dom)
+        return FAIL
+
     # Verify DiskPool on machine
     status = create_diskpool_file()
     if status != PASS:
-        return status
+        logger.error("Failed to create diskpool")
+        vsxml.undefine(server)
+        return FAIL
 
+    # Create NetPool on machine
+    status, net_name = create_netpool_conf(server, virt, use_existing=False)
+    if status != PASS:
+        logger.error('Unable to create network pool')
+        vsxml.undefine(server)
+        cleanup_restore(server, virt=virt)
+        return FAIL
+
     # Get the hostedResourcePool info first
     cn = classname
-    an = "Xen_HostedResourcePool"
+    an = get_typed_class(virt, "HostedResourcePool")
     qcn = "Device Pool"
-    status, pool = get_assocname_info(server, cn, an, qcn, host_name)
+    status, pool = get_assocname_info(server, cn, an, qcn, host_name, virt)
     if status != PASS:
+        vsxml.undefine(server)
+        cleanup_restore(server, virt=virt)
+        destroy_netpool(server, virt, net_name)
        return status
-
-    # One pool for each Device type, hence len should be 4
+    # One pool for each Device type, hence len should be 4
     exp_len = 4
-    status = status = check_len(an, pool, qcn, exp_len)
+    status = check_len(an, pool, qcn, exp_len)
     if status != PASS:
+        vsxml.undefine(server)
+        cleanup_restore(server, virt=virt)
+        destroy_netpool(server, virt, net_name)
        return FAIL
 
-    status = setup_env(server)
-    if status != PASS:
-        cleanup_restore(server, virt=virt)
-        test_domain_function(test_dom, server, cmd = "destroy")
-        return status
-
     in_pllist = pool_init_list(pool)
-    status = verify_eafp_values(server, in_pllist)
-    ret = test_domain_function(test_dom, server, cmd = "destroy")
+    if virt == 'LXC':
+        for cn, instid in in_pllist.items():
+            if cn == 'LXC_MemoryPool':
+                in_pllist = {cn: instid}
+                break
+    status = verify_eafp_values(server, in_pllist, virt)
+    vsxml.undefine(server)
     cleanup_restore(server, virt=virt)
+    destroy_netpool(server, virt, net_name)
    return status
 
 if __name__ == "__main__":
    sys.exit(main())
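
A note on the cleanup this revision adds (not part of the patch itself): every
exit path of main() now repeats the same three-step teardown, which is what the
"Updates from 2 to 3" changelog line refers to. A minimal sketch of that
ordering, factored into a try/finally wrapper, is below. run_with_cleanup is a
hypothetical helper shown only for illustration; just cleanup_restore,
destroy_netpool, and vsxml.undefine come from the patch:

    # Sketch only: a hypothetical helper (not in XenKvmLib) that factors out
    # the teardown sequence the patch repeats on each exit path of main().
    from XenKvmLib.common_util import cleanup_restore, destroy_netpool

    def run_with_cleanup(server, virt, vsxml, net_name, test_body):
        try:
            # test_body would wrap the association checks, e.g. a closure
            # around pool_init_list() and verify_eafp_values().
            return test_body()
        finally:
            # Teardown in the patch's order: undefine the guest, restore
            # the diskpool config, then remove the test network pool.
            vsxml.undefine(server)
            cleanup_restore(server, virt=virt)
            destroy_netpool(server, virt, net_name)

The get_typed_class(virt, ...) calls are expected to return the virt-prefixed
CIM class name (e.g. "KVM_Memory" for virt="KVM"), which matches the literal
'LXC_MemoryPool' string used to filter the pool list in the LXC case.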