[PATCH] [TEST] #2 Updating 01_forward.py of EAFP

# HG changeset patch
# User Deepti B. Kalakeri <deeptik@linux.vnet.ibm.com>
# Date 1216118958 25200
# Node ID a379943366703def9ecfe0a03779f91d3556a244
# Parent 64abdd1495dc05e69061151baf2ea25a682e8d8d

[TEST] #2 Updating 01_forward.py of EAFP

Changes:
--------
From 1 to 2:
1) Added support for LXC.
2) Made only EAFP pool specific checks.
3) Removed eafp_list() used previously; instead added get_id to create the
   pool list.
4) Removed print_error().
5) Removed get_id; instead used get_inst.
6) Added support for Memory, Processor, NetworkPort to enumclass.py.

Patch 1:
--------
1) Modifying the tc to support XenFV and KVM.
2) Modified get_keys() to use proper SystemCreationClassName.
3) Added functions get_id to get the instances for different logical devices
   so that the DeviceID of the instances can be used in the init_list().
4) Added init_list() function to create a list of inputs for the EAFP
   association.
5) Added eafp_list() to create a list of pool values that will be used to
   verify the return values from the EAFP association.
6) Added the function verify_eafp_values() to call association on EAFP and
   verify the return values.
7) Included cleanup_restore().

Signed-off-by: Deepti B. Kalakeri <deeptik@linux.vnet.ibm.com>

diff -r 64abdd1495dc -r a37994336670 suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py	Fri Jul 11 00:42:35 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py	Tue Jul 15 03:49:18 2008 -0700
@@ -42,165 +42,178 @@
 import sys
 import pywbem
-from XenKvmLib.test_xml import testxml, testxml_bridge
-from VirtLib import utils
-from XenKvmLib import assoc
-from XenKvmLib.test_doms import test_domain_function, destroy_and_undefine_all
+from XenKvmLib.assoc import Associators
+from XenKvmLib.test_doms import destroy_and_undefine_all
 from XenKvmLib import devices
-from CimTest import Globals
-from CimTest.Globals import do_main
-from VirtLib.live import network_by_bridge
-from CimTest.ReturnCodes import PASS, FAIL, SKIP
+from XenKvmLib.enumclass import getInstance
+from CimTest.Globals import CIM_ERROR_ASSOCIATORS, CIM_ERROR_GETINSTANCE
+from XenKvmLib.vxml import get_class
+from XenKvmLib.common_util import create_diskpool_conf, cleanup_restore
+from XenKvmLib.classes import get_typed_class
+from XenKvmLib.logicaldevices import field_err
+from CimTest.Globals import do_main, logger
+from CimTest.ReturnCodes import PASS, FAIL
 
-sup_types = ['Xen']
+sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
 
 test_dom = "hd_domain"
 test_mac = "00:11:22:33:44:aa"
 test_vcpus = 1
-test_disk = 'xvda'
 
+def get_keys(virt, cn, id):
+    sccn = get_typed_class(virt, "ComputerSystem")
+    cn = get_typed_class(virt, cn)
+    key_list = { 'DeviceID' : id,
+                 'CreationClassName' : cn,
+                 'SystemName' : test_dom,
+                 'SystemCreationClassName' : sccn
+               }
+    return key_list
 
-def print_error(cn, detail):
-    Globals.logger.error(Globals.CIM_ERROR_GETINSTANCE, cn)
-    Globals.logger.error("Exception: %s", detail)
+def get_inst(server, virt, cn, key, key_list):
+    inst = None
+    try:
+        inst = getInstance(server, cn, key_list, virt)
 
-def get_keys(cn, device_id):
-    id = "%s/%s" % (test_dom, device_id)
+    except Exception, details:
+        logger.error("Exception %s" % details)
+        return None
 
-    key_list = { 'DeviceID' : id,
-                 'CreationClassName' : cn,
-                 'SystemName' : test_dom,
-                 'SystemCreationClassName' : "Xen_ComputerSystem"
-               }
+    if inst is None:
+        logger.error("Expected at least one %s instance" % cn)
+        return None
 
-    return key_list
+    return inst
+
+def init_list(server, virt, vsxml, test_disk):
+    lelist = {}
+    if virt != 'LXC':
+        cn_keys_list = {
+                         "LogicalDisk" : "%s/%s" % (test_dom, test_disk),
+                         "Memory"      : "%s/%s" % (test_dom, "mem"),
+                         "NetworkPort" : "%s/%s" % (test_dom, test_mac),
+                         "Processor"   : "%s/%s" % (test_dom, "0")
+                       }
+    else:
+        cn_keys_list = {
+                         "Memory"      : "%s/%s" % (test_dom, "mem"),
+                       }
+
+    for cname, id in cn_keys_list.items():
+        key_list = get_keys(virt, cname, id)
+        inst = get_inst(server, virt, cname, id, key_list)
+        if inst is None:
+            cleanup_restore(server, virt)
+            vsxml.destroy(server)
+            return FAIL, lelist
+        lelist[inst.CreationClassName] = inst.DeviceID
+    return PASS, lelist
+
+def get_pool_details(server, virt, vsxml, diskid):
+    gi_inst_list = {}
+    inst = None
+    if virt != 'LXC':
+        virt_network = vsxml.xml_get_net_network()
+        keys = {
+                 'DiskPool'      : diskid,
+                 'ProcessorPool' : 'ProcessorPool/0',
+                 'MemoryPool'    : 'MemoryPool/0',
+                 'NetworkPool'   : 'NetworkPool/%s' % virt_network
+               }
+    else:
+        keys = {
+                 'MemoryPool'    : 'MemoryPool/0',
+               }
+
+    for cn, k in keys.iteritems():
+        key_list = {"InstanceID" : k}
+        inst = get_inst(server, virt, cn, k, key_list)
+        if inst is None:
+            cleanup_restore(server, virt)
+            vsxml.destroy(server)
+            return FAIL, gi_inst_list
+        cn = get_typed_class(virt, cn)
+        gi_inst_list[cn] = { 'InstanceID' : inst.InstanceID,
+                             'PoolID'     : inst.PoolID
+                           }
+    return PASS, gi_inst_list
+
+def verify_eafp_values(server, virt, in_pllist, gi_inst_list):
+    # Looping through the in_pllist to get association for devices.
+    status = PASS
+    an = get_typed_class(virt, "ElementAllocatedFromPool")
+    sccn = get_typed_class(virt, "ComputerSystem")
+    for cn, devid in sorted(in_pllist.iteritems()):
+        try:
+            assoc_info = Associators(server, an, cn,
+                                     DeviceID = devid,
+                                     CreationClassName = cn,
+                                     SystemName = test_dom,
+                                     SystemCreationClassName = sccn,
+                                     virt=virt)
+            if len(assoc_info) != 1:
+                logger.error("%s returned %i ResourcePool objects for "
+                             "domain '%s'", an, len(assoc_info),
+                             test_dom)
+                status = FAIL
+                break
+            assoc_eafp_info = assoc_info[0]
+            CCName = assoc_eafp_info.classname
+            gi_inst = gi_inst_list[CCName]
+            if assoc_eafp_info['InstanceID'] != gi_inst['InstanceID']:
+                field_err(assoc_eafp_info, gi_inst, 'InstanceID')
+                return FAIL
+            if assoc_eafp_info['PoolID'] != gi_inst['PoolID']:
+                field_err(assoc_eafp_info, gi_inst, 'PoolID')
+                return FAIL
+        except Exception, detail:
+            logger.error(CIM_ERROR_ASSOCIATORS, an)
+            logger.error("Exception: %s", detail)
+            cleanup_restore(server, virt)
+            status = FAIL
+    return status
+
 @do_main(sup_types)
 def main():
     options = main.options
+    server = options.ip
+    virt = options.virt
     status = PASS
-    idx = 0
+    if virt == 'Xen':
+        test_disk = 'xvda'
+    else:
+        test_disk = 'hda'
 
-# Getting the VS list and deleting the test_dom if it already exists.
-    destroy_and_undefine_all(options.ip)
+    # Getting the VS list and deleting the test_dom if it already exists.
+    destroy_and_undefine_all(server)
+    virt_type = get_class(virt)
+    if virt == 'LXC':
+        vsxml = virt_type(test_dom)
+    else:
+        vsxml = virt_type(test_dom, vcpus = test_vcpus, mac = test_mac,
+                          disk = test_disk)
 
-    test_xml, bridge = testxml_bridge(test_dom, vcpus = test_vcpus, \
-                                      mac = test_mac, disk = test_disk, \
-                                      server = options.ip)
-    if bridge == None:
-        Globals.logger.error("Unable to find virtual bridge")
-        return SKIP
-
-    if test_xml == None:
-        Globals.logger.error("Guest xml not created properly")
-        return FAIL
-
-    virt_network = network_by_bridge(bridge, options.ip)
-    if virt_network == None:
-        Globals.logger.error("No virtual network found for bridge %s", bridge)
-        return SKIP
-
-    ret = test_domain_function(test_xml, options.ip, cmd = "create")
+    # Verify DiskPool on machine
+    status, diskid = create_diskpool_conf(server, virt)
+    if status != PASS:
+        return status
+
+    ret = vsxml.create(server)
     if not ret:
-        Globals.logger.error("Failed to Create the dom: %s", test_dom)
+        logger.error("Failed to Create the dom: '%s'", test_dom)
         return FAIL
 
-    try:
-        cn = "Xen_LogicalDisk"
-        key_list = get_keys(cn, test_disk)
-        disk = devices.Xen_LogicalDisk(options.ip, key_list)
-    except Exception,detail:
-        print_error(cn, detail)
-        return FAIL
+    status, lelist = init_list(server, virt, vsxml, test_disk)
+    if status != PASS:
+        return status
 
-    try:
-        cn = "Xen_Memory"
-        key_list = get_keys(cn, "mem")
-        mem = devices.Xen_Memory(options.ip, key_list)
-    except Exception,detail:
-        print_error(cn, detail)
-        return FAIL
-
-    try:
-        cn = "Xen_NetworkPort"
-        key_list = get_keys(cn, test_mac)
-        net = devices.Xen_NetworkPort(options.ip, key_list)
-    except Exception,detail:
-        print_error(cn, detail)
-        return FAIL
-
-    try:
-        cn = "Xen_Processor"
-        key_list = get_keys(cn, "0")
-        proc = devices.Xen_Processor(options.ip, key_list)
-    except Exception,detail:
-        print_error(cn, detail)
-        return FAIL
-
-    netpool_id = "NetworkPool/%s" % virt_network
-
-    lelist = {
-              "Xen_LogicalDisk" : disk.DeviceID, \
-              "Xen_Memory"      : mem.DeviceID, \
-              "Xen_NetworkPort" : net.DeviceID, \
-              "Xen_Processor"   : proc.DeviceID
-             }
-    poollist = [
-                "Xen_DiskPool", \
-                "Xen_MemoryPool", \
-                "Xen_NetworkPool", \
-                "Xen_ProcessorPool"
-               ]
-    poolval = [
-               "DiskPool/foo", \
-               "MemoryPool/0", \
-               netpool_id, \
-               "ProcessorPool/0"
-              ]
-
-    sccn = "Xen_ComputerSystem"
-    for cn, devid in sorted(lelist.items()):
-        try:
-            assoc_info = assoc.Associators(options.ip, \
-                                           "Xen_ElementAllocatedFromPool",
-                                           cn,
-                                           DeviceID = devid,
-                                           CreationClassName = cn,
-                                           SystemName = test_dom,
-                                           SystemCreationClassName = sccn)
-            if len(assoc_info) != 1:
-                Globals.logger.error("Xen_ElementAllocatedFromPool returned %i\
- ResourcePool objects for domain '%s'", len(assoc_info), test_dom)
-                status = FAIL
-                break
-
-            if assoc_info[0].classname != poollist[idx]:
-                Globals.logger.error("Classname Mismatch")
-                Globals.logger.error("Returned %s instead of %s", \
-                                     assoc_info[0].classname, \
-                                     poollist[idx])
-                status = FAIL
-
-            if assoc_info[0]['InstanceID'] != poolval[idx]:
-                Globals.logger.error("InstanceID Mismatch")
-                Globals.logger.error("Returned %s instead of %s", \
-                                     assoc_info[0]['InstanceID'], \
-                                     poolval[idx])
-                status = FAIL
-
-            if status != PASS:
-                break
-            else:
-                idx = idx + 1
-
-        except Exception, detail:
-            Globals.logger.error(Globals.CIM_ERROR_ASSOCIATORS, \
-                                 'Xen_ElementAllocatedFromPool')
-            Globals.logger.error("Exception: %s", detail)
-            status = FAIL
-
-    ret = test_domain_function(test_dom, options.ip, \
-                               cmd = "destroy")
+    status, gi_inst_list = get_pool_details(server, virt, vsxml, diskid)
+    if status != PASS:
+        return status
+
+    status = verify_eafp_values(server, virt, lelist, gi_inst_list)
+    cleanup_restore(server, virt)
+    vsxml.destroy(server)
     return status
 
 if __name__ == "__main__":

diff -r 64abdd1495dc -r a37994336670 suites/libvirt-cim/lib/XenKvmLib/enumclass.py
--- a/suites/libvirt-cim/lib/XenKvmLib/enumclass.py	Fri Jul 11 00:42:35 2008 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/enumclass.py	Tue Jul 15 03:49:18 2008 -0700
@@ -117,6 +117,30 @@ class Xen_LogicalDisk(CIM_LogicalDevice)
     pass
 
 class KVM_LogicalDisk(CIM_LogicalDevice):
+    pass
+
+class Xen_Memory(CIM_LogicalDevice):
+    pass
+
+class KVM_Memory(CIM_LogicalDevice):
+    pass
+
+class LXC_Memory(CIM_LogicalDevice):
+    pass
+
+class LXC_Memory(CIM_LogicalDevice):
+    pass
+
+class Xen_Processor(CIM_LogicalDevice):
+    pass
+
+class KVM_Processor(CIM_LogicalDevice):
+    pass
+
+class Xen_NetworkPort(CIM_LogicalDevice):
+    pass
+
+class KVM_NetworkPort(CIM_LogicalDevice):
     pass
 
 class Xen_MemoryPool(CIM_ResourcePool):

> +def get_inst(server, virt, cn, key, key_list):
The key param is never used in this function.
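That is, the signature could simply be trimmed to get_inst(server, virt, cn,
key_list), with the two call sites in init_list() and get_pool_details()
updated to match, e.g. (sketch):

    inst = get_inst(server, virt, cname, key_list)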
> +    inst = None
> +    try:
> +        inst = getInstance(server, cn, key_list, virt)
> +
> +def init_list(server, virt, vsxml, test_disk):
> +    lelist = {}
> +    if virt != 'LXC':
> +        cn_keys_list = {
> +                         "LogicalDisk" : "%s/%s" % (test_dom, test_disk),
> +                         "Memory"      : "%s/%s" % (test_dom, "mem"),
> +                         "NetworkPort" : "%s/%s" % (test_dom, test_mac),
> +                         "Processor"   : "%s/%s" % (test_dom, "0")
> +                       }
> +    else:
> +        cn_keys_list = {
> +                         "Memory"      : "%s/%s" % (test_dom, "mem"),
> +                       }
> +
> +    for cname, id in cn_keys_list.items():
> +        key_list = get_keys(virt, cname, id)
Since the SCCN and SN don't change, you could do the following instead:

1) Declare sccn somewhere before the for loop.

2) Declare the following above the for loop:

       key_list = { 'DeviceID' : None,
                    'CreationClassName' : None,
                    'SystemName' : test_dom,
                    'SystemCreationClassName' : sccn
                  }

3) In the for loop:

       cn = get_typed_class(virt, cname)
       key_list['DeviceID'] = id
       key_list['CreationClassName'] = cn

Or you can leave it as is.  Either way is fine.
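Put together, the loop in init_list() would then look roughly like this (an
untested sketch, reusing the names from the patch):

    for cname, id in cn_keys_list.items():
        cn = get_typed_class(virt, cname)
        key_list['DeviceID'] = id
        key_list['CreationClassName'] = cn
        inst = get_inst(server, virt, cname, id, key_list)
        if inst is None:
            cleanup_restore(server, virt)
            vsxml.destroy(server)
            return FAIL, lelist
        lelist[cn] = inst.DeviceID
    return PASS, lelist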
> +        inst = get_inst(server, virt, cname, id, key_list)
You don't really need to call get_inst() here, unless you just want to verify
that the instances exist.  The list you build below (lelist) should be
identical to cn_keys_list (except for the keys: lelist uses the full CIM class
names and cn_keys_list does not).
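In other words, if the existence check isn't needed, the loop could simply
translate the class names and reuse the IDs -- a rough, untested sketch:

    for cname, id in cn_keys_list.items():
        cn = get_typed_class(virt, cname)
        lelist[cn] = id
    return PASS, lelist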
> +
> +def verify_eafp_values(server, virt, in_pllist, gi_inst_list):
> +    # Looping through the in_pllist to get association for devices.
> +    status = PASS
> +    an = get_typed_class(virt, "ElementAllocatedFromPool")
> +    sccn = get_typed_class(virt, "ComputerSystem")
> +    for cn, devid in sorted(in_pllist.iteritems()):
> +        try:
> +            assoc_info = Associators(server, an, cn,
> +                                     DeviceID = devid,
> +                                     CreationClassName = cn,
> +                                     SystemName = test_dom,
> +                                     SystemCreationClassName = sccn,
> +                                     virt=virt)
> +            if len(assoc_info) != 1:
> +                logger.error("%s returned %i ResourcePool objects for "
> +                             "domain '%s'", an, len(assoc_info),
> +                             test_dom)
> +                status = FAIL
> +                break
You can just return FAIL here.
> +            assoc_eafp_info = assoc_info[0]
> +            CCName = assoc_eafp_info.classname
> +            gi_inst = gi_inst_list[CCName]
> +            if assoc_eafp_info['InstanceID'] != gi_inst['InstanceID']:
> +                field_err(assoc_eafp_info, gi_inst, 'InstanceID')
> +                return FAIL
> +            if assoc_eafp_info['PoolID'] != gi_inst['PoolID']:
> +                field_err(assoc_eafp_info, gi_inst, 'PoolID')
> +                return FAIL
If you wanted, instead of building the gi_inst_list, you could pass in the instances returned from the get_inst() call in get_pool_details(). You could then call compare_all_prop(). Either way is fine though.
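Roughly (untested, and assuming compare_all_prop() from common_util takes the
instance returned by the association call and the expected instance, the way
the ResourcePool tests use it):

    # in get_pool_details(), keep the whole instance instead of the two IDs:
    gi_inst_list[cn] = inst

    # in verify_eafp_values():
    gi_inst = gi_inst_list[CCName]
    if compare_all_prop(assoc_eafp_info, gi_inst) != PASS:
        return FAIL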
> +        except Exception, detail:
> +            logger.error(CIM_ERROR_ASSOCIATORS, an)
> +            logger.error("Exception: %s", detail)
> +            cleanup_restore(server, virt)
> +            status = FAIL
> +    return status
> +
>  @do_main(sup_types)
>  def main():
>      options = main.options
> +    server = options.ip
> +    virt = options.virt
>      status = PASS
I know this was already part of the test, but can you remove this line? Or can you set status = FAIL instead? If all of the failure paths aren't carefully checked, then having status = PASS can lead to returning a false positive.
> -    try:
> -        cn = "Xen_LogicalDisk"
> -        key_list = get_keys(cn, test_disk)
> -        disk = devices.Xen_LogicalDisk(options.ip, key_list)
> -    except Exception,detail:
> -        print_error(cn, detail)
> -        return FAIL
> +    status, lelist = init_list(server, virt, vsxml, test_disk)
> +    if status != PASS:
> +        return status
Need to call cleanup_restore() and destroy() here.
> -
> -    ret = test_domain_function(test_dom, options.ip, \
> -                               cmd = "destroy")
> +    status, gi_inst_list = get_pool_details(server, virt, vsxml, diskid)
> +    if status != PASS:
> +        return status
Need to call cleanup_restore() and destroy() here.

--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com

Kaitlin Rupert wrote:
[...]

> If you wanted, instead of building the gi_inst_list, you could pass in the
> instances returned from the get_inst() call in get_pool_details(). You could
> then call compare_all_prop(). Either way is fine though.

I prefer using compare_all_prop() in the ResourcePool related tc and would
want to check only the IDs here.

>> +    status, lelist = init_list(server, virt, vsxml, test_disk)
>> +    if status != PASS:
>> +        return status
>
> Need to call cleanup_restore() and destroy() here.

The call to cleanup_restore() and destroy() is done before the above function
returns failure.

>> +    status, gi_inst_list = get_pool_details(server, virt, vsxml, diskid)
>> +    if status != PASS:
>> +        return status
>
> Need to call cleanup_restore() and destroy() here.

The call to cleanup_restore() and destroy() is done before the above function
returns failure.

Thanks and Regards,
Deepti.