[PATCH] [TEST] Updating the 01_forward.py tc in SettingsDefineCapabilities

# HG changeset patch
# User Deepti B. Kalakeri <deeptik@linux.vnet.ibm.com>
# Date 1208857494 -19800
# Node ID 320328bb0f70196ea52e6c0e9e254c681cfb5d60
# Parent 41a6c5cd50733a67d56d6612dad02705377a8b88
[TEST] Updating the 01_forward.py tc in SettingsDefineCapabilities

1) To support KVM and XenFV
2) To adapt to the new infrastructure changes

Signed-off-by: Deepti B. Kalakeri <deeptik@linux.vnet.ibm.com>

diff -r 41a6c5cd5073 -r 320328bb0f70 suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py
--- a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Mon Apr 21 16:55:52 2008 +0800
+++ b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Tue Apr 22 15:14:54 2008 +0530
@@ -55,19 +55,17 @@ import sys
 import sys
 import os
 from distutils.file_util import move_file
-from VirtLib import utils
 from XenKvmLib import assoc
 from XenKvmLib import enumclass
-from CimTest import Globals
-from CimTest.Globals import do_main
 from XenKvmLib.test_xml import netxml
 from XenKvmLib.test_doms import create_vnet
 from VirtLib.live import net_list
 from CimTest.ReturnCodes import PASS, FAIL, SKIP
+from CimTest.Globals import do_main, platform_sup, logger, \
+CIM_ERROR_GETINSTANCE, CIM_ERROR_ASSOCIATORS
+from XenKvmLib.classes import get_typed_class
 
-sup_types = ['Xen']
-status = PASS
 test_dpath = "foo"
 disk_file = '/tmp/diskpool.conf'
 back_disk_file = disk_file + "." + "SSDC_01_forward"
@@ -79,12 +77,13 @@ def conf_file():
     """
         Creating diskpool.conf file.
     """
+    status = PASS
     try:
         f = open(disk_file, 'w')
         f.write('%s %s' % (test_dpath, '/'))
         f.close()
     except Exception,detail:
-        Globals.logger.error("Exception: %s", detail)
+        logger.error("Exception: %s", detail)
         status = SKIP
         sys.exit(status)
@@ -93,80 +92,109 @@ def clean_up_restore():
         Restoring back the original diskpool.conf file.
     """
+    status = PASS
     try:
         if os.path.exists(back_disk_file):
             os.remove(disk_file)
             move_file(back_disk_file, disk_file)
     except Exception, detail:
-        Globals.logger.error("Exception: %s", detail)
+        logger.error("Exception: %s", detail)
         status = SKIP
         sys.exit(status)
 
-def get_or_bail(ip, id, pool_class):
+def get_or_bail(virt, ip, id, pool_class):
     """
         Getinstance for the CLass and return instance on success, otherwise
         exit after cleanup_restore .
     """
     key_list = { 'InstanceID' : id }
-
+    status = PASS
     try:
-        instance = enumclass.getInstance(ip, pool_class, key_list)
+        instance = enumclass.getInstance(ip, pool_class, key_list, virt)
     except Exception, detail:
-        Globals.logger.error(Globals.CIM_ERROR_GETINSTANCE, '%s' % pool_class)
-        Globals.logger.error("Exception: %s", detail)
+        logger.error(CIM_ERROR_GETINSTANCE, '%s' % pool_class)
+        logger.error("Exception: %s", detail)
         clean_up_restore()
         status = FAIL
         sys.exit(status)
     return instance
 
-def init_list(disk, mem, net, proc):
+def init_list(virt, disk, mem, net, proc):
     """
         Creating the lists that will be used for comparisons.
     """
     instlist = [
-        disk.InstanceID, \
-        mem.InstanceID, \
-        net.InstanceID, \
-        proc.InstanceID
-        ]
+        disk.InstanceID,
+        mem.InstanceID,
+        net.InstanceID,
+        proc.InstanceID
+        ]
     cllist = [
-        "Xen_DiskResourceAllocationSettingData", \
-        "Xen_MemResourceAllocationSettingData", \
-        "Xen_NetResourceAllocationSettingData", \
-        "Xen_ProcResourceAllocationSettingData"
+        get_typed_class(virt, "DiskResourceAllocationSettingData"),
+        get_typed_class(virt, "MemResourceAllocationSettingData"),
+        get_typed_class(virt, "NetResourceAllocationSettingData"),
+        get_typed_class(virt, "ProcResourceAllocationSettingData")
        ]
     rtype = {
-        "Xen_DiskResourceAllocationSettingData" : 17, \
-        "Xen_MemResourceAllocationSettingData" : 4, \
-        "Xen_NetResourceAllocationSettingData" : 10, \
-        "Xen_ProcResourceAllocationSettingData" : 3
+        get_typed_class(virt, "DiskResourceAllocationSettingData") : 17,
+        get_typed_class(virt, "MemResourceAllocationSettingData") : 4,
+        get_typed_class(virt, "NetResourceAllocationSettingData") : 10,
+        get_typed_class(virt, "ProcResourceAllocationSettingData") : 3
      }
     rangelist = {
-        "Default" : 0, \
-        "Minimum" : 1, \
-        "Maximum" : 2, \
+        "Default" : 0,
+        "Minimum" : 1,
+        "Maximum" : 2,
         "Increment" : 3
     }
     return instlist, cllist, rtype, rangelist
 
-def print_error(index, fieldname, assoc_info, exp_value):
-    ret_value = assoc_info[index][fieldname]
-    Globals.logger.error("%s Mismatch", fieldname)
-    Globals.logger.error("Returned %s instead of %s", ret_value, exp_value)
+def print_error(fieldname, ret_value, exp_value):
+    logger.error("%s Mismatch", fieldname)
+    logger.error("Returned %s instead of %s", ret_value, exp_value)
 
-@do_main(sup_types)
+def verify_rasd_fields(loop, assoc_info, cllist, rtype, rangelist):
+    status = PASS
+    for inst in assoc_info:
+        if inst.classname != cllist[loop]:
+            print_error("Classname", inst.classname, cllist[loop])
+            status = FAIL
+        if inst['ResourceType'] != rtype[cllist[loop]]:
+            print_error("ResourceType", inst['ResourceType'],
+                        rtype[cllist[loop]])
+            status = FAIL
+        ppolicy = inst['PropertyPolicy']
+        if ppolicy != 0 and ppolicy != 1:
+            print_error("PropertyPolicy", inst['PropertyPolicy'], ppolicy)
+            status = FAIL
+        vrole = inst['ValueRole']
+        if vrole < 0 or vrole > 4:
+            print_error("ValueRole", inst['ValueRole'], vrole)
+            status = FAIL
+        insid = inst['InstanceID']
+        vrange = rangelist[insid]
+        if vrange != inst['ValueRange']:
+            print_error("ValueRange", inst['ValueRange'], vrange)
+            status = FAIL
+        if status != PASS:
+            break
+    return status
+
+
+@do_main(platform_sup)
 def main():
     options = main.options
-    global status
-
-    cn = 'Xen_AllocationCapabilities'
+
+    cn = get_typed_class(options.virt, "AllocationCapabilities")
     loop = 0
     server = options.ip
+    virt = options.virt
+    status = PASS
 
     # Taking care of already existing diskconf file
     # Creating diskpool.conf if it does not exist
@@ -179,11 +207,13 @@ def main():
         conf_file()
 
     try :
-        disk = get_or_bail(server, id=diskid, \
-                           pool_class=enumclass.Xen_DiskPool)
-        mem = get_or_bail(server, id = memid, \
-                          pool_class=enumclass.Xen_MemoryPool)
-        vir_network = net_list(server)
+        disk_cn = eval("enumclass." + get_typed_class(options.virt,
+                                                      "DiskPool"))
+        disk = get_or_bail(virt, server, id=diskid, pool_class=disk_cn)
+        mem_cn = eval("enumclass." + get_typed_class(options.virt,
+                                                     "MemoryPool"))
+        mem = get_or_bail(virt, server, id = memid, pool_class=mem_cn)
+        vir_network = net_list(server, virt)
         if len(vir_network) > 0:
             test_network = vir_network[0]
         else:
@@ -192,64 +222,42 @@ def main():
             net_xml, bridge = netxml(server, bridgename, test_network)
             ret = create_vnet(server, net_xml)
             if not ret:
-                Globals.logger.error("Failed to create the Virtual Network '%s'", \
-                                     test_network)
+                logger.error("Failed to create Virtual Network '%s'",
+                             test_network)
                 return SKIP
 
         netid = "%s/%s" % ("NetworkPool", test_network)
-        net = get_or_bail(server, id = netid, \
-                          pool_class=enumclass.Xen_NetworkPool)
-        proc = get_or_bail(server, id = procid, \
-                           pool_class=enumclass.Xen_ProcessorPool)
+        net_cn = eval("enumclass." + get_typed_class(options.virt,
+                                                     "NetworkPool"))
+        net = get_or_bail(virt, server, id = netid, pool_class=net_cn)
+        proc_cn = eval("enumclass." + get_typed_class(options.virt,
+                                                      "ProcessorPool"))
+        proc = get_or_bail(virt, server, id = procid, pool_class=proc_cn)
 
     except Exception, detail:
-        Globals.logger.error("Exception: %s", detail)
+        logger.error("Exception: %s", detail)
         clean_up_restore()
         status = FAIL
         return status
 
-    instlist, cllist, rtype, rangelist = init_list(disk, mem, net, proc )
-
+    instlist, cllist, rtype, rangelist = init_list(virt, disk, mem, net, proc )
+    assoc_cname = get_typed_class(virt, "SettingsDefineCapabilities")
     for instid in sorted(instlist):
         try:
-            assoc_info = assoc.Associators(options.ip, \
-                                           "Xen_SettingsDefineCapabilities",
-                                           cn,
-                                           InstanceID = instid)
+            assoc_info = assoc.Associators(options.ip, assoc_cname, cn,
+                                           InstanceID = instid, virt=virt)
             if len(assoc_info) != 4:
-                Globals.logger.error("Xen_SettingsDefineCapabilities returned \
-%i ResourcePool objects instead 4", len(assoc_info))
+                logger.error("%s returned \
+%i ResourcePool objects instead 4", assoc_cname, len(assoc_info))
                 status = FAIL
                 break
-            for i in range(len(assoc_info)):
-                if assoc_info[i].classname != cllist[loop]:
-                    print_error(i, "Classname", assoc_info, cllist[loop])
-                    status = FAIL
-                if assoc_info[i]['ResourceType'] != rtype[cllist[loop]]:
-                    print_error(i, "ResourceType", assoc_info, rtype[cllist[loop]])
-                    status = FAIL
-                ppolicy = assoc_info[i]['PropertyPolicy']
-                if ppolicy != 0 and ppolicy != 1:
-                    print_error(i, "PropertyPolicy", assoc_info, ppolicy)
-                    status = FAIL
-                vrole = assoc_info[i]['ValueRole']
-                if vrole < 0 or vrole > 4:
-                    print_error(i, "ValueRole", assoc_info, vrole)
-                    status = FAIL
-                insid = assoc_info[i]['InstanceID']
-                vrange = rangelist[insid]
-                if vrange != assoc_info[i]['ValueRange']:
-                    print_error(i, "ValueRange", assoc_info, vrange)
-                    status = FAIL
-                if status != 0:
-                    break
-            if status != 0:
+            status = verify_rasd_fields(loop, assoc_info, cllist, rtype, rangelist)
+            if status != PASS:
                 break
             else:
                 loop = loop + 1
         except Exception, detail:
-            Globals.logger.error(Globals.CIM_ERROR_ASSOCIATORS, \
-                                 'Xen_SettingsDefineCapabilities')
-            Globals.logger.error("Exception: %s", detail)
+            logger.error(CIM_ERROR_ASSOCIATORS, assoc_cname)
+            logger.error("Exception: %s", detail)
             clean_up_restore()
            status = FAIL

> -@do_main(sup_types)
> +def verify_rasd_fields(loop, assoc_info, cllist, rtype, rangelist):
> [...]
> +    return status
Nice job on turning this into a function. I think this makes things more readable. I know this is just a reshuffling of code, but can you modify it so that we return an error instead of just setting the status to FAIL? That way, we bail out immediately.

Also, this test fails for me on F9 with KVM using current sources. This is because the current providers use diskpools (which requires libvirt 0.4.0 or newer, I believe).

Daisy was going to look into working out a disk pool fix, so you might want to check with her to see if she's still planning on this.

--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com
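P.S. Here is roughly what I mean by bailing out immediately (an untested sketch, reusing print_error() and the PASS/FAIL codes from your patch; the expected-value strings are just for illustration):

def verify_rasd_fields(loop, assoc_info, cllist, rtype, rangelist):
    # Return FAIL on the first mismatch instead of accumulating status
    for inst in assoc_info:
        if inst.classname != cllist[loop]:
            print_error("Classname", inst.classname, cllist[loop])
            return FAIL
        if inst['ResourceType'] != rtype[cllist[loop]]:
            print_error("ResourceType", inst['ResourceType'],
                        rtype[cllist[loop]])
            return FAIL
        ppolicy = inst['PropertyPolicy']
        if ppolicy != 0 and ppolicy != 1:
            print_error("PropertyPolicy", ppolicy, "0 or 1")
            return FAIL
        vrole = inst['ValueRole']
        if vrole < 0 or vrole > 4:
            print_error("ValueRole", vrole, "0 to 4")
            return FAIL
        vrange = rangelist[inst['InstanceID']]
        if vrange != inst['ValueRange']:
            print_error("ValueRange", inst['ValueRange'], vrange)
            return FAIL
    return PASS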

libvirt-cim-bounces@redhat.com wrote on 2008-04-22 22:53:33:

>> -@do_main(sup_types)
>> +def verify_rasd_fields(loop, assoc_info, cllist, rtype, rangelist):
>> [...]
> Nice job on turning this into a function. I think this makes things more readable. I know this is just a reshuffling of code, but can you modify it so that we return an error instead of just setting the status to FAIL? That way, we bail out immediately.

> Also, this test fails for me on F9 with KVM using current sources. This is because the current providers use diskpools (which requires libvirt 0.4.0 or newer, I believe).

> Daisy was going to look into working out a disk pool fix, so you might want to check with her to see if she's still planning on this.
I'm looking into the EAFP 01_forward.py and EAFP 02_reverse.py failures; they fail for me on F8 with Xen on my own machine, but pass on the other machine.

On the diskpool, here is the list of what we have to do:

1) Move the diskpool config functions to a library and update the related test cases.

2) Create a diskpool-file function in the same library as above. I'm not sure I understand the essential difference between the diskpool and the diskpool file. Why would libvirt 0.4.0 or newer ignore the diskpool stuff and use a file?

3) Add a function to check the version of libvirt. If libvirt >= 0.4.0, we'll need to call the create-diskpool-file function. Is there a better way of getting the libvirt version than parsing the output of "rpm -qa libvirt"?

For now we put diskpool.conf under /tmp. Dan and Kaitlin have discussed the best location for it; what was the result?

Thoughts?

Guo Lian Yun wrote:
> [...]

> I use the libvirt-cim rpm on my F9 machine for testing. Do you want me to use the new provider sources for testing on the F9 machine as well?
We want the test cases to pass/xfail/skip appropriately on F9 with both the release rpm and current sources. This is useful because we can use the test suite as a way of determining problems that exist in the release rpm that haven't been fixed upstream.

--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com

Guo Lian Yun wrote:
>> [...]
> I'm looking into the EAFP 01_forward.py and EAFP 02_reverse.py failures; they fail for me on F8 with Xen on my own machine, but pass on the other machine.

> On the diskpool, here is the list of what we have to do:

> 1) Move the diskpool config functions to a library and update the related test cases.

> 2) Create a diskpool-file function in the same library as above.
I was thinking you could use a wrapper function, so the test cases only need to call one function; the wrapper can behave appropriately based on the version of libvirt.
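For example (just a sketch; get_libvirt_version(), create_storage_pool(), and create_diskpool_file() are hypothetical helpers that would live in the new library):

from distutils.version import LooseVersion

def create_diskpool(server, virt):
    # One entry point for the test cases; pick the right diskpool
    # setup based on the installed libvirt version.
    if LooseVersion(get_libvirt_version(server)) >= LooseVersion("0.4.1"):
        # libvirt 0.4.1 and newer have real storage pool support
        return create_storage_pool(server, virt)
    else:
        # older libvirt: the provider reads diskpool info from a conf file
        return create_diskpool_file(server)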
> I'm not sure I understand the essential difference between the diskpool and the diskpool file. Why would libvirt 0.4.0 or newer ignore the diskpool stuff and use a file?
Sorry, all of my previous mails were incorrect: libvirt didn't add storage pool support until 0.4.1 (I checked the libvirt source). Since older versions of libvirt don't have storage support, the provider uses a configuration file for its diskpool info.
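For reference, the conf file the test writes (see conf_file() in the patch at the top of this thread) is just a one-line "name path" pair, so with the test's values it contains:

foo /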
> 3) Add a function to check the version of libvirt. If libvirt >= 0.4.0, we'll need to call the create-diskpool-file function.
The other way around: if libvirt <= 0.4.0, we'll need to create the diskpool file.
> Is there a better way of getting the libvirt version than parsing the output of "rpm -qa libvirt"?
virsh --version will give you the version of libvirt. It is probably more reliable because it's possible that someone installed an rpm and then updated the version from source.
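Something like this would do it (Python 2 sketch using the stdlib commands module; the remote-host case would need the test suite's usual run-on-server helper instead):

import commands

def get_libvirt_version():
    # virsh prints the libvirt version it was built against, e.g. "0.4.1"
    rc, out = commands.getstatusoutput("virsh --version")
    if rc != 0:
        return None
    return out.strip()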
> For now we put diskpool.conf under /tmp. Dan and Kaitlin have discussed the best location for it; what was the result?
Good point. I think the plan is to store the diskpool.conf file in /etc/libvirt. However, I'll need to work on a patch for this. So, for now, plan on using /tmp.

--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com