[PATCH] [TEST] Fixing 02_reverse.py of RAPF

# HG changeset patch
# User Deepti B. Kalakeri <deeptik@linux.vnet.ibm.com>
# Date 1212068877 25200
# Node ID 9fae4065c84575412d10f7c1ea07f153a934db4c
# Parent  3ac66cf562f082546883c1de0d748471b557cd39
[TEST] Fixing 02_reverse.py of RAPF.

Added the following extra steps:
1) Defining a domain.
2) Creating diskpool, netpool.

Without steps 1 and 2, the tc used to simply pass. The tc will fail on KVM
with an old libvirt-cim rpm, as expected, and will pass with the latest
source that includes the fix Dan submitted yesterday.

Signed-off-by: Deepti B. Kalakeri <deeptik@linux.vnet.ibm.com>

diff -r 3ac66cf562f0 -r 9fae4065c845 suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py
--- a/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py	Fri May 30 00:24:45 2008 +0800
+++ b/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py	Thu May 29 06:47:57 2008 -0700
@@ -6,6 +6,7 @@
 # Guolian Yun <yunguol@cn.ibm.com>
 # Kaitlin Rupert <karupert@us.ibm.com>
 # Zhengang Li <lizg@cn.ibm.com>
+# Deepti B. Kalakeri <deeptik@linux.vnet.ibm.com>
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public
@@ -25,69 +26,149 @@ import sys
 import sys
 from VirtLib import utils
 from XenKvmLib import assoc
-from XenKvmLib import devices
+from XenKvmLib.test_doms import destroy_and_undefine_all
+from XenKvmLib.vxml import get_class
 from XenKvmLib.classes import get_typed_class
 from CimTest import Globals
 from CimTest.Globals import logger, do_main
-from CimTest.ReturnCodes import PASS, FAIL, XFAIL
+from CimTest.ReturnCodes import PASS, FAIL
+from XenKvmLib import enumclass
+from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf, \
+                                  create_netpool_conf
 
 sup_types = ['Xen', 'XenFV', 'KVM']
+test_dom = "RAPF_dom"
+test_vcpus = 1
+test_mem = 128
+test_mac = "00:11:22:33:44:aa"
+
+def setup_env(server, virt):
+    destroy_and_undefine_all(server)
+    vsxml = None
+    if virt == "Xen":
+        test_disk = "xvda"
+    else:
+        test_disk = "hda"
+
+    virtxml = get_class(virt)
+    vsxml = virtxml(test_dom, mem=test_mem, vcpus = test_vcpus,
+                    mac = test_mac, disk = test_disk)
+    try:
+        ret = vsxml.define(server)
+        if not ret:
+            logger.error("Failed to Define the domain: %s", test_dom)
+            return FAIL, vsxml
+
+    except Exception, details:
+        logger.error("Exception : %s", details)
+        return FAIL, vsxml
+
+    return PASS, vsxml
+
+def get_rasd_or_pool_instid(server, virt, cn):
+    key_list = ["InstanceID"]
+    inst = []
+    try:
+        inst = enumclass.enumerate(server, cn, key_list, virt)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_ENUMERATE, cn)
+        return inst, FAIL
+    return inst, PASS
+
+def get_instance(server, virt, vsxml, cn, pool_list, app_val=0):
+    instances, status = get_rasd_or_pool_instid(server, virt, cn)
+    if status != PASS:
+        vsxml.undefine(server)
+        return pool_list, status
+
+    if app_val == 1:
+        for inst in instances:
+            pool_list.append(inst.InstanceID)
+    return instances, pool_list, status
+
+
+def verify_pool_from_RAPF(server, virt, instances, pool_instid_list, cn):
+    pool = []
+    for inst in instances:
+        try:
+            pool = assoc.AssociatorNames(server, "ResourceAllocationFromPool",
+                                         cn, virt, InstanceID = inst.InstanceID)
+        except Exception:
+            logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES, inst.InstanceID)
+            status = FAIL
+
+        if len(pool) < 1:
+            logger.error("No associated pool for %s", inst.InstanceID)
+            return FAIL
+
+        if not pool[0]['InstanceID'] in pool_instid_list:
+            logger.error("InstanceID Mismatch")
+            return FAIL
+
+    return PASS
+
+def get_inst_verify_pool_from_RAPF(server, virt, vsxml, pool_cn, cn):
+    pool_list = []
+    pool, pool_list, status = get_instance(server, virt, vsxml,
+                                           pool_cn, pool_list, app_val=1)
+    if status != PASS:
+        return status
+
+    devinst, pool_list, status = get_instance(server, virt, vsxml, cn,
+                                              pool_list, app_val=0)
+    if status != PASS:
+        return status
+
+    status = verify_pool_from_RAPF(server, virt, devinst, pool_list, cn)
+
+    if status != PASS:
+        vsxml.undefine(server)
+
+    return status
+
 @do_main(sup_types)
 def main():
     options = main.options
     status = PASS
+    server = options.ip
+    virt = options.virt
+
+    status, vsxml = setup_env(server, virt)
+    if status != PASS:
+        return status
 
-    key_list = ["DeviceID", "CreationClassName", "SystemName",
-                "SystemCreationClassName"]
-    try:
-        mem = devices.enumerate(options.ip, 'Memory', key_list, options.virt)
-    except Exception:
-        logger.error(Globals.CIM_ERROR_ENUMERATE % 'Memory')
-        return FAIL
+    status, diskid = create_diskpool_conf(server, virt)
+    if status != PASS:
+        return status
 
-    try:
-        proc = devices.enumerate(options.ip, 'Processor', key_list, options.virt)
-    except Exception:
-        logger.error(Globals.CIM_ERROR_ENUMERATE % 'Processor')
-        return FAIL
-
-    for i in range(len(mem)):
-        try:
-            mempool = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
-                                            "MemResourceAllocationSettingData",
-                                            options.virt,
-                                            InstanceID = mem[i].DeviceID)
-        except Exception:
-            logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % mem[i].DeviceID)
-            status = FAIL
+    status, test_network = create_netpool_conf(server, virt)
+    if status != PASS:
+        return status
 
-        if len(mempool) < 1:
-            logger.error("No associated pool for %s" % mem[i].DeviceID)
-            return FAIL
+    status = get_inst_verify_pool_from_RAPF(server, virt, vsxml, 'MemoryPool',
+                                            'MemResourceAllocationSettingData')
+    if status != PASS:
+        return status
 
-        if mempool[0].keybindings['InstanceID'] != "MemoryPool/0":
-            logger.error("MemResourceAllocationSettingData association error")
-            return FAIL
-
-    for j in range(len(proc)):
-        try:
-            procpool = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
-                                             "ProcResourceAllocationSettingData",
-                                             options.virt,
-                                             InstanceID = proc[j].DeviceID)
-        except Exception:
-            logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % proc[j].DeviceID)
-            return FAIL
-
-        if len(procpool) < 1:
-            logger.error("No associated pool for %s" % proc[j].DeviceID)
-            return FAIL
+    status = get_inst_verify_pool_from_RAPF(server, virt, vsxml, 'ProcessorPool',
+                                            'ProcResourceAllocationSettingData')
+    if status != PASS:
+        return status
 
-        if procpool[0].keybindings['InstanceID'] != "ProcessorPool/0":
-            logger.error("ProcResourceAllocationSettingData association failed")
-            status = FAIL
+    status = get_inst_verify_pool_from_RAPF(server, virt, vsxml, 'DiskPool',
+                                            'DiskResourceAllocationSettingData')
+    if status != PASS:
+        return status
 
+    status = get_inst_verify_pool_from_RAPF(server, virt, vsxml, 'NetworkPool',
+                                            'NetResourceAllocationSettingData')
+    if status != PASS:
+        return status
+
+    cleanup_restore(server, virt)
+    vsxml.undefine(server)
     return status
 
 if __name__ == "__main__":

diff -r 3ac66cf562f0 -r 9fae4065c845 suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py
This fails for me on latest sources running with KVM:

ResourceAllocationFromPool - 02_reverse.py: FAIL
ERROR - No associated pool for demo2/hda
CIM_ERR_FAILED: Unable to determine pool of `demo2/hda'

demo2 is a KVM guest that I have defined. You'll want to make sure that the RASD you're verifying is the RASD that matches the guest you've defined.
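One way to guard against that is to filter the enumerated RASDs down to the test guest before verifying them, since the RASD InstanceIDs take the <domain>/<device> form seen in the error above. A minimal sketch (the filter_by_dom helper is illustrative, not part of the patch):

def filter_by_dom(instances, dom_name):
    # Keep only the RASDs whose InstanceID belongs to dom_name,
    # e.g. "RAPF_dom/hda" or "RAPF_dom/00:11:22:33:44:aa".
    wanted = []
    for inst in instances:
        if inst.InstanceID.startswith(dom_name + "/"):
            wanted.append(inst)
    return wanted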
+
+def get_rasd_or_pool_instid(server, virt, cn):
+    key_list = ["InstanceID"]
+    inst = []
+    try:
+        inst = enumclass.enumerate(server, cn, key_list, virt)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_ENUMERATE, cn)
+        return inst, FAIL
+    return inst, PASS
Instead of enumerating the RASDs and the pools, you can get the instance of the RASD and pool directly (since you should know the InstanceID of each). That way, you verify that the instance RAFP returns is the exact instance you expect it to be.
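For example, something along these lines (a sketch only; it assumes enumclass exposes a GetInstance-style helper that takes key bindings, so check the actual cimtest API before using it):

def get_exp_rasd(server, virt, rasd_cn, dom, dev):
    # Fetch the one RASD we expect for the defined guest instead of
    # enumerating them all.  InstanceID form: "<domain>/<device>".
    keys = { 'InstanceID' : '%s/%s' % (dom, dev) }
    try:
        return enumclass.getInstance(server, rasd_cn, keys, virt)
    except Exception, details:
        logger.error("GetInstance of %s failed: %s", rasd_cn, details)
        return None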
+
+def get_instance(server, virt, vsxml, cn, pool_list, app_val=0):
+    instances, status = get_rasd_or_pool_instid(server, virt, cn)
+    if status != PASS:
+        vsxml.undefine(server)
+        return pool_list, status
+
+    if app_val == 1:
+        for inst in instances:
+            pool_list.append(inst.InstanceID)
I wouldn't create a list of pools, I'd just do a GetInstance to get the expected pool instance for each case.
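In the same spirit, the expected pool instance could be fetched directly from its known InstanceID rather than collected into a list. A hedged sketch (pool IDs like "MemoryPool/0" come from the original test; the getInstance helper is the same assumption as above):

def get_exp_pool(server, virt, pool_cn, exp_instid):
    # exp_instid is e.g. "MemoryPool/0" or "ProcessorPool/0"; the disk
    # and net pool IDs would come from create_diskpool_conf() and
    # create_netpool_conf().
    try:
        return enumclass.getInstance(server, pool_cn,
                                     { 'InstanceID' : exp_instid }, virt)
    except Exception, details:
        logger.error("GetInstance of %s failed: %s", pool_cn, details)
        return None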
+    return instances, pool_list, status
+
+
+def verify_pool_from_RAPF(server, virt, instances, pool_instid_list, cn):
This should be RAFP, not RAPF.
+    pool = []
+    for inst in instances:
+        try:
+            pool = assoc.AssociatorNames(server, "ResourceAllocationFromPool",
+                                         cn, virt, InstanceID = inst.InstanceID)
+        except Exception:
+            logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES, inst.InstanceID)
+            status = FAIL
+
+        if len(pool) < 1:
+            logger.error("No associated pool for %s", inst.InstanceID)
+            return FAIL
+
+        if not pool[0]['InstanceID'] in pool_instid_list:
I'd do an exact match here. You should be doing a 1-to-1 comparison on InstanceIDs.
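Concretely, the membership check could become an equality check against the single pool ID expected for the RASD (exp_pool_instid is illustrative):

        # 1-to-1 comparison: the RASD must map to exactly this pool,
        # e.g. "MemoryPool/0" for the test guest's memory RASD.
        if pool[0]['InstanceID'] != exp_pool_instid:
            logger.error("Got pool %s, expected %s",
                         pool[0]['InstanceID'], exp_pool_instid)
            return FAIL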
+    status = get_inst_verify_pool_from_RAPF(server, virt, vsxml, 'MemoryPool',
+                                            'MemResourceAllocationSettingData')
+    if status != PASS:
+        return status
If you need to call get_inst_verify_pool_from_RAPF() for each Pool/RASD type, you can create a list/dictionary with the argument information. You can then create a for loop that walks through the dictionary; that way you don't have to repeat the code block above 4 times. Something like:

arg_list = { 'MemoryPool' : 'MemResourceAllocationSettingData',
             ...
           }

for pool_cn, rasd_cn in arg_list.iteritems():
    status = get_inst_verify_pool_from_RAPF(server, virt, vsxml,
                                            pool_cn, rasd_cn)
    if status != PASS:
        return status

-- 
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com

Kaitlin Rupert wrote:
> This fails for me on latest sources running with KVM:
>
> ResourceAllocationFromPool - 02_reverse.py: FAIL
> ERROR - No associated pool for demo2/hda
> CIM_ERR_FAILED: Unable to determine pool of `demo2/hda'
>
> demo2 is a KVM guest that I have defined.

What could be the circumstances in which the provider is unable to determine the pool type? I ask because I had more than one dom created and the same tc passed.

> You'll want to make sure that the RASD you're verifying is the RASD that
> matches the guest you've defined.

Btw, I have submitted the changes for the tc.

Deepti B Kalakeri wrote:
> What could be the circumstances in which the provider is unable to
> determine the pool type? I ask because I had more than one dom created
> and the same tc passed.

cimtest creates a pool for images in /tmp. The guest I have defined has its image in /boot. However, I don't have a pool defined for /boot, so its disk device doesn't belong to any pool.

> Btw, I have submitted the changes for the tc.

-- 
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com