[PATCH] [TEST] Resubmit RAFP.01 for LXC support, also pass for Xen, KVM and XenFV

# HG changeset patch
# User Guolian Yun <yunguol@cn.ibm.com>
# Date 1214285339 -28800
# Node ID a79e3dc30ada12232bd948478ce0908c35ad9f41
# Parent c12f7b4a86e606c4fb8c1f32bc3a98bf59b75550
[TEST] Resubmit RAFP.01 for LXC support, also pass for Xen, KVM and XenFV

Signed-off-by: Guolian Yun <yunguol@cn.ibm.com>

diff -r c12f7b4a86e6 -r a79e3dc30ada suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py
--- a/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py	Thu Jun 19 22:49:43 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py	Tue Jun 24 13:28:59 2008 +0800
@@ -26,66 +26,125 @@
 from VirtLib import utils
 from XenKvmLib import assoc
 from XenKvmLib import enumclass
-from XenKvmLib.classes import get_typed_class
+from XenKvmLib.classes import get_typed_class
+from XenKvmLib.test_doms import destroy_and_undefine_all
+from XenKvmLib.vxml import get_class
 from CimTest import Globals
 from CimTest.Globals import logger, do_main
 from CimTest.ReturnCodes import PASS, FAIL, XFAIL
+from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf, \
+create_netpool_conf
 
-sup_types = ['Xen', 'XenFV', 'KVM']
+sup_types = ['Xen', 'XenFV', 'KVM', 'LXC']
+test_dom = "RAFP_dom"
+test_vcpus = 1
+test_mem = 128
+test_mac = "00:11:22:33:44:aa"
+
+def setup_env(server, virt):
+    destroy_and_undefine_all(server)
+    vsxml = None
+    if virt == "Xen":
+        test_disk = "xvda"
+    elif virt == "XenFV" or virt=="KVM":
+        test_disk = "hda"
+    else:
+        test_disk = None
+
+    virtxml = get_class(virt)
+    if virt == 'LXC':
+        vsxml = virtxml(test_dom)
+    else:
+        vsxml = virtxml(test_dom, mem=test_mem, vcpus = test_vcpus,
+                        mac = test_mac, disk = test_disk)
+    try:
+        ret = vsxml.define(server)
+        if not ret:
+            logger.error("Failed to Define the domain: %s", test_dom)
+            return FAIL, vsxml, test_disk
+
+    except Exception, details:
+        logger.error("Exception : %s", details)
+        return FAIL, vsxml, test_disk
+
+    return PASS, vsxml, test_disk
+
+def get_instance(server, pool, list, virt='Xen'):
+    try:
+        inst = enumclass.getInstance(server,
+                                     pool,
+                                     list,
+                                     virt)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_GETINSTANCE % pool)
+        return FAIL, inst
+
+    return PASS, inst
+
+def verify_rasd(server, assoc_cn, cn, virt, list, rasd):
+    try:
+        data = assoc.AssociatorNames(server,
+                                     assoc_cn,
+                                     cn,
+                                     virt,
+                                     InstanceID=list)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % cn)
+        return FAIL
+
+    if data[0]['InstanceID'] not in rasd:
+        logger.info ('ID %s' % data[0]['InstanceID'])
+        logger.error("InstanceID Mismatch")
+        logger.error("Returned %s error" % data[0]['InstanceID'])
+        return FAIL
+
+    return PASS
+
 @do_main(sup_types)
 def main():
     options = main.options
     status = PASS
-    try:
-        key_list = { 'InstanceID' : "MemoryPool/0" }
-        mempool = enumclass.getInstance(options.ip,
-                                        "MemoryPool",
-                                        key_list,
-                                        options.virt)
-    except Exception:
-        logger.error(Globals.CIM_ERROR_GETINSTANCE % "MemoryPool")
-        return FAIL
+
+    status, vsxml, test_disk = setup_env(options.ip, options.virt)
+    if status != PASS:
+        return status
+
+    status, diskid = create_diskpool_conf(options.ip, options.virt)
+    if status != PASS:
+        cleanup_restore(options.ip, options.virt)
+        vsxml.undefine(options.ip)
+        return status
 
-    try:
-        key_list = { 'InstanceID' : "ProcessorPool/0" }
-        procpool = enumclass.getInstance(options.ip,
-                                         "ProcessorPool",
-                                         key_list,
-                                         options.virt)
-    except Exception:
-        logger.error(Globals.CIM_ERROR_GETINSTANCE % "ProcessorPool")
-        return FAIL
-
-    try:
-        memdata = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
-                                        "MemoryPool",
-                                        options.virt,
-                                        InstanceID = mempool.InstanceID)
-    except Exception:
-        logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % mempool.InstanceID)
-        status = FAIL
-
-    for i in range(len(memdata)):
-        if memdata[i].classname != get_typed_class(options.virt, "MemResourceAllocationSettingData"):
-            logger.error("ERROR: Association result error")
-            status = FAIL
-
-    try:
-        procdata = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
-                                         "ProcessorPool",
-                                         options.virt,
-                                         InstanceID = procpool.InstanceID)
-    except Exception:
-        logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % procpool.InstanceID)
-        status = FAIL
-
-    for j in range(len(procdata)):
-        if procdata[j].classname != get_typed_class(options.virt, "ProcResourceAllocationSettingData"):
-            logger.error("ERROR: Association result error")
-            status = FAIL
+    status, test_network = create_netpool_conf(options.ip, options.virt)
+    if status != PASS:
+        cleanup_restore(options.ip, options.virt)
+        vsxml.undefine(options.ip)
+        return status
+
+    if options.virt == 'LXC':
+        pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"} }
+        rasd = [ "%s/mem" % test_dom ]
+    else:
+        pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"},
+                 "ProcessorPool" : {'InstanceID' : "ProcessorPool/0"},
+                 "DiskPool" : {'InstanceID' : diskid},
+                 "NetworkPool" : {'InstanceID' : "NetworkPool/%s" % test_network }}
+        rasd = [ "%s/mem" % test_dom,
+                 "%s/proc" % test_dom,
+                 "%s/%s" %(test_dom, test_disk),
+                 "%s/%s" % (test_dom, test_mac) ]
+
+    for k, v in pool.iteritems():
+        status, inst = get_instance(options.ip, k, v, options.virt)
+
+        status = verify_rasd(options.ip, "ResourceAllocationFromPool",
+                             k, options.virt, inst.InstanceID,
+                             rasd)
+
+    cleanup_restore(options.ip, options.virt)
+    vsxml.undefine(options.ip)
     return status

+def verify_rasd(server, assoc_cn, cn, virt, list, rasd):
+    try:
+        data = assoc.AssociatorNames(server,
+                                     assoc_cn,
+                                     cn,
+                                     virt,
+                                     InstanceID=list)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % cn)
+        return FAIL
It's possible for the AssociatorNames call to return no instances without throwing an error. You'll want to verify that data has at least one element.
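Something like the following would work (the log message is just an illustration):

    if len(data) < 1:
        logger.error("No RASD instances returned for %s" % cn)
        return FAIL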
+
+    if data[0]['InstanceID'] not in rasd:
By using data[0], you're relying on the CIMOM to return the instance corresponding to test_dom first in the list. This might not always be the case. Instead, you should step through the items in data and select the instance or instances that correspond to test_dom. If you don't find an instance, you should return an error.

Also, you're checking the value of InstanceID against all the possible RASD InstanceIDs for test_dom. If the provider returns the MemRASD that corresponds to test_dom when it should have returned the ProcRASD, this check will return a false positive. To fix this, you could change rasd from a list to a dictionary keyed by pool classname. You already have the pool classname in the verify_rasd() call, so you can use it as a key to get the corresponding RASD InstanceID. Something like the following:

    pool_to_rasd = { 'MemoryPool' : "%s/mem" % test_dom,
                     'ProcessorPool' : "%s/proc" % test_dom,
                     'DiskPool' : "%s/%s" % (test_dom, test_disk),
                     'NetworkPool' : "%s/%s" % (test_dom, test_mac) }
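A rough sketch of the revised check inside verify_rasd(), assuming pool_to_rasd is visible to the function and cn carries the pool classname (the log message is illustrative):

    exp_id = pool_to_rasd[cn]
    found = False
    for item in data:
        if item['InstanceID'] == exp_id:
            found = True
            break

    if not found:
        logger.error("No RASD matching %s for domain %s" % (exp_id, test_dom))
        return FAIL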
+        logger.info ('ID %s' % data[0]['InstanceID'])
+        logger.error("InstanceID Mismatch")
+        logger.error("Returned %s error" % data[0]['InstanceID'])
Then here, you can print the ID value AssociatorNames returned versus the expected value.
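For example (the message format is just a suggestion, reusing exp_id from the sketch above):

    logger.error("Returned %s, expected %s" % (data[0]['InstanceID'], exp_id))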
+
+    if options.virt == 'LXC':
+        pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"} }
+        rasd = [ "%s/mem" % test_dom ]
+    else:
+        pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"},
+                 "ProcessorPool" : {'InstanceID' : "ProcessorPool/0"},
+                 "DiskPool" : {'InstanceID' : diskid},
+                 "NetworkPool" : {'InstanceID' : "NetworkPool/%s" % test_network }}
Can you fix this so that the line is no more than 80 characters in length?
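One possible way to wrap the NetworkPool entry, for example:

        pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"},
                 "ProcessorPool" : {'InstanceID' : "ProcessorPool/0"},
                 "DiskPool" : {'InstanceID' : diskid},
                 "NetworkPool" : {'InstanceID' : "NetworkPool/%s" %
                                  test_network }}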
+        rasd = [ "%s/mem" % test_dom,
+                 "%s/proc" % test_dom,
+                 "%s/%s" %(test_dom, test_disk),
+                 "%s/%s" % (test_dom, test_mac) ]
+    for k, v in pool.iteritems():
+        status, inst = get_instance(options.ip, k, v, options.virt)
If get_instance() returns an error, you should break out of the loop here. The test is only returning the status of the last iteration through the loop. If a previous iteration fails, but the last iteration succeeds, the test returns a false positive.
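For example:

    for k, v in pool.iteritems():
        status, inst = get_instance(options.ip, k, v, options.virt)
        if status != PASS:
            break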
+
+        status = verify_rasd(options.ip, "ResourceAllocationFromPool",
+                             k, options.virt, inst.InstanceID,
+                             rasd)
Same here - you'll need to trap the status and then break from the loop in case of error.

--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com