[PATCH] [TEST] 2# Update RAFP.01 for LXC support, also add DiskPool and NetworkPool verification

# HG changeset patch
# User Guolian Yun <yunguol@cn.ibm.com>
# Date 1213777121 -28800
# Node ID 09f95362435b3fcfc1e37445bb3b394a1ec59885
# Parent f4a167c62403c8aaaac5127d0c984c74ea863344
[TEST] 2# Update RAFP.01 for LXC support, also add DiskPool and NetworkPool verification

2# Test and pass for Xen, KVM and LXC

Signed-off-by: Guolian Yun <yunguol@cn.ibm.com>

diff -r f4a167c62403 -r 09f95362435b suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py
--- a/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py	Tue Jun 17 13:20:47 2008 +0800
+++ b/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py	Wed Jun 18 16:18:41 2008 +0800
@@ -26,17 +26,64 @@
 from VirtLib import utils
 from XenKvmLib import assoc
 from XenKvmLib import enumclass
+from XenKvmLib.test_doms import destroy_and_undefine_all
+from XenKvmLib.vxml import get_class
 from XenKvmLib.classes import get_typed_class
 from CimTest import Globals
 from CimTest.Globals import logger, do_main
+from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf, \
+create_netpool_conf
 from CimTest.ReturnCodes import PASS, FAIL, XFAIL
 
-sup_types = ['Xen', 'XenFV', 'KVM']
+sup_types = ['Xen', 'XenFV', 'KVM', 'LXC']
+
+test_dom = "RAFP_dom"
+test_vcpus = 1
+test_mem = 128
+test_mac = "00:11:22:33:44:aa"
+
+def setup_env(server, virt):
+    destroy_and_undefine_all(server)
+    vsxml = None
+    if virt == "Xen":
+        test_disk = "xvda"
+    else:
+        test_disk = "hda"
+
+    virtxml = get_class(virt)
+    if virt == 'LXC':
+        vsxml = virtxml(test_dom)
+    else:
+        vsxml = virtxml(test_dom, mem=test_mem, vcpus = test_vcpus,
+                        mac = test_mac, disk = test_disk)
+    try:
+        ret = vsxml.define(server)
+        if not ret:
+            logger.error("Failed to Define the domain: %s", test_dom)
+            return FAIL, vsxml, test_disk
+
+    except Exception, details:
+        logger.error("Exception : %s", details)
+        return FAIL, vsxml, test_disk
+
+    return PASS, vsxml, test_disk
 
 @do_main(sup_types)
 def main():
     options = main.options
     status = PASS
+
+    status, vsxml, test_disk = setup_env(options.ip, options.virt)
+    if status != PASS:
+        return status
+
+    status, diskid = create_diskpool_conf(options.ip, options.virt)
+    if status != PASS:
+        return status
+
+    status, test_network = create_netpool_conf(options.ip, options.virt)
+    if status != PASS:
+        return status
 
     try:
         key_list = { 'InstanceID' : "MemoryPool/0" }
@@ -57,6 +104,27 @@
     except Exception:
         logger.error(Globals.CIM_ERROR_GETINSTANCE % "ProcessorPool")
         return FAIL
+
+    try:
+        key_list = { 'InstanceID' : diskid}
+        diskpool = enumclass.getInstance(options.ip,
+                                         "DiskPool",
+                                         key_list,
+                                         options.virt)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_GETINSTANCE % "DiskPool")
+        return FAIL
+
+    try:
+        key_list = { 'InstanceID' : "NetworkPool/%s" % test_network }
+        netpool = enumclass.getInstance(options.ip,
+                                        "NetworkPool",
+                                        key_list,
+                                        options.virt)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_GETINSTANCE % "NetworkPool")
+        return FAIL
+
     try:
         memdata = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
@@ -86,6 +154,35 @@
         logger.error("ERROR: Association result error")
         status = FAIL
 
+    try:
+        diskdata = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
+                                         "DiskPool",
+                                         options.virt,
+                                         InstanceID = diskpool.InstanceID)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % diskpool.InstanceID)
+        status = FAIL
+
+    for i in range(len(diskdata)):
+        if diskdata[i].classname != get_typed_class(options.virt, "DiskResourceAllocationSettingData"):
+            logger.error("ERROR: Association result error")
+            status = FAIL
+
+    try:
+        netdata = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
+                                        "NetworkPool",
+                                        options.virt,
+                                        InstanceID = netpool.InstanceID)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % netpool.InstanceID)
+        status = FAIL
+
+    for i in range(len(netdata)):
+        if netdata[i].classname != get_typed_class(options.virt, "NetResourceAllocationSettingData"):
+            logger.error("ERROR: Association result error")
+            status = FAIL
+
+    return status

+def setup_env(server, virt):
+    destroy_and_undefine_all(server)
+    vsxml = None
+    if virt == "Xen":
+        test_disk = "xvda"
+    else:
+        test_disk = "hda"
hda doesn't apply for LXC - maybe set the value to None in that case?
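Something like this in setup_env(), perhaps (just a sketch; whatever consumes test_disk later would need to handle None):

    if virt == "Xen":
        test_disk = "xvda"
    elif virt == "LXC":
        # LXC guests don't take a disk device in this test
        test_disk = None
    else:
        test_disk = "hda"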
     try:
         key_list = { 'InstanceID' : "MemoryPool/0" }
@@ -57,6 +104,27 @@
     except Exception:
         logger.error(Globals.CIM_ERROR_GETINSTANCE % "ProcessorPool")
         return FAIL
I know this is existing code, but you'll want to be sure you call cleanup_restore() and undefine your guest whenever you fail. The same applies to all the places below where you return FAIL.
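One way to avoid repeating the teardown at every early exit is a small helper, roughly like this (cleanup_env is an illustrative name, and I'm assuming cleanup_restore(server, virt) - double-check the actual signature in common_util):

def cleanup_env(server, virt, vsxml):
    # restore the original pool config and remove the test guest
    cleanup_restore(server, virt)
    vsxml.undefine(server)

Each failure path then becomes:

    except Exception:
        logger.error(Globals.CIM_ERROR_GETINSTANCE % "ProcessorPool")
        cleanup_env(options.ip, options.virt, vsxml)
        return FAIL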
+
+    try:
+        key_list = { 'InstanceID' : diskid}
+        diskpool = enumclass.getInstance(options.ip,
+                                         "DiskPool",
+                                         key_list,
+                                         options.virt)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_GETINSTANCE % "DiskPool")
+        return FAIL
+
+    try:
+        key_list = { 'InstanceID' : "NetworkPool/%s" % test_network }
+        netpool = enumclass.getInstance(options.ip,
+                                        "NetworkPool",
+                                        key_list,
+                                        options.virt)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_GETINSTANCE % "NetworkPool")
+        return FAIL
+
LXC doesn't support disk, network, and processor resources yet, so we shouldn't check these - there aren't any resources allocated from a pool for an LXC guest. Also, this test is a bit difficult to read. I'd suggest creating a function that handles everything in the try block; you can then call that function to get each of the instances you need.
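Roughly along these lines (get_pool_instance is just an illustrative name):

def get_pool_instance(ip, virt, pool_cls, inst_id):
    # wrap the getInstance call so each pool lookup is a one-liner in main()
    try:
        key_list = { 'InstanceID' : inst_id }
        return enumclass.getInstance(ip, pool_cls, key_list, virt)
    except Exception:
        logger.error(Globals.CIM_ERROR_GETINSTANCE % pool_cls)
        return None

main() could then build the set of pools to query up front and skip the unsupported ones for LXC:

    pools = { "MemoryPool" : "MemoryPool/0" }
    if options.virt != 'LXC':
        pools["ProcessorPool"] = "ProcessorPool/0"
        pools["DiskPool"] = diskid
        pools["NetworkPool"] = "NetworkPool/%s" % test_network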
     try:
         memdata = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
@@ -86,6 +154,35 @@
         logger.error("ERROR: Association result error")
         status = FAIL
+    try:
+        diskdata = assoc.AssociatorNames(options.ip, "ResourceAllocationFromPool",
+                                         "DiskPool",
+                                         options.virt,
+                                         InstanceID = diskpool.InstanceID)
+    except Exception:
+        logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % diskpool.InstanceID)
+        status = FAIL
Something similar applies here - the association calls for each pool type are nearly identical, so you could factor them into a function.
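For example (verify_rafp_assoc is an illustrative name):

def verify_rafp_assoc(ip, virt, pool_cls, pool_inst, rasd_cls):
    # run the RAFP association for one pool and check the class of each result
    try:
        data = assoc.AssociatorNames(ip, "ResourceAllocationFromPool",
                                     pool_cls, virt,
                                     InstanceID = pool_inst.InstanceID)
    except Exception:
        logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % pool_inst.InstanceID)
        return FAIL

    for item in data:
        if item.classname != get_typed_class(virt, rasd_cls):
            logger.error("ERROR: Association result error")
            return FAIL

    return PASS

main() would then just call it once per pool, e.g. verify_rafp_assoc(options.ip, options.virt, "DiskPool", diskpool, "DiskResourceAllocationSettingData").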
+
+    for i in range(len(diskdata)):
+        if diskdata[i].classname != get_typed_class(options.virt, "DiskResourceAllocationSettingData"):
+            logger.error("ERROR: Association result error")
+            status = FAIL
You'll also want to verify you got the expected number of instances back from the association. If the association returns no instances, this for loop is skipped and the test returns a false positive.

-- 
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com
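Something like this before the classname loop would cover it (exp_rasd_count is an illustrative value the test would need to define, e.g. one RASD per defined guest):

    if len(diskdata) != exp_rasd_count:
        logger.error("Got %d DiskPool RASD instance(s), expected %d",
                     len(diskdata), exp_rasd_count)
        status = FAIL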