# HG changeset patch
# User Kaitlin Rupert <karupert(a)us.ibm.com>
# Date 1228854256 28800
# Node ID 1ac4ff9815d6812c7418f3de789eb818f23d8dc8
# Parent 46a85b43eafd05357c90b8112007c75fd317b163
[TEST] Fix RAFP 01_forward.py to work when multiple diskpools are defined.
Also update testcase to test GraphicsPool and InputPool.
As this is an almost complete re-write of this test, it's probably easier to
apply the patch and read the source file to review.
Signed-off-by: Kaitlin Rupert <karupert(a)us.ibm.com>
diff -r 46a85b43eafd -r 1ac4ff9815d6 suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py
--- a/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py Mon Dec 08 14:21:00 2008 -0800
+++ b/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py Tue Dec 09 12:24:16 2008 -0800
@@ -23,27 +23,22 @@
#
import sys
-from VirtLib import utils
-from XenKvmLib import assoc
-from XenKvmLib import enumclass
+from CimTest.Globals import logger
+from CimTest.ReturnCodes import PASS, FAIL
+from XenKvmLib.assoc import AssociatorNames
from XenKvmLib.classes import get_typed_class
-from XenKvmLib.test_doms import destroy_and_undefine_all
-from XenKvmLib.vxml import get_class
-from CimTest import Globals
-from CimTest.Globals import logger
-from XenKvmLib.const import do_main, default_pool_name, default_network_name
-from CimTest.ReturnCodes import PASS, FAIL, XFAIL
+from XenKvmLib.vxml import get_class
+from XenKvmLib.const import do_main, default_pool_name, default_network_name, \
+ LXC_netns_support
+from XenKvmLib.pool import pool_cn_to_rasd_cn, enum_pools
+from XenKvmLib.rasd import enum_rasds
+from XenKvmLib.common_util import parse_instance_id
sup_types = ['Xen', 'XenFV', 'KVM', 'LXC']
test_dom = "RAFP_dom"
-test_vcpus = 1
-test_mem = 128
-test_mac = "00:11:22:33:44:aa"
-test_npool = default_network_name
def setup_env(server, virt):
- destroy_and_undefine_all(server)
vsxml = None
if virt == "Xen":
test_disk = "xvda"
@@ -56,9 +51,7 @@
if virt == 'LXC':
vsxml = virtxml(test_dom)
else:
- vsxml = virtxml(test_dom, mem=test_mem, vcpus = test_vcpus,
- mac = test_mac, disk = test_disk,
- ntype='network', net_name = test_npool)
+ vsxml = virtxml(test_dom, disk=test_disk)
try:
ret = vsxml.cim_define(server)
@@ -72,75 +65,161 @@
return PASS, vsxml, test_disk
-def get_instance(server, pool, list, virt='Xen'):
- pool_cn = get_typed_class(virt, pool)
- try:
- inst = enumclass.GetInstance(server, pool_cn, list)
- except Exception:
- logger.error(Globals.CIM_ERROR_GETINSTANCE % pool_cn)
- return FAIL, inst
-
- return PASS, inst
+def init_rasd_list(virt, ip, guest_name):
+ disk_rasd_cn = get_typed_class(virt, "DiskResourceAllocationSettingData")
-def verify_rasd(server, assoc_cn, cn, virt, list, rasd):
- try:
- assoc_cn = get_typed_class(virt, assoc_cn)
- data = assoc.AssociatorNames(server,
- assoc_cn,
- get_typed_class(virt, cn),
- InstanceID=list)
- except Exception:
- logger.error(Globals.CIM_ERROR_ASSOCIATORNAMES % cn)
+ rasd_insts = {}
+
+ rasds, status = enum_rasds(virt, ip)
+ if status != PASS:
+ logger.error("Enum RASDs failed")
+ return rasd_insts, status
+
+ for rasd_cn, rasd_list in rasds.iteritems():
+ if virt == "LXC" and rasd_cn == disk_rasd_cn:
+ continue
+
+ for rasd in rasd_list:
+ guest, dev, status = parse_instance_id(rasd.InstanceID)
+ if status != PASS:
+                logger.error("Unable to parse InstanceID: %s" % rasd.InstanceID)
+ return rasd_insts, FAIL
+
+ if guest == guest_name:
+ rasd_insts[rasd.Classname] = rasd
+
+ return rasd_insts, PASS
+
+def filter_pool_list(virt, list, cn):
+ diskp_cn = get_typed_class(virt, "DiskPool")
+ netp_cn = get_typed_class(virt, "NetworkPool")
+
+ if cn == diskp_cn:
+ exp_id = default_pool_name
+ elif cn == netp_cn:
+ exp_id = default_network_name
+ else:
+ return None, PASS
+
+ if len(list) < 1:
+ logger.error("%s did not return any instances", cn)
+ return None, FAIL
+
+ for inst in list:
+ guest, id, status = parse_instance_id(inst.InstanceID)
+ if status != PASS:
+ logger.error("Unable to parse InstanceID: %s" % inst.InstanceID)
+ return None, FAIL
+
+ if id == exp_id:
+ return inst, PASS
+
+ return None, FAIL
+
+
+def init_pool_list(virt, ip):
+ pool_insts = {}
+
+ pools, status = enum_pools(virt, ip)
+ if status != PASS:
+ return pool_insts, status
+
+ for pool_cn, pool_list in pools.iteritems():
+ inst, status = filter_pool_list(virt, pool_list, pool_cn)
+ if status != PASS:
+ logger.error("Unable to find exp %s inst", pool_cn)
+ return pool_insts, FAIL
+
+ if inst is None:
+ if len(pool_list) != 1:
+ logger.error("Got %d %s, exp 1", len(pool_list), pool_cn)
+ return pool_insts, FAIL
+ inst = pool_list[0]
+
+ pool_insts[pool_cn] = inst
+
+ if len(pool_insts) != len(pools):
+ logger.error("Got %d pool insts, exp %d", len(pool_insts), len(pools))
+ return pool_insts, FAIL
+
+ if virt == "LXC":
+ diskp_cn = get_typed_class(virt, "DiskPool")
+ del pool_insts[diskp_cn]
+
+ if LXC_netns_support is False:
+ netp_cn = get_typed_class(virt, "NetworkPool")
+ del pool_insts[netp_cn]
+
+ return pool_insts, PASS
+
+def verify_rasd(enum_list, rasds, rasd_cn, guest_name):
+ status = FAIL
+
+ for rasd in enum_list:
+ guest, dev, status = parse_instance_id(rasd['InstanceID'])
+ if status != PASS:
+            logger.error("Unable to parse InstanceID: %s", rasd['InstanceID'])
+ return status
+
+ if guest != guest_name:
+ continue
+
+ exp_rasd = rasds[rasd_cn]
+
+ if rasd['InstanceID'] == exp_rasd.InstanceID:
+ status = PASS
+ else:
+ logger.info("Got %s instead of %s" % (rasd['InstanceID'],
+ exp_rasd.InstanceID))
+ status = FAIL
+
+ if status != PASS:
+ logger.error("RASD with id %s not returned", exp_rasd.InstanceID)
return FAIL
- if len(data) < 1:
- logger.error("Return NULL, expect at least one instance")
- return FAIL
-
- for item in data:
- if item['InstanceID'] == rasd[cn]:
- logger.info("%s InstanceID match - expect %s, got %s" \
- % (cn, rasd[cn], item['InstanceID']))
- return PASS
- logger.error("RASD instance with InstanceID %s not found" % rasd[cn])
- return FAIL
-
+ return PASS
+
@do_main(sup_types)
def main():
options = main.options
status = PASS
-
status, vsxml, test_disk = setup_env(options.ip, options.virt)
if status != PASS:
vsxml.undefine(options.ip)
return status
-
- diskp_id = "DiskPool/%s" % default_pool_name
- if options.virt == 'LXC':
-        pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"} }
- rasd = { "MemoryPool" : "%s/mem" % test_dom }
- else:
-        pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"},
-                 "ProcessorPool" : {'InstanceID' : "ProcessorPool/0"},
- "DiskPool" : {'InstanceID' : diskp_id},
-                 "NetworkPool" : {'InstanceID' : "NetworkPool/%s" \
- % test_npool }}
- rasd = { "MemoryPool" : "%s/mem" % test_dom,
- "ProcessorPool" : "%s/proc" % test_dom,
- "DiskPool" : "%s/%s" %(test_dom, test_disk),
- "NetworkPool" : "%s/%s" % (test_dom, test_mac) }
+ try:
+ rasds, status = init_rasd_list(options.virt, options.ip, test_dom)
+ if status != PASS:
+ raise Exception("Unable to build rasd instance list")
- for k, v in pool.iteritems():
- status, inst = get_instance(options.ip, k, v, options.virt)
+ pools, status = init_pool_list(options.virt, options.ip)
if status != PASS:
- break
- status = verify_rasd(options.ip, "ResourceAllocationFromPool",
- k, options.virt, inst.InstanceID,
- rasd)
- if status != PASS:
- break
+ raise Exception("Unable to build pool instance list")
+
+ if len(rasds) != len(pools):
+ raise Exception("%d RASD insts != %d pool insts" % (len(rasds),
+ len(pools)))
+
+ assoc_cn = get_typed_class(options.virt, "ResourceAllocationFromPool")
+ for pool_cn, pool in pools.iteritems():
+ data = AssociatorNames(options.ip,
+ assoc_cn,
+ pool_cn,
+ InstanceID=pool.InstanceID)
+
+ if len(data) < 1:
+                raise Exception("No RASD associated with %s" % pool.InstanceID)
+
+ rasd_cn = pool_cn_to_rasd_cn(pool_cn, options.virt)
+ status = verify_rasd(data, rasds, rasd_cn, test_dom)
+ if status != PASS:
+ raise Exception("Failed to verify RASDs")
+
+ except Exception, details:
+ logger.error(details)
+ status = FAIL
vsxml.undefine(options.ip)
return status