# HG changeset patch
# User Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1215778728 25200
# Node ID 2b57a9423f82420b386775bbdf884302d7338ba8
# Parent 9155858d4e5aa69393e652e7fd340ab12a63a153
[TEST] Updating 01_forward.py of EAFP.
1) Modified the test case to support XenFV and KVM.
2) Modified get_keys() to use the proper SystemCreationClassName.
3) Added get_id() to fetch the instances of the various logical devices so that
their DeviceID values can be used in init_list().
4) Added init_list() to build the list of inputs for the EAFP association.
5) Added eafp_list() to build the list of expected pool values used to verify
the return values of the EAFP association.
6) Added verify_eafp_values() to query the EAFP association and verify the
return values (a minimal sketch of the call follows this list).
7) Included cleanup_restore().
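
For reference, the association query that verify_eafp_values() issues for each
logical device boils down to the minimal sketch below; it only reuses helpers
that the updated test already imports, with illustrative values taken from the
test itself:

    from XenKvmLib.assoc import Associators
    from XenKvmLib.classes import get_typed_class

    server    = "localhost"     # illustrative; the test uses options.ip
    virt      = "Xen"
    test_dom  = "hd_domain"
    test_disk = "xvda"

    an   = get_typed_class(virt, "ElementAllocatedFromPool")
    cn   = get_typed_class(virt, "LogicalDisk")
    sccn = get_typed_class(virt, "ComputerSystem")

    # Ask for the ResourcePool instance(s) allocated to one device of the guest;
    # exactly one pool instance is expected per device.
    pools = Associators(server, an, cn,
                        DeviceID = "%s/%s" % (test_dom, test_disk),
                        CreationClassName = cn,
                        SystemName = test_dom,
                        SystemCreationClassName = sccn,
                        virt = virt)
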
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
diff -r 9155858d4e5a -r 2b57a9423f82 suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py Fri Jul 11 04:59:54 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py Fri Jul 11 05:18:48 2008 -0700
@@ -44,163 +44,217 @@
import pywbem
from XenKvmLib.test_xml import testxml, testxml_bridge
from VirtLib import utils
-from XenKvmLib import assoc
-from XenKvmLib.test_doms import test_domain_function, destroy_and_undefine_all
+from XenKvmLib.assoc import Associators
+from XenKvmLib.test_doms import destroy_and_undefine_all
from XenKvmLib import devices
-from CimTest import Globals
-from CimTest.Globals import do_main
-from VirtLib.live import network_by_bridge
-from CimTest.ReturnCodes import PASS, FAIL, SKIP
+from CimTest.Globals import CIM_ERROR_ASSOCIATORS, CIM_ERROR_GETINSTANCE
+from XenKvmLib.vxml import get_class
+from XenKvmLib.vsms import RASD_TYPE_DISK, RASD_TYPE_PROC, \
+                           RASD_TYPE_MEM, RASD_TYPE_NET_ETHER
+from XenKvmLib.common_util import create_diskpool_conf, cleanup_restore, \
+                                  eafp_dpool_cap_reserve_val, eafp_mpool_reserve_val
+from XenKvmLib.classes import get_typed_class
+from XenKvmLib.logicaldevices import verify_common_pool_values, \
+                                     verify_disk_mem_proc_pool_values
+from CimTest.Globals import do_main, logger
+from VirtLib.live import network_by_bridge, virsh_nodeinfo_cpucount, \
+                         virsh_nodeinfo_memsize, virsh_dominfo_usedmem
+from CimTest.ReturnCodes import PASS, FAIL
-sup_types = ['Xen']
+sup_types = ['Xen', 'KVM', 'XenFV']
test_dom = "hd_domain"
test_mac = "00:11:22:33:44:aa"
test_vcpus = 1
-test_disk = 'xvda'
def print_error(cn, detail):
- Globals.logger.error(Globals.CIM_ERROR_GETINSTANCE, cn)
- Globals.logger.error("Exception: %s", detail)
+ logger.error(CIM_ERROR_GETINSTANCE, cn)
+ logger.error("Exception: %s", detail)
-def get_keys(cn, device_id):
+def get_keys(virt, cn, device_id):
id = "%s/%s" % (test_dom, device_id)
+ sccn = get_typed_class(virt, "ComputerSystem")
key_list = { 'DeviceID' : id,
'CreationClassName' : cn,
'SystemName' : test_dom,
- 'SystemCreationClassName' : "Xen_ComputerSystem"
+ 'SystemCreationClassName' : sccn
}
return key_list
+def get_id(server, virt, cname, id):
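+    # Fetch the device instance for the given logical device class and id;
+    # returns (PASS, instance) on success, (FAIL, None) on failure.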
+ dev = None
+ cn = get_typed_class(virt, cname)
+ try:
+ key_list = get_keys(virt, cn, id)
+ dev_class = devices.get_class(cn)
+ dev = dev_class(server, key_list)
+ except Exception,detail:
+ print_error(cn, detail)
+ return FAIL, dev
+ return PASS, dev
+
+def init_list(server, virt, test_disk):
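+    # Build a { CreationClassName : DeviceID } mapping for the disk, memory,
+    # network and processor devices of the test domain.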
+ lelist = {}
+ status, disk = get_id(server, virt, "LogicalDisk", test_disk)
+    if status != PASS:
+ return status, lelist
+
+ status, mem = get_id(server, virt, "Memory", "mem")
+    if status != PASS:
+ return status, lelist
+
+ status, net = get_id(server, virt, "NetworkPort", test_mac)
+ if status != PASS:
+ return status, lelist
+
+ status, proc = get_id(server, virt, "Processor", "0")
+ if status != PASS:
+ return status, lelist
+
+ lelist = {
+ disk.CreationClassName : disk.DeviceID, \
+ mem.CreationClassName : mem.DeviceID, \
+ net.CreationClassName : net.DeviceID, \
+ proc.CreationClassName : proc.DeviceID
+ }
+ return status, lelist
+
+def eafp_list(server, virt, diskid, d_cap, d_reserve, test_network):
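+    # Build the expected property values for the disk, processor, network and
+    # memory pools that the EAFP association should return.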
+
+ diskpool = {
+            'CCName'       : get_typed_class(virt, 'DiskPool'),
+ 'InstanceID' : diskid,
+ 'PoolID' : diskid,
+ 'ResourceType' : RASD_TYPE_DISK,
+ 'Capacity' : d_cap,
+ 'Reserved' : d_reserve,
+ 'AllocationUnits' : 'Megabytes'
+ }
+ procpool = {
+            'CCName'      : get_typed_class(virt, 'ProcessorPool'),
+            'InstanceID'  : "%s/%s" % ("ProcessorPool", "0"),
+            'PoolID'      : "%s/%s" % ("ProcessorPool", "0"),
+ 'ResourceType' : RASD_TYPE_PROC,
+ 'Capacity' : virsh_nodeinfo_cpucount(server, virt),
+ 'Reserved' : 0,
+ 'AllocationUnits' : 'Processors'
+ }
+ netpool = {
+            'CCName'      : get_typed_class(virt, 'NetworkPool'),
+            'InstanceID'  : "%s/%s" % ("NetworkPool", test_network),
+            'PoolID'      : "%s/%s" % ("NetworkPool", test_network),
+ 'ResourceType' : RASD_TYPE_NET_ETHER
+ }
+ mempool = {
+            'CCName'      : get_typed_class(virt, 'MemoryPool'),
+            'InstanceID'  : "%s/%s" % ("MemoryPool", "0"),
+            'PoolID'      : "%s/%s" % ("MemoryPool", "0"),
+ 'ResourceType' : RASD_TYPE_MEM,
+ 'Reserved' : eafp_mpool_reserve_val(server, virt),
+ 'Capacity' : virsh_nodeinfo_memsize(server, virt),
+            'AllocationUnits' : 'KiloBytes'
+ }
+ eafp_values = { 'procpool' : procpool,
+ 'diskpool' : diskpool,
+ 'netpool' : netpool,
+ 'mempool' : mempool
+ }
+ return eafp_values
+
+def verify_eafp_values(server, virt, diskid, test_network, in_pllist):
+    # Verify the EAFP association for each device in in_pllist against the
+    # expected pool values.
+ an = get_typed_class(virt, "ElementAllocatedFromPool")
+ sccn = get_typed_class(virt, "ComputerSystem")
+ status, d_cap, d_reserve = eafp_dpool_cap_reserve_val(server, virt,
+ diskid)
+ if status != PASS:
+ return FAIL
+
+ eafp_values = eafp_list(server, virt, diskid, d_cap, d_reserve, test_network)
+ for cn, devid in sorted(in_pllist.items()):
+ try:
+ assoc_info = Associators(server, an, cn,
+ DeviceID = devid,
+ CreationClassName = cn,
+ SystemName = test_dom,
+ SystemCreationClassName = sccn,
+ virt=virt)
+ if len(assoc_info) != 1:
+ logger.error("%s returned %i ResourcePool objects for "
+ "domain '%s'", an, len(assoc_info),
+ test_dom)
+ status = FAIL
+ break
+ assoc_eafp_info = assoc_info[0]
+ CCName = assoc_eafp_info.classname
+ if CCName == eafp_values['procpool']['CCName']:
+ list_values = eafp_values['procpool']
+ status = verify_disk_mem_proc_pool_values(assoc_eafp_info,
+ list_values)
+ elif CCName == eafp_values['netpool']['CCName']:
+ list_values = eafp_values['netpool']
+ status = verify_common_pool_values(assoc_eafp_info,
+ list_values)
+ elif CCName == eafp_values['diskpool']['CCName']:
+ list_values = eafp_values['diskpool']
+ status = verify_disk_mem_proc_pool_values(assoc_eafp_info,
+ list_values)
+ elif CCName == eafp_values['mempool']['CCName']:
+ list_values = eafp_values['mempool']
+ status = verify_disk_mem_proc_pool_values(assoc_eafp_info,
+ list_values)
+ else:
+ status = FAIL
+ if status != PASS:
+ break
+ except Exception, detail:
+ logger.error(CIM_ERROR_ASSOCIATORS, an)
+ logger.error("Exception: %s", detail)
+ cleanup_restore(server, virt)
+ status = FAIL
+ return status
+
+
@do_main(sup_types)
def main():
options = main.options
+ server = options.ip
+ virt = options.virt
status = PASS
- idx = 0
+ if virt == 'Xen':
+ test_disk = 'xvda'
+ else:
+ test_disk = 'hda'
+ # Getting the VS list and deleting the test_dom if it already exists.
+ destroy_and_undefine_all(server)
+ virt_type = get_class(virt)
+ vsxml = virt_type(test_dom, vcpus = test_vcpus, mac = test_mac,
+ disk = test_disk)
-# Getting the VS list and deleting the test_dom if it already exists.
- destroy_and_undefine_all(options.ip)
+ # Verify DiskPool on machine
+ status, diskid = create_diskpool_conf(server, virt)
+ if status != PASS:
+ return status
- test_xml, bridge = testxml_bridge(test_dom, vcpus = test_vcpus, \
- mac = test_mac, disk = test_disk, \
- server = options.ip)
- if bridge == None:
- Globals.logger.error("Unable to find virtual bridge")
- return SKIP
-
- if test_xml == None:
- Globals.logger.error("Guest xml not created properly")
- return FAIL
-
- virt_network = network_by_bridge(bridge, options.ip)
- if virt_network == None:
- Globals.logger.error("No virtual network found for bridge %s", bridge)
- return SKIP
-
- ret = test_domain_function(test_xml, options.ip, cmd = "create")
+ ret = vsxml.create(server)
if not ret:
- Globals.logger.error("Failed to Create the dom: %s", test_dom)
+ logger.error("Failed to Create the dom: '%s'", test_dom)
return FAIL
- try:
- cn = "Xen_LogicalDisk"
- key_list = get_keys(cn, test_disk)
- disk = devices.Xen_LogicalDisk(options.ip, key_list)
- except Exception,detail:
- print_error(cn, detail)
- return FAIL
+ status, lelist = init_list(server, virt, test_disk)
+ if status != PASS:
+ cleanup_restore(server, virt)
+ vsxml.destroy(server)
+ return status
- try:
- cn = "Xen_Memory"
- key_list = get_keys(cn, "mem")
- mem = devices.Xen_Memory(options.ip, key_list)
- except Exception,detail:
- print_error(cn, detail)
- return FAIL
-
- try:
- cn = "Xen_NetworkPort"
- key_list = get_keys(cn, test_mac)
- net = devices.Xen_NetworkPort(options.ip, key_list)
- except Exception,detail:
- print_error(cn, detail)
- return FAIL
-
- try:
- cn = "Xen_Processor"
- key_list = get_keys(cn, "0")
- proc = devices.Xen_Processor(options.ip, key_list)
- except Exception,detail:
- print_error(cn, detail)
- return FAIL
-
- netpool_id = "NetworkPool/%s" % virt_network
-
- lelist = {
- "Xen_LogicalDisk" : disk.DeviceID, \
- "Xen_Memory" : mem.DeviceID, \
- "Xen_NetworkPort" : net.DeviceID, \
- "Xen_Processor" : proc.DeviceID
- }
- poollist = [
- "Xen_DiskPool", \
- "Xen_MemoryPool", \
- "Xen_NetworkPool", \
- "Xen_ProcessorPool"
- ]
- poolval = [
- "DiskPool/foo", \
- "MemoryPool/0", \
- netpool_id, \
- "ProcessorPool/0"
- ]
-
- sccn = "Xen_ComputerSystem"
- for cn, devid in sorted(lelist.items()):
- try:
- assoc_info = assoc.Associators(options.ip, \
- "Xen_ElementAllocatedFromPool",
- cn,
- DeviceID = devid,
- CreationClassName = cn,
- SystemName = test_dom,
- SystemCreationClassName = sccn)
- if len(assoc_info) != 1:
- Globals.logger.error("Xen_ElementAllocatedFromPool returned %i\
- ResourcePool objects for domain '%s'", len(assoc_info), test_dom)
- status = FAIL
- break
-
- if assoc_info[0].classname != poollist[idx]:
- Globals.logger.error("Classname Mismatch")
- Globals.logger.error("Returned %s instead of %s", \
- assoc_info[0].classname, \
- poollist[idx])
- status = FAIL
-
- if assoc_info[0]['InstanceID'] != poolval[idx]:
- Globals.logger.error("InstanceID Mismatch")
- Globals.logger.error("Returned %s instead of %s", \
- assoc_info[0]['InstanceID'], \
- poolval[idx])
- status = FAIL
-
- if status != PASS:
- break
- else:
- idx = idx + 1
-
- except Exception, detail:
- Globals.logger.error(Globals.CIM_ERROR_ASSOCIATORS, \
- 'Xen_ElementAllocatedFromPool')
- Globals.logger.error("Exception: %s", detail)
- status = FAIL
-
- ret = test_domain_function(test_dom, options.ip, \
- cmd = "destroy")
+ virt_network = vsxml.xml_get_net_network()
+ status = verify_eafp_values(server, virt, diskid, virt_network, lelist)
+
+ cleanup_restore(server, virt)
+ vsxml.destroy(server)
return status
if __name__ == "__main__":