[PATCH 0 of 3] [TEST] Add ElementAllocatedFromPool for XenFV & KVM support

# HG changeset patch
# User Guolian Yun <yunguol@cn.ibm.com>
# Date 1208758537 25200
# Node ID c5d39cec5989977b3c2bff149044b8d005272ea4
# Parent  0d31dff13ae341fd6515dc844ee98c7b5300b71d
[TEST] update network_by_bridge in live.py for KVM & XenFV support

Signed-off-by: Guolian Yun <yunguol@cn.ibm.com>

diff -r 0d31dff13ae3 -r c5d39cec5989 lib/VirtLib/live.py
--- a/lib/VirtLib/live.py	Fri Apr 18 17:00:16 2008 +0800
+++ b/lib/VirtLib/live.py	Sun Apr 20 23:15:37 2008 -0700
@@ -301,15 +301,15 @@ def get_bridge_from_network_xml(network,
     if len(bridge) > 1:
         return bridge[1]
 
-def network_by_bridge(bridge, server):
+def network_by_bridge(bridge, server, virt="Xen"):
     """Function returns virtual network for a given bridge"""
 
-    networks = net_list(server)
+    networks = net_list(server, virt)
     if len(networks) == 0:
         return None
 
     for network in networks:
-        if bridge == get_bridge_from_network_xml(network, server):
+        if bridge == get_bridge_from_network_xml(network, server, virt):
            return network
 
    return None
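For reference, a caller now passes the virt type through explicitly. A hypothetical invocation (the bridge name and server address are illustrative values, not taken from the patch):

    from VirtLib.live import network_by_bridge

    network = network_by_bridge("virbr0", "localhost", virt="KVM")
    if network is None:
        print "No virtual network found for that bridge"   # py2 print, matching the suite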

# HG changeset patch
# User Guolian Yun <yunguol@cn.ibm.com>
# Date 1208758684 25200
# Node ID f9c5b8d69e259c572d2302203aef0e7a192b3659
# Parent  c5d39cec5989977b3c2bff149044b8d005272ea4
[TEST] Add ElementAllocatedFromPool.01 for XenFV & KVM support

Signed-off-by: Guolian Yun <yunguol@cn.ibm.com>

diff -r c5d39cec5989 -r f9c5b8d69e25 suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py	Sun Apr 20 23:15:37 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py	Sun Apr 20 23:18:04 2008 -0700
@@ -21,56 +21,89 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 #
-# This tc is used to verify the classname, InstanceID are 
-# appropriately set for a given of the domains when verified using the 
+# This tc is used to verify the classname, InstanceID are
+# appropriately set for a given of the domains when verified using the
 # Xen_ElementAllocatedFromPool asscoiation.
 #
 # Example command for LogicalDisk w.r.t to Xen_ElementAllocatedFromPool \
 # asscoiation :
 #
-# wbemcli ain -ac Xen_ElementAllocatedFromPool 
+# wbemcli ain -ac Xen_ElementAllocatedFromPool
 # 'http://localhost:5988/root/virt:
 # Xen_LogicalDisk.CreationClassName="Xen_LogicalDisk",\
 # DeviceID="hd_domain/xvda",SystemCreationClassName="",SystemName="hd_domain"'
-# 
+#
 # Output:
 # localhost:5988/root/virt:Xen_DiskPool.InstanceID="DiskPool/foo"
-# 
+#
 # Similarly we check for Memory,Network,Processor.
 #
 # Date : 26-11-2007
 
 import sys
 import pywbem
+import os
+from distutils.file_util import move_file
 from XenKvmLib.test_xml import testxml, testxml_bridge
 from VirtLib import utils
 from XenKvmLib import assoc
-from XenKvmLib.test_doms import test_domain_function, destroy_and_undefine_all
+from XenKvmLib.test_doms import destroy_and_undefine_all
 from XenKvmLib import devices
+from XenKvmLib import vxml
+from XenKvmLib.classes import get_typed_class
 from CimTest import Globals
 from CimTest.Globals import do_main
 from VirtLib.live import network_by_bridge
 from CimTest.ReturnCodes import PASS, FAIL, SKIP
 
-sup_types = ['Xen']
+sup_types = ['Xen', 'XenFV', 'KVM']
 test_dom = "hd_domain"
 test_mac = "00:11:22:33:44:aa"
-test_vcpus = 1
-test_disk = 'xvda'
-
+test_vcpus = 1
+test_dpath = "foo"
+disk_file = '/tmp/diskpool.conf'
+back_disk_file = disk_file + "." + "01_forward"
+
+def conf_file():
+    """
+    Creating diskpool.conf file.
+    """
+    try:
+        f = open(disk_file, 'w')
+        f.write('%s %s' % (test_dpath, '/'))
+        f.close()
+    except Exception,detail:
+        Globals.logger.error("Exception: %s", detail)
+        status = SKIP
+        sys.exit(status)
+
+def clean_up_restore():
+    """
+    Restoring back the original diskpool.conf
+    file.
+    """
+    try:
+        if os.path.exists(back_disk_file):
+            os.remove(disk_file)
+            move_file(back_disk_file, disk_file)
+    except Exception, detail:
+        Globals.logger.error("Exception: %s", detail)
+        status = SKIP
+        sys.exit(status)
 
 def print_error(cn, detail):
     Globals.logger.error(Globals.CIM_ERROR_GETINSTANCE, cn)
     Globals.logger.error("Exception: %s", detail)
 
-def get_keys(cn, device_id):
+def get_keys(cn, device_id, virt='Xen'):
     id = "%s/%s" % (test_dom, device_id)
+    cs = get_typed_class(virt, "ComputerSystem")
     key_list = { 'DeviceID' : id,
                  'CreationClassName' : cn,
                  'SystemName' : test_dom,
-                 'SystemCreationClassName' : "Xen_ComputerSystem"
+                 'SystemCreationClassName' : cs
                }
     return key_list
@@ -85,93 +118,104 @@ def main():
     destroy_and_undefine_all(options.ip)
     Globals.log_param()
-    test_xml, bridge = testxml_bridge(test_dom, vcpus = test_vcpus, \
-                                      mac = test_mac, disk = test_disk, \
-                                      server = options.ip)
+    os.system("rm -f %s" % back_disk_file )
+    if not (os.path.exists(disk_file)):
+        conf_file()
+    else:
+        move_file(disk_file, back_disk_file)
+        conf_file()
+
+    if options.virt == 'Xen':
+        test_disk = 'xvda'
+    else:
+        test_disk = 'hda'
+
+    virt_xml = vxml.get_class(options.virt)
+    cxml = virt_xml(test_dom, vcpus = test_vcpus, mac = test_mac, disk = test_disk)
+    bridge = cxml.set_vbridge(options.ip)
+
     if bridge == None:
         Globals.logger.error("Unable to find virtual bridge")
-        return SKIP
-
-    if test_xml == None:
-        Globals.logger.error("Guest xml not created properly")
-        return FAIL
-
-    virt_network = network_by_bridge(bridge, options.ip)
+        return SKIP
+
+    virt_network = network_by_bridge(bridge, options.ip, options.virt)
     if virt_network == None:
         Globals.logger.error("No virtual network found for bridge %s", bridge)
-        return SKIP
-
-    ret = test_domain_function(test_xml, options.ip, cmd = "create")
+        return SKIP
+
+    ret = cxml.define(options.ip)
     if not ret:
-        Globals.logger.error("Failed to Create the dom: %s", test_dom)
-        return FAIL
-
-    try:
-        cn = "Xen_LogicalDisk"
-        key_list = get_keys(cn, test_disk)
-        disk = devices.Xen_LogicalDisk(options.ip, key_list)
-    except Exception,detail:
-        print_error(cn, detail)
-        return FAIL
-
-    try:
-        cn = "Xen_Memory"
-        key_list = get_keys(cn, "mem")
-        mem = devices.Xen_Memory(options.ip, key_list)
-    except Exception,detail:
-        print_error(cn, detail)
-        return FAIL
-
-    try:
-        cn = "Xen_NetworkPort"
-        key_list = get_keys(cn, test_mac)
-        net = devices.Xen_NetworkPort(options.ip, key_list)
-    except Exception,detail:
-        print_error(cn, detail)
-        return FAIL
-
-    try:
-        cn = "Xen_Processor"
-        key_list = get_keys(cn, "0")
-        proc = devices.Xen_Processor(options.ip, key_list)
-    except Exception,detail:
-        print_error(cn, detail)
-        return FAIL
+        Globals.logger.error('Unable to create domain %s' % test_dom)
+        return FAIL
+
+    disk_cn = get_typed_class(options.virt, "LogicalDisk")
+    mem_cn = get_typed_class(options.virt, "Memory")
+    net_cn = get_typed_class(options.virt, "NetworkPort")
+    proc_cn = get_typed_class(options.virt, "Processor")
+
+    try:
+        key_list = get_keys(disk_cn, test_disk, options.virt)
+        disk = eval('devices.' + disk_cn)(options.ip, key_list)
+    except Exception,detail:
+        print_error(disk_cn, detail)
+        return FAIL
+
+    try:
+        key_list = get_keys(mem_cn, "mem", options.virt)
+        mem = eval('devices.' + mem_cn)(options.ip, key_list)
+    except Exception,detail:
+        print_error(mem_cn, detail)
+        return FAIL
+
+    try:
+        key_list = get_keys(net_cn, test_mac, options.virt)
+        net = eval('devices.' + net_cn)(options.ip, key_list)
+    except Exception,detail:
+        print_error(net_cn, detail)
+        return FAIL
+
+    try:
+        key_list = get_keys(proc_cn, "0", options.virt)
+        proc = eval('devices.' + proc_cn)(options.ip, key_list)
+    except Exception,detail:
+        print_error(proc_cn, detail)
+        return FAIL
 
     netpool_id = "NetworkPool/%s" % virt_network
-
+    diskpool_id = "DiskPool/%s" % test_dpath
 
     lelist = {
-        "Xen_LogicalDisk" : disk.DeviceID, \
-        "Xen_Memory"      : mem.DeviceID, \
-        "Xen_NetworkPort" : net.DeviceID, \
-        "Xen_Processor"   : proc.DeviceID
+        disk_cn : disk.DeviceID, \
+        mem_cn  : mem.DeviceID, \
+        net_cn  : net.DeviceID, \
+        proc_cn : proc.DeviceID
               }
-    poollist = [
-        "Xen_DiskPool", \
-        "Xen_MemoryPool", \
-        "Xen_NetworkPool", \
-        "Xen_ProcessorPool"
+    poollist = [
+        get_typed_class(options.virt, "DiskPool"), \
+        get_typed_class(options.virt, "MemoryPool"), \
+        get_typed_class(options.virt, "NetworkPool"), \
+        get_typed_class(options.virt, "ProcessorPool")
               ]
-    poolval = [
-        "DiskPool/foo", \
+    poolval = [
+        diskpool_id, \
         "MemoryPool/0", \
         netpool_id, \
         "ProcessorPool/0"
              ]
 
-    sccn = "Xen_ComputerSystem"
+    sccn = get_typed_class(options.virt, "ComputerSystem")
     for cn, devid in sorted(lelist.items()):
         try:
             assoc_info = assoc.Associators(options.ip, \
-                                           "Xen_ElementAllocatedFromPool",
+                                           "ElementAllocatedFromPool",
                                            cn,
+                                           options.virt,
                                            DeviceID = devid,
                                            CreationClassName = cn,
                                            SystemName = test_dom,
                                            SystemCreationClassName = sccn)
             if len(assoc_info) != 1:
-                Globals.logger.error("Xen_ElementAllocatedFromPool returned %i\
- ResourcePool objects for domain '%s'", len(assoc_info), test_dom)
+                Globals.logger.error("ElementAllocatedFromPool returned %i ResourcePool \
+objects for domain '%s'", len(assoc_info), test_dom)
                 status = FAIL
                 break
@@ -181,8 +225,8 @@ def main():
                                      assoc_info[0].classname, \
                                      poollist[idx])
                 status = FAIL
-
-            if assoc_info[0]['InstanceID'] != poolval[idx]:
+
+            if assoc_info[0]['InstanceID'] != poolval[idx]:
                 Globals.logger.error("InstanceID Mismatch")
                 Globals.logger.error("Returned %s instead of %s", \
                                      assoc_info[0]['InstanceID'], \
@@ -196,13 +240,15 @@ def main():
 
         except Exception, detail:
             Globals.logger.error(Globals.CIM_ERROR_ASSOCIATORS, \
-                                 'Xen_ElementAllocatedFromPool')
+                                 'ElementAllocatedFromPool')
             Globals.logger.error("Exception: %s", detail)
             status = FAIL
 
-    ret = test_domain_function(test_dom, options.ip, \
-                               cmd = "destroy")
+    cxml.destroy(options.ip)
+    cxml.undefine(options.ip)
+    clean_up_restore()
+
     return status
-
+
 if __name__ == "__main__":
     sys.exit(main())
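For readers following the diff: get_typed_class is assumed to prefix the base CIM class name with the virtualization type, with XenFV guests reusing the Xen_ classes. A rough sketch of that assumed behavior, not the library's actual source:

    def get_typed_class(virt, basename):
        # Assumption: XenFV guests are served by the Xen_ CIM classes.
        if virt == 'XenFV':
            virt = 'Xen'
        return '%s_%s' % (virt, basename)   # e.g. ('KVM', 'Memory') -> 'KVM_Memory'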

# HG changeset patch
# User Guolian Yun <yunguol@cn.ibm.com>
# Date 1208758802 25200
# Node ID b2c1570552bbaaefa5add1d9c42bb1f737426e73
# Parent  f9c5b8d69e259c572d2302203aef0e7a192b3659
[TEST] Add ElementAllocatedFromPool.02 for XenFV & KVM support

Signed-off-by: Guolian Yun <yunguol@cn.ibm.com>

diff -r f9c5b8d69e25 -r b2c1570552bb suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py	Sun Apr 20 23:18:04 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py	Sun Apr 20 23:20:02 2008 -0700
@@ -53,21 +53,21 @@ from VirtLib import live
 from VirtLib import live
 from XenKvmLib import assoc
 from XenKvmLib import enumclass
+from XenKvmLib import vxml
+from XenKvmLib.classes import get_typed_class
 from CimTest import Globals
 from CimTest.Globals import do_main
 from CimTest.ReturnCodes import PASS, FAIL, SKIP
-from XenKvmLib.test_xml import testxml_bridge
-from XenKvmLib.test_doms import test_domain_function, destroy_and_undefine_all
+from XenKvmLib.test_doms import destroy_and_undefine_all
 from VirtLib.live import network_by_bridge
 
-sup_types = ['Xen']
+sup_types = ['Xen', 'XenFV', 'KVM']
 
 status = PASS
 test_dom = "hd_domain"
 test_mac = "00:11:22:33:44:aa"
 test_mem = 128
 test_vcpus = 4
-test_disk = "xvdb"
 test_dpath = "foo"
 disk_file = '/tmp/diskpool.conf'
 back_disk_file = disk_file + "." + "02_reverse"
@@ -105,14 +105,14 @@ def clean_up_restore(ip):
         sys.exit(status)
 
-def get_or_bail(ip, id, pool_class):
+def get_or_bail(ip, id, pool_class, virt="Xen"):
     """
         Getinstance for the CLass and return instance on success, otherwise
         exit after cleanup_restore and destroying the guest.
     """
     key_list = { 'InstanceID' : id }
     try:
-        instance = enumclass.getInstance(ip, pool_class, key_list)
+        instance = enumclass.getInstance(ip, pool_class, key_list, virt)
     except Exception, detail:
         Globals.logger.error(Globals.CIM_ERROR_GETINSTANCE, '%s', pool_class)
         Globals.logger.error("Exception: %s", detail)
@@ -127,22 +127,22 @@ def print_error(field, ret_val, req_val)
     Globals.logger.error("%s Mismatch", field)
     Globals.logger.error("Returned %s instead of %s", ret_val, req_val)
 
-def init_list(ip, disk, mem, net, proc):
+def init_list(ip, disk, mem, net, proc, virt="Xen"):
     """
         Creating the lists that will be used for comparisons.
     """
     pllist = {
-        "Xen_DiskPool"     : disk.InstanceID, \
-        "Xen_MemoryPool"   : mem.InstanceID, \
-        "Xen_NetworkPool"  : net.InstanceID, \
-        "Xen_ProcessorPool": proc.InstanceID
+        get_typed_class(virt, "DiskPool")     : disk.InstanceID, \
+        get_typed_class(virt, "MemoryPool")   : mem.InstanceID, \
+        get_typed_class(virt, "NetworkPool")  : net.InstanceID, \
+        get_typed_class(virt, "ProcessorPool"): proc.InstanceID
              }
     cllist = [
-        "Xen_LogicalDisk", \
-        "Xen_Memory", \
-        "Xen_NetworkPort", \
-        "Xen_Processor"
+        get_typed_class(virt, "LogicalDisk"), \
+        get_typed_class(virt, "Memory"), \
+        get_typed_class(virt, "NetworkPort"), \
+        get_typed_class(virt, "Processor")
             ]
     prop_list = ["%s/%s" % (test_dom, test_disk), test_disk, \
                  "%s/%s" % (test_dom, "mem"), test_mem, \
@@ -162,17 +162,17 @@ def get_inst_for_dom(assoc_val):
 
     return list
 
-def get_spec_fields_list(inst_list, field_name):
+def get_spec_fields_list(inst_list, field_name, virt="Xen"):
     global status
     specific_fields = { }
     if (len(inst_list)) != 1:
-        Globals.logger.error("Got %s record for Memory/Network/LogicalDisk instead of \
-1", len(inst_list))
+        Globals.logger.error("Got %s record for Memory/Network/LogicalDisk \
+instead of 1", len(inst_list))
         status = FAIL
         return
     # verifying the Name field for LogicalDisk
     try:
-        if inst_list[0]['CreationClassName'] != 'Xen_Memory':
+        if inst_list[0]['CreationClassName'] != get_typed_class(virt, 'Memory'):
             field_value = inst_list[0][field_name]
             if field_name == 'NetworkAddresses':
             # For network we NetworkAddresses is a list of addresses, since we
@@ -190,11 +190,13 @@ 1", len(inst_list))
 
     return specific_fields
 
-def assoc_values(assoc_list, field , list, index, specific_fields_list=""):
+def assoc_values(assoc_list, field , list, index,
+                 specific_fields_list="", virt="Xen"):
     """
         Verifying the records retruned by the associations.
     """
     global status
+    cn = get_typed_class(virt, 'Processor')
     if field == "CreationClassName":
         for i in range(len(assoc_list)):
             if assoc_list[i][field] != list[index]:
@@ -203,7 +205,7 @@ def assoc_values(assoc_list, field , li
             if status != PASS:
                 break
     elif field == "DeviceID":
-        if assoc_list[0]['CreationClassName'] == 'Xen_Processor':
+        if assoc_list[0]['CreationClassName'] == cn:
             # Verifying the list of DeviceId returned by the association
             # against the list created intially .
             for i in range(len(list)):
@@ -218,7 +220,7 @@ def assoc_values(assoc_list, field , li
                 status = FAIL
     else:
         # other specific fields verification
-        if assoc_list[0]['CreationClassName'] != 'Xen_Processor':
+        if assoc_list[0]['CreationClassName'] != cn:
             spec_field_name = specific_fields_list['field_name']
             spec_field_value = specific_fields_list['field_value']
             if spec_field_value != list[index]:
@@ -235,24 +237,28 @@ def main():
     server = options.ip
     destroy_and_undefine_all(options.ip)
     Globals.log_param()
-    test_xml, bridge = testxml_bridge(test_dom, mem = test_mem, vcpus = test_vcpus, \
-                                      mac = test_mac, disk = test_disk, server = options.ip)
+    global test_disk
+    if options.virt == 'Xen':
+        test_disk = 'xvda'
+    else:
+        test_disk = 'hda'
+
+    virt_xml = vxml.get_class(options.virt)
+    cxml = virt_xml(test_dom, vcpus = test_vcpus, mac = test_mac, disk = test_disk)
+    bridge = cxml.set_vbridge(options.ip)
+
     if bridge == None:
         Globals.logger.error("Unable to find virtual bridge")
         return SKIP
 
-    if test_xml == None:
-        Globals.logger.error("Guest xml was not created properly")
-        return FAIL
-
-    virt_network = network_by_bridge(bridge, server)
+    virt_network = network_by_bridge(bridge, options.ip, options.virt)
     if virt_network == None:
         Globals.logger.error("No virtual network found for bridge %s", bridge)
         return SKIP
 
-    ret = test_domain_function(test_xml, server, cmd = "create")
+    ret = cxml.create(options.ip)
     if not ret:
-        Globals.logger.error("Failed to Create the dom: %s", test_dom)
+        Globals.logger.error('Unable to create domain %s' % test_dom)
         return FAIL
 
     # Taking care of already existing diskconf file
@@ -264,16 +270,20 @@ def main():
     else:
         move_file(disk_file, back_disk_file)
         conf_file()
+    diskpool_cn = get_typed_class(options.virt, "DiskPool")
+    mempool_cn = get_typed_class(options.virt, "MemoryPool")
+    netpool_cn = get_typed_class(options.virt, "NetworkPool")
+    procpool_cn = get_typed_class(options.virt, "ProcessorPool")
+    diskpool = eval('enumclass.' + diskpool_cn)
+    mempool = eval('enumclass.' + mempool_cn)
+    netpool = eval('enumclass.' + netpool_cn)
+    procpool = eval('enumclass.' + procpool_cn)
     try :
-        disk = get_or_bail(server, id=diskid, \
-                           pool_class=enumclass.Xen_DiskPool)
-        mem = get_or_bail(server, id = memid, \
-                          pool_class=enumclass.Xen_MemoryPool)
+        disk = get_or_bail(server, id = diskid, pool_class = diskpool, virt=options.virt)
+        mem = get_or_bail(server, id = memid, pool_class = mempool, virt=options.virt)
         netid = "%s/%s" % ("NetworkPool", virt_network)
-        net = get_or_bail(server, id = netid, \
-                          pool_class=enumclass.Xen_NetworkPool)
-        proc = get_or_bail(server, id = procid, \
-                           pool_class=enumclass.Xen_ProcessorPool)
+        net = get_or_bail(server, id = netid, pool_class = netpool, virt=options.virt)
+        proc = get_or_bail(server, id = procid, pool_class = procpool, virt=options.virt)
 
     except Exception, detail:
         Globals.logger.error("Exception: %s", detail)
@@ -283,61 +293,70 @@ def main():
                                   cmd = "destroy")
         return status
 
-    pllist, cllist, prop_list, proc_prop = init_list(server, disk, mem, net, proc)
+    pllist, cllist, prop_list, proc_prop = init_list(server, disk, mem, net, proc, options.virt)
 
     # Looping through the pllist to get association for various pools.
     for cn, instid in sorted(pllist.items()):
         try:
-            assoc_info = assoc.Associators(server, \
-                                           "Xen_ElementAllocatedFromPool", \
-                                           cn, \
-                                           InstanceID = instid)
+            assoc_info = assoc.Associators(server,
+                                           "ElementAllocatedFromPool",
                                           cn,
+                                           options.virt,
+                                           InstanceID = instid)
             # Verifying the Creation Class name for all the records returned for each
             # pool class queried
             inst_list = get_inst_for_dom(assoc_info)
             if (len(inst_list)) == 0:
                 Globals.logger.error("Association did not return any records for \
-the specified domain: %s", test_dom)
+                the specified domain: %s", test_dom)
                 status = FAIL
                 break
-
-            assoc_values(assoc_list=inst_list, field="CreationClassName", \
-                         list=cllist, \
-                         index=loop)
+
+            assoc_values(assoc_list=inst_list, field="CreationClassName",
                         list=cllist, index=loop, virt=options.virt)
 
             # verifying the DeviceID
-            if inst_list[0]['CreationClassName'] == 'Xen_Processor':
+            proc_cn = get_typed_class(options.virt, 'Processor')
+            mem_cn = get_typed_class(options.virt, 'Memory')
+            disk_cn = get_typed_class(options.virt, 'LogicalDisk')
+            if inst_list[0]['CreationClassName'] == proc_cn:
                 # The DeviceID for the processor varies from 0 to (vcpu - 1 )
                 list_index = 0
                 assoc_values(assoc_list=inst_list, field="DeviceID", \
                              list=proc_prop, \
-                             index=list_index)
+                             index=list_index, \
+                             virt=options.virt)
             else:
                 # For LogicalDisk, Memory and NetworkPort
-                if inst_list[0]['CreationClassName'] == 'Xen_LogicalDisk':
+                if inst_list[0]['CreationClassName'] == disk_cn:
                     list_index = 0
-                elif inst_list[0]['CreationClassName'] == 'Xen_Memory':
+                elif inst_list[0]['CreationClassName'] == mem_cn:
                     list_index = 2
                 else:
                     list_index = 4  # NetworkPort
                 assoc_values(assoc_list=inst_list, field="DeviceID", \
                              list=prop_list, \
-                             index=list_index)
-                if inst_list[0]['CreationClassName'] == 'Xen_LogicalDisk':
+                             index=list_index, \
+                             virt=options.virt)
+                if inst_list[0]['CreationClassName'] == disk_cn:
                     # verifying the Name field for LogicalDisk
-                    specific_fields = get_spec_fields_list(inst_list,field_name="Name")
+                    specific_fields = get_spec_fields_list(inst_list,field_name="Name", \
+                                                           virt=options.virt)
                     list_index = 1
-                elif inst_list[0]['CreationClassName'] == 'Xen_Memory':
+                elif inst_list[0]['CreationClassName'] == mem_cn:
                     # verifying the NumberOfBlocks allocated for Memory
-                    specific_fields = get_spec_fields_list(inst_list,field_name="NumberOfBlocks")
+                    specific_fields = get_spec_fields_list(inst_list,field_name="NumberOfBlocks", \
+                                                           virt=options.virt)
                     list_index = 3
                 else:
                     # verifying the NetworkAddresses for the NetworkPort
-                    specific_fields = get_spec_fields_list(inst_list,field_name="NetworkAddresses")
+                    specific_fields = get_spec_fields_list(inst_list,field_name="NetworkAddresses", \
+                                                           virt=options.virt)
                     list_index = 5  # NetworkPort
                 assoc_values(assoc_list=inst_list, field="Other", \
                              list=prop_list, \
                              index=list_index, \
-                             specific_fields_list=specific_fields)
+                             specific_fields_list=specific_fields, \
+                             virt=options.virt)
             if status != PASS:
                 break
             else:
@@ -345,13 +364,14 @@ the specified domain: %s", test_dom)
             loop = loop + 1
         except Exception, detail:
             Globals.logger.error(Globals.CIM_ERROR_ASSOCIATORS, \
-                                 'Xen_ElementAllocatedFromPool')
+                                 'ElementAllocatedFromPool')
             Globals.logger.error("Exception: %s", detail)
             clean_up_restore(server)
             status = FAIL
 
-    ret = test_domain_function(test_dom, server, \
-                               cmd = "destroy")
+    cxml.destroy(options.ip)
+    cxml.undefine(options.ip)
+    clean_up_restore(server)
     return status
 
 if __name__ == "__main__":
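An aside on the eval('enumclass.' + classname) pattern both EAFP patches use: getattr expresses the same dynamic lookup without eval. A sketch, under the assumption that enumclass exposes one class attribute per typed CIM class (e.g. enumclass.KVM_DiskPool):

    from XenKvmLib import enumclass

    diskpool_cn = "KVM_DiskPool"                 # illustrative class name
    diskpool = getattr(enumclass, diskpool_cn)   # same as eval('enumclass.' + diskpool_cn)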

Guo Lian Yun wrote:
Signed-off-by: Guolian Yun <yunguol@cn.ibm.com>
EAFP 01_forward.py and EAFP 02_reverse.py both fail for me on F9 with KVM and updated libvirt-cim sources. EAFP 01 and 02 are failing on Xen. I'd like to see test cases fixed before we add KVM and XenFV support - otherwise, we can't test the KVM and XenFV support properly.

--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com

libvirt-cim-bounces@redhat.com wrote on 2008-04-21 23:31:45:
Guo Lian Yun wrote:
Signed-off-by: Guolian Yun <yunguol@cn.ibm.com>
EAFP 01_forward.py and EAFP 02_reverse.py both fail for me on F9 with KVM and updated libvirt-cim sources.
Could you please send the error log to me? Or do you know why these are failing? It passes for me on F9 with KVM and recent libvirt-cim sources.
EAFP 01 and 02 are failing on Xen. I'd like to see test cases fixed before we add KVM and XenFV support - otherwise, we can't test the KVM and XenFV support properly.
They fail for Xen on my own machine (F8), but pass on another machine on F8, and also pass on XenFV as tested by Deepti. Of course, I will look into this issue.
--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com

Guo Lian Yun wrote:
libvirt-cim-bounces@redhat.com wrote on 2008-04-21 23:31:45:
Guo Lian Yun wrote:
Signed-off-by: Guolian Yun <yunguol@cn.ibm.com>
EAFP 01_forward.py and EAFP 02_reverse.py both fail for me on F9 with KVM and updated libvirt-cim sources.
Could you please send the error log to me? Or do you know why these are failing? It passes for me on F9 with KVM and recent libvirt-cim sources.
Here are the logs - I should have included them originally.

EAFP 01 is failing due to the disk pool issue. You'll need to create a function that makes sure the guest's disk resource belongs to a pool. I'd suggested this in a previous mail, and I know you said you were going to add such a function. But I haven't seen a patch for that yet - did I miss it?

Also, what version of libvirt are you using? The disk pool support is new in 0.4.0. I'd assume that F9 would have libvirt 0.4.0 or higher. It's possible your F8 system does not have a newer version, which would cause this test to pass.

ElementAllocatedFromPool - 01_forward.py: FAIL
ERROR  - ElementAllocatedFromPool returned 0 ResourcePool objects for domain 'hd_domain'
CIM_ERR_FAILED: Unknown pool membership for `hd_domain/hda'

-----

ElementAllocatedFromPool - 02_reverse.py: FAIL
ERROR  - AttributeError : 'list' object has no attribute 'InstanceID'
CIM_ERR_NOT_FOUND: No such instance (foo)

This is because the InstanceID used for the diskpool is incorrect. This InstanceID will work on libvirt versions older than 0.4.0, but newer versions need to use the diskpool ID.
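Something along these lines could serve as the membership check being asked for - an untested sketch using the libvirt Python bindings; the helper name and the assumption that a dir pool's <target><path> prefixes its volumes' paths are illustrative, not from the patches:

    import libvirt
    from xml.dom import minidom

    def disk_belongs_to_pool(conn, pool_name, disk_path):
        """Return True if disk_path lives under pool_name's target directory."""
        try:
            pool = conn.storagePoolLookupByName(pool_name)
        except libvirt.libvirtError:
            return False                                  # pool is not defined at all
        doc = minidom.parseString(pool.XMLDesc(0))
        target = doc.getElementsByTagName('target')[0]
        path = target.getElementsByTagName('path')[0].firstChild.data
        return disk_path.startswith(path)

    conn = libvirt.open(None)
    print disk_belongs_to_pool(conn, 'foo', '/tmp/default-kvm-dimage')   # illustrative pool/path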
EAFP 01 and 02 are failing on Xen. I'd like to see test cases fixed before we add KVM and XenFV support - otherwise, we can't test the KVM and XenFV support properly.
They fail for Xen on my own machine (F8), but pass on another machine on F8, and also pass on XenFV as tested by Deepti. Of course, I will look into this issue.
I'm guessing the XenFV system probably has an older version of libvirt. Sorry for all the confusion! It's tricky making tests work for multiple guest types, as well as with differing package versions.

--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com

Guo Lian Yun wrote:
libvirt-cim-bounces@redhat.com wrote on 2008-04-21 23:31:45:
Guo Lian Yun wrote:
Signed-off-by: Guolian Yun <yunguol@cn.ibm.com>
EAFP 01_forward.py and EAFP 02_reverse.py both fail for me on F9 with KVM and updated libvirt-cim sources.
Could you please send the error log to me? Or do you know why these are failing? It passes for me on F9 with KVM and recent libvirt-cim sources.
libvirt-cim-bounces@redhat.com wrote on 2008-04-24 04:30:39:
Here are the logs - I should have included them originally.
EAFP 01 is failing due to the disk pool issue. You'll need to create a function that makes sure the guest's disk resource belongs to a pool. I'd suggested this in a previous mail, and I know you said you were going to add such a function. But I haven't seen a patch for that yet - did I miss it?
I've created the diskpool.conf file in the EAFP.01 patch as follows. Does any other file have to be created?

+def conf_file():
+    """
+    Creating diskpool.conf file.
+    """
+    try:
+        f = open(disk_file, 'w')
+        f.write('%s %s' % (test_dpath, '/'))
+        f.close()
+    except Exception,detail:
+        Globals.logger.error("Exception: %s", detail)
+        status = SKIP
+        sys.exit(status)
+
+def clean_up_restore():
+    """
+    Restoring back the original diskpool.conf
+    file.
+    """
+    try:
+        if os.path.exists(back_disk_file):
+            os.remove(disk_file)
+            move_file(back_disk_file, disk_file)
+    except Exception, detail:
+        Globals.logger.error("Exception: %s", detail)
+        status = SKIP
+        sys.exit(status)
Also, what version of libvirt are you using? The disk pool support is new in 0.4.0. I'd assume that F9 would have libvirt 0.4.0 or higher. It's possible your F8 system does not have a newer version, which would cause this test to pass.
ElementAllocatedFromPool - 01_forward.py: FAIL
ERROR  - ElementAllocatedFromPool returned 0 ResourcePool objects for domain 'hd_domain'
CIM_ERR_FAILED: Unknown pool membership for `hd_domain/hda'
-----
ElementAllocatedFromPool - 02_reverse.py: FAIL
ERROR  - AttributeError : 'list' object has no attribute 'InstanceID'
CIM_ERR_NOT_FOUND: No such instance (foo)
This is because the InstanceID used for the diskpool is incorrect. This InstanceID will work on libvirt versions older than 0.4.0, but newer versions need to use the diskpool ID.
If libvirt is 0.4.0 or newer, the disk pool is supported. But why can't I get the diskpool ID via wbemein? The wbemein call below returns no output (the libvirt version is 0.4.1 on my test machine):

wbemein http://root:password@localhost:5988/root/virt:KVM_DiskPool
EAFP 01 and 02 are failing on Xen. I'd like to see test cases fixed before we add KVM and XenFV support - otherwise, we can't test the KVM and XenFV support properly.
They fail for Xen on my own machine (F8), but pass on another machine on F8, and also pass on XenFV as tested by Deepti. Of course, I will look into this issue.
I'm guessing the XenFV system probably has an older version of libvirt.
Sorry for all the confusion! It's tricky making tests work for multiple guest types, as well as with differing package versions.
The patches for KVM & XenFV support all pass on my test machine, which is set up with F9; the libvirt version is either 0.4.0 or 0.4.1. So I can't reproduce the failure. Maybe I'm still confused about how to make this patch work correctly - would you please give more detailed instructions or add another patch for it? Thanks!
--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com

EAFP 01 is failing due to the disk pool issue. You'll need to create a function that makes sure the guest's disk resource belongs to a pool. I'd suggested this in a previous mail, and I know you said you were going to add such a function. But I haven't seen a patch for that yet - did I miss it?
I've created the diskpool.conf file in the EAFP.01 patch as follows. Does any other file have to be created?
You'll need to create the diskpool file if the libvirt version is <= 0.4.0. Otherwise, you'll need to create a virtual storage pool and make sure the guest's disks belong to that pool.
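A test could branch on the installed libvirt version with something like this sketch (parsing virsh --version is my choice; the 0.4.0 cutoff comes from this thread, and the helper name is made up):

    import commands
    from distutils.version import StrictVersion

    def libvirt_has_diskpool_support(cutoff="0.4.0"):
        rc, out = commands.getstatusoutput("virsh --version")
        if rc != 0:
            return False               # can't tell - fall back to diskpool.conf
        return StrictVersion(out.strip()) > StrictVersion(cutoff)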
This is because the InstanceID used for the diskpool is incorrect. This InstanceID will work on libvirt versions older than 0.4.0, but newer versions need to use the diskpool ID.
If libvirt is 0.4.0 or newer, the disk pool is supported. But why can't I get the diskpool ID via wbemein? The wbemein call below returns no output (the libvirt version is 0.4.1 on my test machine):
wbemein http://root:password@localhost:5988/root/virt:KVM_DiskPool
You'll need to create a storage pool. This process is similar to the network pool support. Here's some more info: http://libvirt.org/storage.html#StorageBackendDir
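For a dir-backed pool matching the tests' "DiskPool/foo" InstanceID, the definition could look roughly like this (the pool name and target path are assumptions for illustration; see the URL above for the authoritative format):

    import libvirt

    pool_xml = """
    <pool type='dir'>
      <name>foo</name>
      <target>
        <path>/var/lib/libvirt/images</path>
      </target>
    </pool>
    """

    conn = libvirt.open(None)
    pool = conn.storagePoolDefineXML(pool_xml, 0)   # persistent definition
    pool.create(0)                                  # start the pool now
    pool.setAutostart(1)                            # start it on libvirtd restart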
I'm guessing the XenFV system probably has an older version of libvirt.
Sorry for all the confusion! It's tricky making tests work for multiple guest types, as well as with differing package versions.
The patches for KVM & XenFV support all pass on my test machine, which is set up with F9; the libvirt version is either 0.4.0 or 0.4.1. So I can't reproduce the failure. Maybe I'm still confused about how to make this patch work correctly - would you please give more detailed instructions or add another patch for it?
On your F9 system, which version of libvirt-cim are you running? Diskpool support went in changeset 496, which is after the release rpm was tagged.

--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com