[PATCH v2 00/12] cimtest updates

This is primarily a repost of the previous series: https://www.redhat.com/archives/libvirt-cim/2013-March/msg00051.html and https://www.redhat.com/archives/libvirt-cim/2013-April/msg00014.html The primary difference in this patch vs. the previous sets is to fix the version string checking for nfs server checking in common_util.py and to add patch 12/12 which handles a problem in 'enum_volumes()' in pool.py. I assume patches 1-4 and 6-9 were reviewed without issue. So focus on 5/12 and 10-12/12. John Ferlan (12): Need to check "slp=true", not just "slp" since "slp=false" is possible Change the MAC from "99:" to "88:" Create a temporary directory for disk pool tests Use symbols as named in libvirt-cim for easier reference Fix nfs-server lookup code Fix os_status passing to reporter functions Resolve issues found in test. On Fedora systems default to using 'em1' instead of 'eth1' 19 - resolve issues found in test vxml: Add which volume could not be found to error message Add and utilize virsh_version_cmp pool: Need to handle when there are no volumes in the default pool .../Profile/04_verify_libvirt_cim_slp_profiles.py | 2 +- .../08_CreateDiskResourcePool.py | 30 +++++-- .../09_DeleteDiskPool.py | 19 +++-- .../10_create_storagevolume.py | 6 +- .../11_create_dir_storagevolume_errs.py | 5 +- .../12_create_netfs_storagevolume_errs.py | 5 +- .../13_delete_storagevolume.py | 5 +- .../14_delete_storagevolume_errs.py | 5 +- .../15_DiskPoolAutostart.py | 15 +++- .../SettingsDefineCapabilities/01_forward.py | 1 - .../libvirt-cim/cimtest/VSSD/06_duplicate_uuid.py | 2 +- .../06_addresource.py | 2 +- .../08_modifyresource.py | 4 +- .../13_refconfig_additional_devs.py | 4 +- .../15_mod_system_settings.py | 11 ++- .../18_define_sys_bridge.py | 2 +- .../19_definenetwork_ers.py | 23 +++--- .../22_addmulti_brg_interface.py | 2 +- .../27_definesystem_macvtap_dev.py | 19 ++++- .../28_definesystem_with_vsi_profile.py | 15 ++++ suites/libvirt-cim/lib/XenKvmLib/common_util.py | 93 
+++++++++++++++++----- suites/libvirt-cim/lib/XenKvmLib/const.py | 6 +- suites/libvirt-cim/lib/XenKvmLib/pool.py | 10 +-- suites/libvirt-cim/lib/XenKvmLib/rasd.py | 10 ++- suites/libvirt-cim/lib/XenKvmLib/test_xml.py | 2 +- suites/libvirt-cim/lib/XenKvmLib/vxml.py | 4 +- suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py | 6 ++ suites/libvirt-cim/main.py | 15 ++++ 28 files changed, 237 insertions(+), 86 deletions(-) -- 1.8.1.4

--- .../libvirt-cim/cimtest/Profile/04_verify_libvirt_cim_slp_profiles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/suites/libvirt-cim/cimtest/Profile/04_verify_libvirt_cim_slp_profiles.py b/suites/libvirt-cim/cimtest/Profile/04_verify_libvirt_cim_slp_profiles.py index f6cf5b1..f08d157 100644 --- a/suites/libvirt-cim/cimtest/Profile/04_verify_libvirt_cim_slp_profiles.py +++ b/suites/libvirt-cim/cimtest/Profile/04_verify_libvirt_cim_slp_profiles.py @@ -64,7 +64,7 @@ def get_slp_info(server): logger.info("Test not supported for sfcb yet ... hence skipping") return SKIP - cmd = "cimconfig -l -p | grep slp" + cmd = "cimconfig -l -p | grep slp=true" rc, out = run_remote(server, cmd) if rc != 0: logger.error("SLP is not enabled for the cimserver on '%s'", server) -- 1.8.1.4

Using a MAC starting with "99:" is not legal. That ends up being a Multicast Address since the low order bit of the first byte of the address (in hex) is one. That is, "99" is 10011001 in binary, where the least significant bit is a "1" (one) indicating a multicast. --- suites/libvirt-cim/cimtest/VSSD/06_duplicate_uuid.py | 2 +- .../cimtest/VirtualSystemManagementService/06_addresource.py | 2 +- .../cimtest/VirtualSystemManagementService/08_modifyresource.py | 4 ++-- .../VirtualSystemManagementService/13_refconfig_additional_devs.py | 4 ++-- .../cimtest/VirtualSystemManagementService/18_define_sys_bridge.py | 2 +- .../VirtualSystemManagementService/22_addmulti_brg_interface.py | 2 +- suites/libvirt-cim/lib/XenKvmLib/const.py | 6 +++--- suites/libvirt-cim/lib/XenKvmLib/test_xml.py | 2 +- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/suites/libvirt-cim/cimtest/VSSD/06_duplicate_uuid.py b/suites/libvirt-cim/cimtest/VSSD/06_duplicate_uuid.py index c5d8a22..be82335 100644 --- a/suites/libvirt-cim/cimtest/VSSD/06_duplicate_uuid.py +++ b/suites/libvirt-cim/cimtest/VSSD/06_duplicate_uuid.py @@ -40,7 +40,7 @@ from XenKvmLib.const import get_provider_version sup_types = ['Xen', 'KVM', 'XenFV', 'LXC'] default_dom = 'uuid_domain' test_dom = 'test_domain' -nmac = '99:aa:bb:cc:ee:ff' +nmac = '88:aa:bb:cc:ee:ff' duplicate_uuid_support = 915 def get_vssd(ip, virt, dom): diff --git a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/06_addresource.py b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/06_addresource.py index fdcb84a..ac18c69 100644 --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/06_addresource.py +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/06_addresource.py @@ -38,7 +38,7 @@ from XenKvmLib.common_util import create_netpool_conf, destroy_netpool sup_types = ['Xen', 'KVM', 'XenFV'] default_dom = 'rstest_domain' -nmac = '99:aa:bb:cc:ee:ff' +nmac = '88:aa:bb:cc:ee:ff' ntype = 'network' npool_name = 
default_network_name + str(random.randint(1, 100)) diff --git a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/08_modifyresource.py b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/08_modifyresource.py index df58d1f..a7c3871 100644 --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/08_modifyresource.py +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/08_modifyresource.py @@ -43,9 +43,9 @@ cpu = 2 ncpu = 1 nmem = 131072 new_int = randint(10, 99) -new_mac1 = "11:%s:22:%s:33:%s" % (new_int, new_int, new_int) +new_mac1 = "88:%s:22:%s:33:%s" % (new_int, new_int, new_int) new_int += 1 -new_mac2 = "11:%s:22:%s:33:%s" % (new_int, new_int, new_int) +new_mac2 = "88:%s:22:%s:33:%s" % (new_int, new_int, new_int) def cleanup_env(ip, cxml): cxml.destroy(ip) diff --git a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/13_refconfig_additional_devs.py b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/13_refconfig_additional_devs.py index 6274527..a47db8f 100644 --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/13_refconfig_additional_devs.py +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/13_refconfig_additional_devs.py @@ -38,8 +38,8 @@ from XenKvmLib.vxml import get_class sup_types = ['Xen', 'XenFV', 'KVM'] test_dom = 'rstest_domain' test_dom2 = 'rstest_domain2' -mac1 = '99:aa:bb:cc:ee:ff' -mac2 = '99:aa:bb:cc:ee:aa' +mac1 = '88:aa:bb:cc:ee:ff' +mac2 = '88:aa:bb:cc:ee:aa' REQUESTED_STATE = 2 TIME = "00000000000000.000000:000" diff --git a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/18_define_sys_bridge.py b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/18_define_sys_bridge.py index e02de29..66b3e45 100644 --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/18_define_sys_bridge.py +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/18_define_sys_bridge.py @@ -32,7 +32,7 @@ from XenKvmLib.common_util import create_netpool_conf, 
destroy_netpool sup_types = ['Xen', 'KVM', 'XenFV'] default_dom = 'brgtest_domain' -nmac = '99:aa:bb:cc:ee:ff' +nmac = '88:aa:bb:cc:ee:ff' npool_name = default_network_name + str(random.randint(1, 100)) brg_name = "br" + str(random.randint(1, 100)) diff --git a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/22_addmulti_brg_interface.py b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/22_addmulti_brg_interface.py index 9abb21b..36d1873 100644 --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/22_addmulti_brg_interface.py +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/22_addmulti_brg_interface.py @@ -36,7 +36,7 @@ sup_types = ['Xen', 'KVM', 'XenFV'] test_dom = "my_domain1" default_net = "my_network0" test_net = "my_network1" -test_mac = '99:aa:bb:cc:ee:ff' +test_mac = '88:aa:bb:cc:ee:ff' default_mac = "00:11:33:33:44:55" ntype = 'bridge' default_brg = 'mybr0' diff --git a/suites/libvirt-cim/lib/XenKvmLib/const.py b/suites/libvirt-cim/lib/XenKvmLib/const.py index 6dea644..6701d36 100755 --- a/suites/libvirt-cim/lib/XenKvmLib/const.py +++ b/suites/libvirt-cim/lib/XenKvmLib/const.py @@ -80,7 +80,7 @@ Xen_init_path = os.path.join(_image_dir, 'default-xen-initrd') Xen_disk_path = os.path.join(_image_dir, 'default-xen-dimage') Xen_secondary_disk_path = os.path.join(_image_dir, 'default-xen-dimage.2ND') Xen_default_disk_dev = 'xvda' -Xen_default_mac = '11:22:33:aa:bb:cc' +Xen_default_mac = '88:22:33:aa:bb:cc' # vxml.KVMXML KVM_default_emulator = '/usr/bin/qemu-system-x86_64' @@ -88,7 +88,7 @@ KVM_disk_path = os.path.join(_image_dir, 'default-kvm-dimage') KVM_secondary_disk_path = os.path.join(_image_dir, 'default-kvm-dimage.2ND') KVM_default_disk_dev = 'hda' KVM_default_cdrom_dev = 'hdc' -KVM_default_mac = '11:22:33:aa:bb:cc' +KVM_default_mac = '88:22:33:aa:bb:cc' # vxml.XenFVXML s, o = platform.architecture() @@ -109,7 +109,7 @@ LXC_default_emulator = '/usr/libexec/libvirt_lxc' LXC_default_tty = '/dev/ptmx' LXC_default_mp 
= '/tmp' LXC_default_source = '/var/lib/libvirt/images/lxc_files' -LXC_default_mac = '11:22:33:aa:bb:cc' +LXC_default_mac = '88:22:33:aa:bb:cc' LXC_netns_support = False parser = OptionParser() diff --git a/suites/libvirt-cim/lib/XenKvmLib/test_xml.py b/suites/libvirt-cim/lib/XenKvmLib/test_xml.py index b32ae4c..914dbb0 100755 --- a/suites/libvirt-cim/lib/XenKvmLib/test_xml.py +++ b/suites/libvirt-cim/lib/XenKvmLib/test_xml.py @@ -42,7 +42,7 @@ kernel_path = os.path.join(image_dir, 'default-xen-kernel') init_path = os.path.join(image_dir, 'default-xen-initrd') disk_path = os.path.join(image_dir, 'default-xen-dimage') -default_mac = '11:22:33:aa:bb:cc' +default_mac = '88:22:33:aa:bb:cc' def testxml(test_dom="domU1", mem = 128, vcpus = 1, mac = default_mac, disk_file_path = disk_path, disk = "xvda"): -- 1.8.1.4

We cannot have two storage pools referencing the same path. Since the cimtest-diskpool is already created at /var/lib/libvirt/images, we'll use that (e.g. _image_dir symbol) to create a subdirectory for the test. We'll also delete that directory when we're done. --- .../08_CreateDiskResourcePool.py | 26 +++++++++++++++++----- .../09_DeleteDiskPool.py | 19 +++++++++++----- .../15_DiskPoolAutostart.py | 11 ++++++++- 3 files changed, 43 insertions(+), 13 deletions(-) diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py index c82b5b0..636f59c 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py @@ -48,6 +48,7 @@ # -Date: 26.05.2009 import sys +import os from CimTest.Globals import logger from XenKvmLib.xm_virt_util import virsh_version from CimTest.ReturnCodes import FAIL, PASS, SKIP @@ -55,8 +56,9 @@ from XenKvmLib.const import do_main, platform_sup from XenKvmLib.classes import get_typed_class from XenKvmLib.common_util import destroy_diskpool, nfs_netfs_setup, \ netfs_cleanup -from XenKvmLib.pool import create_pool, verify_pool, undefine_diskpool -from XenKvmLib.const import get_provider_version +from XenKvmLib.pool import create_pool, verify_pool, undefine_diskpool, \ + DIR_POOL, NETFS_POOL +from XenKvmLib.const import get_provider_version, _image_dir libvirt_disk_pool_support=837 libvirt_netfs_pool_support=869 @@ -94,19 +96,20 @@ def main(): curr_cim_rev, changeset = get_provider_version(virt, server) if curr_cim_rev >= libvirt_disk_pool_support: - dp_types["DISK_POOL_DIR"] = 1 + dp_types["DISK_POOL_DIR"] = DIR_POOL if curr_cim_rev >= libvirt_netfs_pool_support: - dp_types["DISK_POOL_NETFS"] = 3 + dp_types["DISK_POOL_NETFS"] = NETFS_POOL if len(dp_types) == 0 : - logger.info("No disk 
pool types in list , hence skipping the test...") + logger.info("No disk pool types in list, hence skipping the test...") return SKIP status = FAIL pool_attr = None # For now the test case support only the creation of # dir type disk pool, netfs later change to fs and disk pooltypes etc - for key, value in dp_types.iteritems(): + for key, value in dp_types.iteritems(): + del_path = False try: logger.info("Verifying '%s'.....", key) test_pool = key @@ -115,6 +118,15 @@ def main(): if status != PASS: return FAIL + # Cannot have two pools that use the same location/path, so + # since cimtest-diskpool already exists + if key == 'DISK_POOL_DIR': + path = os.path.join(_image_dir, 'temppool') + if not os.path.exists(path): + os.mkdir(path) + del_path = True + pool_attr["Path"] = path + status = create_pool(server, virt, test_pool, pool_attr, mode_type=value, pool_type= "DiskPool") @@ -152,6 +164,8 @@ def main(): if key == 'DISK_POOL_NETFS': netfs_cleanup(server, pool_attr) + if del_path: + os.rmdir(path) return status if __name__ == "__main__": diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py index 31e3f22..8bd15e2 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py @@ -44,6 +44,7 @@ # -Date: 26.05.2009 import sys +import os import pywbem from XenKvmLib import rpcs_service from CimTest.Globals import logger @@ -88,13 +89,17 @@ def main(): elif curr_cim_rev >= libvirt_cim_child_pool_rev: + del_path = False try: - pool_attr = { "Path" : _image_dir } + path = os.path.join(_image_dir, 'deltest') + if not os.path.exists(path): + os.mkdir(path) + del_path = True + pool_attr = { "Path" : path } status = create_pool(server, virt, test_pool, pool_attr, pool_type="DiskPool", mode_type=TYPE) if status != PASS: - 
logger.error("Failed to create diskpool '%s'", test_pool) - return status + raise Exception("Failed to create diskpool '%s'" % test_pool) status = verify_pool(server, virt, test_pool, pool_attr, pool_type="DiskPool") @@ -112,8 +117,8 @@ def main(): break if pool_settings == None: - logger.error("Failed to get poolsettings for '%s'", test_pool) - return FAIL + raise Exception("Failed to get poolsettings for '%s'" \ + % test_pool) rpcs_conn.DeleteResourcePool(Pool = pool_settings) pool = EnumInstances(server, dp) @@ -127,8 +132,10 @@ def main(): logger.error("Exception details: %s", details) destroy_diskpool(server, virt, test_pool) undefine_diskpool(server, virt, test_pool) - return FAIL + status = FAIL + if del_path: + os.rmdir(path) return status if __name__ == "__main__": diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/15_DiskPoolAutostart.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/15_DiskPoolAutostart.py index b6b758c..b7e72a8 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/15_DiskPoolAutostart.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/15_DiskPoolAutostart.py @@ -23,6 +23,7 @@ # -Date: 04.14.2011 import sys +import os from pywbem import cim_types from CimTest.Globals import logger from XenKvmLib.xm_virt_util import virsh_version @@ -77,12 +78,17 @@ def main(): pool_attr = None key = 'DISK_POOL_DIR' value = 1 + del_path = False try: logger.info("Verifying '%s'.....", key) test_pool = key - pool_attr = { "Path" : "/var/lib/libvirt/images", + pool_attr = { "Path" : "/var/lib/libvirt/images/autotest", "Autostart" : cim_types.Uint16(1) } + if not os.path.exists(pool_attr["Path"]): + os.mkdir(pool_attr["Path"]) + del_path = True + status = create_pool(server, virt, test_pool, pool_attr, mode_type=value, pool_type= "DiskPool") @@ -112,6 +118,9 @@ def main(): status = FAIL logger.error("Exception details: %s", details) + if del_path: + os.rmdir(pool_attr["Path"]) + return 
status if __name__ == "__main__": -- 1.8.1.4

--- .../VirtualSystemManagementService/15_mod_system_settings.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/15_mod_system_settings.py b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/15_mod_system_settings.py index e576fca..0be3ae0 100644 --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/15_mod_system_settings.py +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/15_mod_system_settings.py @@ -37,7 +37,10 @@ from XenKvmLib.xm_virt_util import domain_list, active_domain_list, \ sup_types = ['Xen', 'KVM', 'XenFV', 'LXC'] default_dom = 'rstest_domain' cpu = 2 -RECOVERY_VAL = 3 +# AutomaticRecoveryAction +NONE_VAL = 2 +RESTART_VAL = 3 + DEFINED_STATE = 3 bug = "00008" f9_bug = "00010" @@ -113,7 +116,7 @@ def main(): raise Expcetion("Failed to get the VSSD instance for %s" % \ default_dom) - val = pywbem.cim_types.Uint16(RECOVERY_VAL) + val = pywbem.cim_types.Uint16(RESTART_VAL) inst['AutomaticRecoveryAction'] = val vssd = inst_to_mof(inst) @@ -135,9 +138,9 @@ def main(): raise Exception("Failed to get the VSSD instance for %s" % \ default_dom) - if inst.AutomaticRecoveryAction != RECOVERY_VAL: + if inst.AutomaticRecoveryAction != RESTART_VAL: logger.error("Exp AutomaticRecoveryAction=%d, got %d", - RECOVERY_VAL, inst.AutomaticRecoveryAction) + RESTART_VAL, inst.AutomaticRecoveryAction) raise Exception("%s not updated properly" % default_dom) status = PASS -- 1.8.1.4

The 'get_nfs_bin()', 'nfs_config()', and 'nfs_netfs_setup()' API's needed adjustment to handle using systemd rather than looking for /etc/init.d/nfs --- suites/libvirt-cim/lib/XenKvmLib/common_util.py | 84 ++++++++++++++++++++----- 1 file changed, 68 insertions(+), 16 deletions(-) diff --git a/suites/libvirt-cim/lib/XenKvmLib/common_util.py b/suites/libvirt-cim/lib/XenKvmLib/common_util.py index f145811..9305c5e 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/common_util.py +++ b/suites/libvirt-cim/lib/XenKvmLib/common_util.py @@ -493,27 +493,72 @@ def parse_instance_id(instid): def get_nfs_bin(server): cmd = 'cat /etc/issue | grep -v ^$ | egrep "Red Hat|Fedora"' rc, out = utils.run_remote(server, cmd) + is_systemd = 0 if rc != 0: #SLES nfs_server_bin = "/etc/init.d/nfsserver" else: + # Default, but allow it to be changed nfs_server_bin = "/etc/init.d/nfs" - return nfs_server_bin - -def nfs_config(server, nfs_server_bin): - cmd = "ps aux | grep -v -e nfsiod -e grep | grep nfsd" + # Check for 'systemd' being used + # Fedora 15 seems to have been the first place this was the default + # RHEL 7 and beyond will also use the mechanism. + # Since 'out' returns the fetched string, let's parse it a bit more + # looking for the number after the string 'release' which happens to + # be the version. + # + # On Fedora systems it's + # + # "Fedora release 18 (Spherical Cow)" + # + # while on RHEL systems it's: + # + # "Red Hat Enterprise Linux Server release 6.4 (Santiago)" + # + # This nasty thing finds 'release ' in the line, splits there and + # then uses index [1] as the point of reference of the version number + # string to split again in order to return index [0] as a string + # representation of the OS version. 
+ # + vers_str = out.split('release ')[1].split()[0] + if 'Fedora' in out and int(vers_str) >= 15 or \ + 'Red Hat' in out and float(vers_str) >= 7.0: + # Handle this differently - the command would be + # "systemctl {start|restart|status} nfs" + nfs_server_bin = "systemctl %s nfs" + is_systemd = 1 + + return nfs_server_bin, is_systemd + +def nfs_config(server, nfs_server_bin, is_systemd): + if is_systemd == 0: + cmd = "ps aux | grep -v -e nfsiod -e grep | grep nfsd" + else: + cmd = "systemctl | grep nfs-server" rc, out = utils.run_remote(server, cmd) # if NFS services is not found on the machine, start it.. if rc != PASS : # Check if NFS server is installed ... - if not os.path.exists(nfs_server_bin): - logger.error("NFS server '%s' does not seem to be installed "\ - "on '%s'", nfs_server_bin, server) - return SKIP + if is_systemd == 0: + if not os.path.exists(nfs_server_bin): + logger.error("NFS server '%s' does not seem to be installed "\ + "on '%s'", nfs_server_bin, server) + return SKIP + else: + # Works on Fedora and RHEL6 + cmd = "rpm -q nfs-utils" + rc, out = utils.run_remote(server, cmd) + if rc != PASS : + logger.error("NFS server package nfs-utils does not seem "\ + "to be installed on '%s'", server) + return SKIP # Start the nfs server ... 
- nfs_server_cmd = "%s start" % nfs_server_bin + if is_systemd == 0: + nfs_server_cmd = "%s start" % nfs_server_bin + else: + nfs_server_cmd = nfs_server_bin % "start" rc, out = utils.run_remote(server, nfs_server_cmd) if rc != PASS: logger.error("Could not start the nfsserver on '%s'", server) @@ -573,13 +618,16 @@ def netfs_cleanup(server, pool_attr): move_file(back_exports_file, exports_file) # restart the nfs server - nfs_server_bin = get_nfs_bin(server) - nfs_server_cmd = "%s restart" % nfs_server_bin + nfs_server_bin, is_systemd = get_nfs_bin(server) + if is_systemd == 0: + nfs_server_cmd = "%s restart" % nfs_server_bin + else: + nfs_server_cmd = nfs_server_bin % "restart" rc, out = utils.run_remote(server, nfs_server_cmd) if rc != PASS: logger.error("Could not restart NFS server on '%s'" % server) -def netfs_config(server, nfs_server_bin, dest_dir_to_mnt): +def netfs_config(server, nfs_server_bin, dest_dir_to_mnt, is_systemd): src_dir_for_mnt = mkdtemp() try: @@ -601,7 +649,10 @@ def netfs_config(server, nfs_server_bin, dest_dir_to_mnt): % (src_dir_for_mnt, dest_dir_to_mnt)) # Restart the nfs server.... - nfs_server_cmd = "%s restart" % nfs_server_bin + if is_systemd == 0: + nfs_server_cmd = "%s restart" % nfs_server_bin + else: + nfs_server_cmd = nfs_server_bin % "restart" rc, out = utils.run_remote(server, nfs_server_cmd) if rc != PASS: raise Exception("Could not restart NFS server on '%s'" % server) @@ -615,12 +666,12 @@ def netfs_config(server, nfs_server_bin, dest_dir_to_mnt): return PASS, src_dir_for_mnt def nfs_netfs_setup(server): - nfs_server_bin = get_nfs_bin(server) + nfs_server_bin, is_systemd = get_nfs_bin(server) dest_dir = mkdtemp() # Before going ahead verify that nfs server is available on machine.. 
- ret = nfs_config(server, nfs_server_bin) + ret = nfs_config(server, nfs_server_bin, is_systemd) if ret != PASS: logger.error("Failed to configure NFS on '%s'", server) logger.info("Trying to look for nfs mounted dir on '%s'...", server) @@ -631,7 +682,8 @@ def nfs_netfs_setup(server): else: return PASS, server, src_dir, dest_dir else: - ret, src_dir = netfs_config(server, nfs_server_bin, dest_dir) + ret, src_dir = netfs_config(server, nfs_server_bin, \ + dest_dir, is_systemd) if ret != PASS: logger.error("Failed to configure netfs on '%s'", server) return ret, None, None, None -- 1.8.1.4

If the status returned is an errno value, then os_status will be set outside of the bounds of the 'rc' array in Reporter.py which will cause a KeyError exception. Find, message, and adjust - we're failing anyway. --- suites/libvirt-cim/main.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/suites/libvirt-cim/main.py b/suites/libvirt-cim/main.py index a4e33e6..e5f3526 100644 --- a/suites/libvirt-cim/main.py +++ b/suites/libvirt-cim/main.py @@ -23,12 +23,14 @@ # from time import time +from time import sleep from optparse import OptionParser import os import sys sys.path.append('../../lib') import TestSuite from CimTest.Globals import logger, log_param +from CimTest.ReturnCodes import PASS, FAIL, XFAIL, SKIP import commands from VirtLib import groups import ConfigParser @@ -274,6 +276,14 @@ def main(options, args): os_status = os.WEXITSTATUS(status) + # status should be from our test; however, if there's an OS level + # failure, it could be set to errno. But that's included in our + # output, so just set it to FAIL; otherwise, we get a KeyError + # in Reporter.py when trying to index it's 'rc' record + if os_status not in (PASS, FAIL, XFAIL, SKIP): + logger.error("Changing os_status from %d to FAIL", os_status) + os_status = FAIL + testsuite.print_results(test['group'], test['test'], os_status, output) exec_time = end_time - start_time @@ -282,6 +292,11 @@ def main(options, args): if options.print_exec_time: print_exec_time(testsuite, exec_time, " Test execution time:") + # Give ourselves a 3 second pause before running the next + # test to help ensure we have cleaned things up properly just + # in case the cimserver is a little slow to respond + sleep(3) + testsuite.debug("%s\n" % div) if options.print_exec_time: -- 1.8.1.4

1. Use a MAC that isn't a multicast MAC. 2. Error messages changed in libvirt "0.9.8", so we need to account for that --- .../19_definenetwork_ers.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py index ad7c1ce..4cda327 100644 --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py @@ -37,7 +37,7 @@ from XenKvmLib.xm_virt_util import virsh_version sup_types = ['Xen', 'KVM', 'XenFV'] default_dom = 'brgtest_domain' -nmac = '99:aa:bb:cc:ee:ff' +nmac = '88:aa:bb:cc:ee:ff' npool_name = default_network_name + str(random.randint(1, 100)) brg_name = "br" + str(random.randint(1, 100)) @@ -89,11 +89,16 @@ def main(): expected_values['invalid']['bridge'] = inv_br_str else: + logger.error('DEBUG libvirt_version=%s', libvirt_version) if libvirt_version >= "0.7.0": expected_values['empty']['network'] = inv_empty_network expected_values['invalid']['network'] = inv_empty_network - expected_values['invalid']['bridge'] = " Failed to add tap "\ - "interface to bridge" + if libvirt_version >= "0.9.8": + expected_values['invalid']['bridge'] = "Cannot get interface "\ + "MTU on 'invalid'" + else: + expected_values['invalid']['bridge'] = " Failed to add tap "\ + "interface to bridge" tc_scen = { @@ -114,7 +119,8 @@ def main(): status = PASS for nettype in nettypes: for tc, field in tc_scen.iteritems(): - logger.error("DEBUG nettype is %s, field is %s, tc is %s", nettype, field, tc) + logger.error("DEBUG nettype is %s, field is %s, tc is %s", + nettype, field, tc) cxml = vxml.get_class(options.virt)(default_dom, mac=nmac, ntype=nettype, net_name=field) @@ -122,7 +128,7 @@ def main(): try: ret = cxml.cim_define(options.ip) - if not ret: + if not ret: status = 
verify_error(exp_rc, exp_desc, cxml) if status != PASS: # There are few libvirt version between 0.7.0 @@ -138,7 +144,7 @@ def main(): cxml.err_rc, cxml.err_desc)) continue ret = cxml.cim_start(options.ip) - if ret: + if ret: status = verify_error(exp_rc, exp_desc, cxml) cxml.undefine(options.ip) if status != PASS: -- 1.8.1.4

Fedora changed the default device naming scheme, see http://fedoraproject.org/wiki/Features/ConsistentNetworkDeviceNaming So if we're running on Fedora, let's "default" to "em1" although that doesn't guarantee that we will find what we're looking for. This code probably needs some mechanism to detect with interfaces are available, then query those interfaces to find one that supports the feature we want. --- .../27_definesystem_macvtap_dev.py | 19 ++++++++++++++++++- .../28_definesystem_with_vsi_profile.py | 15 +++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/27_definesystem_macvtap_dev.py b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/27_definesystem_macvtap_dev.py index 8c20781..36bf52f 100644 --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/27_definesystem_macvtap_dev.py +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/27_definesystem_macvtap_dev.py @@ -32,6 +32,7 @@ import sys from CimTest.Globals import logger from CimTest.ReturnCodes import FAIL, PASS, SKIP +from VirtLib import utils from XenKvmLib.classes import get_typed_class, inst_to_mof from XenKvmLib.rasd import get_default_rasds from XenKvmLib.const import do_main, get_provider_version @@ -101,6 +102,7 @@ def verify_net_rasd(ip, virt, target_dev, source_dev, guest_name): @do_main(sup_types) def main(): options = main.options + server = options.ip status = FAIL @@ -113,7 +115,22 @@ def main(): cxml = get_class(options.virt)(test_dom) target_dev = "vepa" - source_dev = "eth1" + + # Fedora changed the default device naming scheme, see + # http://fedoraproject.org/wiki/Features/ConsistentNetworkDeviceNaming + # + # So if we're running on Fedora, let's "default" to "em1" although that + # doesn't guarantee that we will find what we're looking for. 
This code + # probably needs some mechanism to detect with interfaces are available, + # then query those interfaces to find one that supports the feature we + # want. If not are found and XFAIL could be generated indicating so. + # + cmd = 'cat /etc/issue | grep -v ^$ | egrep "Fedora"' + rc, out = utils.run_remote(server, cmd) + if rc == 0: + source_dev = "em1" + else: + source_dev = "eth1" guest_defined = False diff --git a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/28_definesystem_with_vsi_profile.py b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/28_definesystem_with_vsi_profile.py index d14dda3..2b108f9 100644 --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/28_definesystem_with_vsi_profile.py +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/28_definesystem_with_vsi_profile.py @@ -35,6 +35,7 @@ import sys from CimTest.Globals import logger from CimTest.ReturnCodes import FAIL, PASS, SKIP, XFAIL_RC, XFAIL +from VirtLib import utils from XenKvmLib.classes import get_typed_class, inst_to_mof from XenKvmLib.rasd import get_default_rasds from XenKvmLib.const import do_main, get_provider_version @@ -173,6 +174,20 @@ def main(): 'VSITypeIDVersion' : "1" } + # Fedora changed the default device naming scheme, see + # http://fedoraproject.org/wiki/Features/ConsistentNetworkDeviceNaming + # + # So if we're running on Fedora, let's "default" to "em1" although that + # doesn't guarantee that we will find what we're looking for. This code + # probably needs some mechanism to detect with interfaces are available, + # then query those interfaces to find one that supports the feature we + # want. If not are found and XFAIL could be generated indicating so. + # + cmd = 'cat /etc/issue | grep -v ^$ | egrep "Fedora"' + rc, out = utils.run_remote(server, cmd) + if rc == 0: + vsi_defaults['SourceDevice'] = "em1" + nrasd_cn = get_typed_class(virt, 'NetResourceAllocationSettingData') status = FAIL cxml = None -- 1.8.1.4

--- .../cimtest/VirtualSystemManagementService/19_definenetwork_ers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py index 4cda327..cf461d1 100644 --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py @@ -89,7 +89,6 @@ def main(): expected_values['invalid']['bridge'] = inv_br_str else: - logger.error('DEBUG libvirt_version=%s', libvirt_version) if libvirt_version >= "0.7.0": expected_values['empty']['network'] = inv_empty_network expected_values['invalid']['network'] = inv_empty_network -- 1.8.1.4

--- suites/libvirt-cim/lib/XenKvmLib/vxml.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/suites/libvirt-cim/lib/XenKvmLib/vxml.py b/suites/libvirt-cim/lib/XenKvmLib/vxml.py index 51a4166..82ab501 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py +++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py @@ -921,7 +921,7 @@ class KVMXML(VirtXML, VirtCIM): port_num='-1', keymap="en-us", irstype="mouse", btype="ps2", vnc_passwd=None): if not os.path.exists(disk_file_path): - logger.error('Error: Disk image does not exist') + logger.error('Error: Disk image %s does not exist', disk_file_path) sys.exit(1) VirtXML.__init__(self, 'kvm', test_dom, set_uuid(), mem, vcpus) VirtCIM.__init__(self, 'KVM', test_dom, uuid, pae, acpi, apic, disk, @@ -984,7 +984,7 @@ class XenFVXML(VirtXML, VirtCIM): keymap="en-us", irstype="mouse", btype="ps2", vnc_passwd=None): if not os.path.exists(disk_file_path): - logger.error('Error: Disk image does not exist') + logger.error('Error: Disk image %s does not exist', disk_file_path) sys.exit(1) VirtXML.__init__(self, 'xenfv', test_dom, set_uuid(), mem, vcpus) VirtCIM.__init__(self, 'XenFV', test_dom, uuid, pae, acpi, apic, disk, -- 1.8.1.4

Reviewed-by: Xu Wang <cngesaint@outlook.com> ----------------------------------------
From: jferlan@redhat.com To: libvirt-cim@redhat.com Date: Wed, 24 Apr 2013 10:09:12 -0400 Subject: [Libvirt-cim] [PATCH v2 10/12] vxml: Add which volume could not be found to error message
--- suites/libvirt-cim/lib/XenKvmLib/vxml.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/suites/libvirt-cim/lib/XenKvmLib/vxml.py b/suites/libvirt-cim/lib/XenKvmLib/vxml.py index 51a4166..82ab501 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py +++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py @@ -921,7 +921,7 @@ class KVMXML(VirtXML, VirtCIM): port_num='-1', keymap="en-us", irstype="mouse", btype="ps2", vnc_passwd=None): if not os.path.exists(disk_file_path): - logger.error('Error: Disk image does not exist') + logger.error('Error: Disk image %s does not exist', disk_file_path) sys.exit(1) VirtXML.__init__(self, 'kvm', test_dom, set_uuid(), mem, vcpus) VirtCIM.__init__(self, 'KVM', test_dom, uuid, pae, acpi, apic, disk, @@ -984,7 +984,7 @@ class XenFVXML(VirtXML, VirtCIM): keymap="en-us", irstype="mouse", btype="ps2", vnc_passwd=None): if not os.path.exists(disk_file_path): - logger.error('Error: Disk image does not exist') + logger.error('Error: Disk image %s does not exist', disk_file_path) sys.exit(1) VirtXML.__init__(self, 'xenfv', test_dom, set_uuid(), mem, vcpus) VirtCIM.__init__(self, 'XenFV', test_dom, uuid, pae, acpi, apic, disk, -- 1.8.1.4
_______________________________________________ Libvirt-cim mailing list Libvirt-cim@redhat.com https://www.redhat.com/mailman/listinfo/libvirt-cim

The string version comparisons failed when determining if "0.10.2" was a greater version than "0.4.1" (and other similar checks). Needed to create a version comparison method that did the right thing. Also, not change to 12_create_netfs_storagevolume_errs.py to use "or" logic rather than "and" logic on comparison. Other tests within the family use "or" logic. --- .../08_CreateDiskResourcePool.py | 4 ++-- .../10_create_storagevolume.py | 6 ++++-- .../11_create_dir_storagevolume_errs.py | 5 +++-- .../12_create_netfs_storagevolume_errs.py | 5 +++-- .../13_delete_storagevolume.py | 5 +++-- .../14_delete_storagevolume_errs.py | 5 +++-- .../ResourcePoolConfigurationService/15_DiskPoolAutostart.py | 4 ++-- .../cimtest/SettingsDefineCapabilities/01_forward.py | 1 - .../VirtualSystemManagementService/19_definenetwork_ers.py | 8 ++++---- suites/libvirt-cim/lib/XenKvmLib/common_util.py | 9 +++++---- suites/libvirt-cim/lib/XenKvmLib/pool.py | 6 +++--- suites/libvirt-cim/lib/XenKvmLib/rasd.py | 10 ++++++---- suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py | 6 ++++++ 13 files changed, 44 insertions(+), 30 deletions(-) diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py index 636f59c..b5ec5dc 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py @@ -50,7 +50,7 @@ import sys import os from CimTest.Globals import logger -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from CimTest.ReturnCodes import FAIL, PASS, SKIP from XenKvmLib.const import do_main, platform_sup from XenKvmLib.classes import get_typed_class @@ -89,7 +89,7 @@ def main(): dp_types = { } libvirt_version = virsh_version(server, virt) - if libvirt_version < "0.4.1": + 
if virsh_version_cmp(libvirt_version, "0.4.1") < 0: logger.info("Storage pool creation support is available in Libvirt " "version >= 0.4.1 , hence skipping the test....") return SKIP diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py index 511463b..3b0673f 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py @@ -38,7 +38,8 @@ from XenKvmLib.rasd import libvirt_rasd_storagepool_changes from XenKvmLib import rpcs_service from XenKvmLib.assoc import Associators from XenKvmLib.enumclass import GetInstance, EnumNames -from XenKvmLib.xm_virt_util import virsh_version, vol_list, vol_delete +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp, \ + vol_list, vol_delete from XenKvmLib.classes import get_typed_class, inst_to_mof from XenKvmLib.common_util import destroy_diskpool from XenKvmLib.pool import create_pool, undefine_diskpool, DIR_POOL @@ -186,7 +187,8 @@ def main(): libvirt_ver = virsh_version(server, virt) cim_rev, changeset = get_provider_version(virt, server) - if libvirt_ver < "0.4.1" or cim_rev < libvirt_rasd_storagepool_changes: + if virsh_version_cmp(libvirt_ver, "0.4.1") < 0 or \ + cim_rev < libvirt_rasd_storagepool_changes: logger.info("Storage Volume creation support is available with Libvirt" "version >= 0.4.1 and Libvirt-CIM rev '%s'", libvirt_rasd_storagepool_changes) diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/11_create_dir_storagevolume_errs.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/11_create_dir_storagevolume_errs.py index 76e1e8f..318cfa8 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/11_create_dir_storagevolume_errs.py +++ 
b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/11_create_dir_storagevolume_errs.py @@ -36,7 +36,7 @@ from CimTest.Globals import logger from XenKvmLib import rpcs_service from pywbem.cim_types import Uint64 from pywbem import CIM_ERR_FAILED, CIMError -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from CimTest.ReturnCodes import FAIL, PASS, SKIP from XenKvmLib.classes import get_typed_class, inst_to_mof from XenKvmLib.rasd import libvirt_rasd_storagepool_changes @@ -125,7 +125,8 @@ def main(): libvirt_ver = virsh_version(server, virt) cim_rev, changeset = get_provider_version(virt, server) - if libvirt_ver < "0.4.1" or cim_rev < libvirt_rasd_storagepool_changes: + if virsh_version_cmp(libvirt_ver, "0.4.1") < 0 or \ + cim_rev < libvirt_rasd_storagepool_changes: logger.info("Storage Volume creation support is available with Libvirt" "version >= 0.4.1 and Libvirt-CIM rev '%s'", libvirt_rasd_storagepool_changes) diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/12_create_netfs_storagevolume_errs.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/12_create_netfs_storagevolume_errs.py index 004af9f..215727f 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/12_create_netfs_storagevolume_errs.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/12_create_netfs_storagevolume_errs.py @@ -36,7 +36,7 @@ from CimTest.ReturnCodes import FAIL, PASS, SKIP from XenKvmLib.const import do_main, platform_sup, get_provider_version from XenKvmLib.rasd import libvirt_rasd_storagepool_changes from XenKvmLib import rpcs_service -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from XenKvmLib.classes import get_typed_class, inst_to_mof from XenKvmLib.common_util import nfs_netfs_setup, netfs_cleanup from XenKvmLib.pool import create_pool, NETFS_POOL, get_diskpool, 
\ @@ -123,7 +123,8 @@ def main(): libvirt_ver = virsh_version(server, virt) cim_rev, changeset = get_provider_version(virt, server) - if libvirt_ver < "0.4.1" and cim_rev < libvirt_rasd_storagepool_changes: + if virsh_version_cmp(libvirt_ver, "0.4.1") < 0 or \ + cim_rev < libvirt_rasd_storagepool_changes: logger.info("Storage Volume creation support is available with Libvirt" "version >= 0.4.1 and Libvirt-CIM rev '%s'", libvirt_rasd_storagepool_changes) diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/13_delete_storagevolume.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/13_delete_storagevolume.py index d7a6365..3578532 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/13_delete_storagevolume.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/13_delete_storagevolume.py @@ -29,7 +29,7 @@ import sys from CimTest.Globals import logger from CimTest.ReturnCodes import FAIL, PASS, SKIP -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from XenKvmLib.const import do_main, platform_sup, get_provider_version, \ default_pool_name, _image_dir from XenKvmLib import rpcs_service @@ -50,7 +50,8 @@ def main(): libvirt_ver = virsh_version(server, virt) cim_rev, changeset = get_provider_version(virt, server) - if libvirt_ver < "0.4.1" or cim_rev < libvirt_rasd_spool_del_changes: + if virsh_version_cmp(libvirt_ver, "0.4.1") < 0 or \ + cim_rev < libvirt_rasd_spool_del_changes: logger.info("Storage Volume deletion support is available with Libvirt" "version >= 0.4.1 and Libvirt-CIM rev '%s'", libvirt_rasd_spool_del_changes) diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/14_delete_storagevolume_errs.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/14_delete_storagevolume_errs.py index 9e33215..d7ed5ad 100644 --- 
a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/14_delete_storagevolume_errs.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/14_delete_storagevolume_errs.py @@ -33,7 +33,7 @@ from VirtLib import utils from CimTest.Globals import logger from pywbem import CIM_ERR_FAILED, CIM_ERR_INVALID_PARAMETER, CIMError from CimTest.ReturnCodes import FAIL, PASS, SKIP -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from XenKvmLib.const import do_main, platform_sup, get_provider_version,\ default_pool_name, _image_dir from XenKvmLib import rpcs_service @@ -119,7 +119,8 @@ def main(): libvirt_ver = virsh_version(server, virt) cim_rev, changeset = get_provider_version(virt, server) - if libvirt_ver < "0.4.1" or cim_rev < libvirt_rasd_spool_del_changes: + if virsh_version_cmp(libvirt_ver, "0.4.1") < 0 or \ + cim_rev < libvirt_rasd_spool_del_changes: logger.info("Storage Volume deletion support is available with Libvirt" "version >= 0.4.1 and Libvirt-CIM rev '%s'", libvirt_rasd_spool_del_changes) diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/15_DiskPoolAutostart.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/15_DiskPoolAutostart.py index b7e72a8..3cf6321 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/15_DiskPoolAutostart.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/15_DiskPoolAutostart.py @@ -26,7 +26,7 @@ import sys import os from pywbem import cim_types from CimTest.Globals import logger -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from CimTest.ReturnCodes import FAIL, PASS, SKIP from XenKvmLib.const import do_main, platform_sup from XenKvmLib.classes import get_typed_class @@ -62,7 +62,7 @@ def main(): dp_types = { } libvirt_version = virsh_version(server, virt) - if libvirt_version < "0.4.1": + if 
virsh_version_cmp(libvirt_version, "0.4.1") < 0: logger.info("Storage pool creation support is available in Libvirt " "version >= 0.4.1 , hence skipping the test....") return SKIP diff --git a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py index 555e3c1..dd19ca0 100644 --- a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py +++ b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py @@ -57,7 +57,6 @@ import os from distutils.file_util import move_file from XenKvmLib import assoc from XenKvmLib import enumclass -from XenKvmLib.xm_virt_util import virsh_version from CimTest.ReturnCodes import PASS, FAIL, SKIP from CimTest.Globals import logger, CIM_ERROR_GETINSTANCE, \ CIM_ERROR_ASSOCIATORS diff --git a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py index cf461d1..19294db 100644 --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py @@ -33,7 +33,7 @@ from CimTest.Globals import logger from CimTest.ReturnCodes import FAIL, PASS from XenKvmLib.const import default_network_name, do_main, get_provider_version from XenKvmLib.common_util import create_netpool_conf, destroy_netpool -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp sup_types = ['Xen', 'KVM', 'XenFV'] default_dom = 'brgtest_domain' @@ -71,7 +71,7 @@ def main(): libvirt_version = virsh_version(options.ip, options.virt) inv_empty_network = "Network not found" if options.virt == "Xen" or options.virt == "XenFV": - if libvirt_version <= "0.3.3": + if virsh_version_cmp(libvirt_version, "0.3.3") <= 0: inv_empty_network = "no network with matching name" inv_br_str = "POST operation failed: 
(xend.err 'Device 0 (vif) " + \ @@ -89,10 +89,10 @@ def main(): expected_values['invalid']['bridge'] = inv_br_str else: - if libvirt_version >= "0.7.0": + if virsh_version_cmp(libvirt_version, "0.7.0") >= 0: expected_values['empty']['network'] = inv_empty_network expected_values['invalid']['network'] = inv_empty_network - if libvirt_version >= "0.9.8": + if virsh_version_cmp(libvirt_version, "0.9.8") >= 0: expected_values['invalid']['bridge'] = "Cannot get interface "\ "MTU on 'invalid'" else: diff --git a/suites/libvirt-cim/lib/XenKvmLib/common_util.py b/suites/libvirt-cim/lib/XenKvmLib/common_util.py index 9305c5e..f05fdc0 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/common_util.py +++ b/suites/libvirt-cim/lib/XenKvmLib/common_util.py @@ -38,7 +38,8 @@ from XenKvmLib.classes import get_typed_class from CimTest.Globals import logger, CIM_ERROR_ENUMERATE, \ CIM_ERROR_GETINSTANCE from CimTest.ReturnCodes import PASS, FAIL, XFAIL_RC, SKIP -from XenKvmLib.xm_virt_util import diskpool_list, virsh_version, net_list,\ +from XenKvmLib.xm_virt_util import diskpool_list, virsh_version,\ + virsh_version_cmp, net_list,\ domain_list, virt2uri, net_destroy from XenKvmLib.vxml import PoolXML, NetXML from VirtLib import utils @@ -308,7 +309,7 @@ def cleanup_restore(server, virt): # libvirt_version >= 0.4.1 # Hence Skipping the logic to delete the new conf file # and just returning PASS - if libvirt_version >= '0.4.1': + if virsh_version_cmp(libvirt_version, '0.4.1') >= 0: return status try: if os.path.exists(back_disk_file): @@ -365,7 +366,7 @@ def create_diskpool(server, virt='KVM', dpool=default_pool_name, def create_diskpool_conf(server, virt, dpool=default_pool_name): libvirt_version = virsh_version(server, virt) - if libvirt_version >= '0.4.1': + if virsh_version_cmp(libvirt_version, '0.4.1') >= 0: status, dpoolname = create_diskpool(server, virt, dpool) diskid = "%s/%s" % ("DiskPool", dpoolname) else: @@ -376,7 +377,7 @@ def create_diskpool_conf(server, virt, 
dpool=default_pool_name): def destroy_diskpool(server, virt, dpool): libvirt_version = virsh_version(server, virt) - if libvirt_version >= '0.4.1': + if virsh_version_cmp(libvirt_version, '0.4.1') >= 0: if dpool == None: logger.error("No disk pool specified") return FAIL diff --git a/suites/libvirt-cim/lib/XenKvmLib/pool.py b/suites/libvirt-cim/lib/XenKvmLib/pool.py index a5ca331..1a57aba 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py @@ -35,7 +35,7 @@ from XenKvmLib import rpcs_service import pywbem from CimTest.CimExt import CIMClassMOF from XenKvmLib.vxml import NetXML, PoolXML -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from XenKvmLib.vsms import RASD_TYPE_STOREVOL from XenKvmLib.common_util import destroy_diskpool @@ -183,7 +183,7 @@ def undefine_netpool(server, virt, net_name): def undefine_diskpool(server, virt, dp_name): libvirt_version = virsh_version(server, virt) - if libvirt_version >= '0.4.1': + if virsh_version_cmp(libvirt_version, '0.4.1') >= 0: if dp_name == None: return FAIL @@ -285,7 +285,7 @@ def verify_pool(server, virt, poolname, pool_attr_list, mode_type=0, ret_mode = net_xml.xml_get_netpool_mode() libvirt_version = virsh_version(server, virt) #Forward mode support was added in 0.4.2 - if libvirt_version >= '0.4.2': + if virsh_version_cmp(libvirt_version, '0.4.2') >= 0: if mode_type == 1 and ret_mode != "nat": logger.error("Error when verifying 'nat' type network") return FAIL diff --git a/suites/libvirt-cim/lib/XenKvmLib/rasd.py b/suites/libvirt-cim/lib/XenKvmLib/rasd.py index d65011e..4d4240a 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/rasd.py +++ b/suites/libvirt-cim/lib/XenKvmLib/rasd.py @@ -31,7 +31,7 @@ from XenKvmLib.assoc import Associators from XenKvmLib.const import default_pool_name, default_network_name, \ get_provider_version, default_net_type from XenKvmLib.pool import enum_volumes -from 
XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from XenKvmLib.common_util import parse_instance_id pasd_cn = 'ProcResourceAllocationSettingData' @@ -81,7 +81,8 @@ def rasd_init_list(vsxml, virt, t_disk, t_dom, t_mac, t_mem, server): libvirt_version = virsh_version(server, virt) - if virt == 'LXC' or (virt == 'XenFV' and libvirt_version < "0.6.3"): + if virt == 'LXC' or (virt == 'XenFV' and \ + virsh_version_cmp(libvirt_version, "0.6.3") < 0): point_device = "%s/%s" %(t_dom, "mouse:usb") elif virt == 'Xen': point_device = "%s/%s" %(t_dom, "mouse:xen") @@ -357,7 +358,8 @@ def get_exp_disk_rasd_len(virt, ip, rev, id): rev < libvirt_rasd_new_changes: exp_len = exp_base_num + exp_cdrom - elif rev >= libvirt_rasd_dpool_changes and libvirt_ver >= '0.4.1': + elif rev >= libvirt_rasd_dpool_changes and \ + virsh_version_cmp(libvirt_ver, '0.4.1') >= 0: volumes = enum_volumes(virt, ip) if rev >= libvirt_rasd_floppy_changes: exp_len = ((volumes * exp_base_num) + \ @@ -383,7 +385,7 @@ def get_exp_disk_rasd_len(virt, ip, rev, id): exp_len = (volumes * exp_base_num) + exp_cdrom - if virt != 'LXC' and libvirt_ver >= '0.4.1': + if virt != 'LXC' and virsh_version_cmp(libvirt_ver, '0.4.1') >= 0: if rev >= libvirt_rasd_storagepool_changes: exp_len += exp_storagevol_rasd diff --git a/suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py b/suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py index f365a54..7749fb7 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py +++ b/suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py @@ -23,6 +23,7 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import os +import re from VirtLib import utils import socket from VirtLib.live import fv_cap @@ -223,6 +224,11 @@ def network_by_bridge(bridge, server, virt="Xen"): return None +def virsh_version_cmp(version1, version2): + def normalize(v): + return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")] + return 
cmp(normalize(version1), normalize(version2)) + def virsh_version(server, virt="KVM"): cmd = "virsh -c %s -v 2>/dev/null" % virt2uri(virt) ret, out = utils.run_remote(server, cmd) -- 1.8.1.4

Reviewed-by: Xu Wang <cngesaint@outlook.com> ----------------------------------------
From: jferlan@redhat.com To: libvirt-cim@redhat.com Date: Wed, 24 Apr 2013 10:09:13 -0400 Subject: [Libvirt-cim] [PATCH v2 11/12] Add and utilize virsh_version_cmp
The string version comparisons failed when determining if "0.10.2" was a greater version than "0.4.1" (and other similar checks). Needed to create a version comparison method that did the right thing.
Also, note a change to 12_create_netfs_storagevolume_errs.py to use "or" logic rather than "and" logic on comparison. Other tests within the family use "or" logic. --- .../08_CreateDiskResourcePool.py | 4 ++-- .../10_create_storagevolume.py | 6 ++++-- .../11_create_dir_storagevolume_errs.py | 5 +++-- .../12_create_netfs_storagevolume_errs.py | 5 +++-- .../13_delete_storagevolume.py | 5 +++-- .../14_delete_storagevolume_errs.py | 5 +++-- .../ResourcePoolConfigurationService/15_DiskPoolAutostart.py | 4 ++-- .../cimtest/SettingsDefineCapabilities/01_forward.py | 1 - .../VirtualSystemManagementService/19_definenetwork_ers.py | 8 ++++---- suites/libvirt-cim/lib/XenKvmLib/common_util.py | 9 +++++---- suites/libvirt-cim/lib/XenKvmLib/pool.py | 6 +++--- suites/libvirt-cim/lib/XenKvmLib/rasd.py | 10 ++++++---- suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py | 6 ++++++ 13 files changed, 44 insertions(+), 30 deletions(-)
diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py index 636f59c..b5ec5dc 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/08_CreateDiskResourcePool.py @@ -50,7 +50,7 @@ import sys import os from CimTest.Globals import logger -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from CimTest.ReturnCodes import FAIL, PASS, SKIP from XenKvmLib.const import do_main, platform_sup from XenKvmLib.classes import get_typed_class @@ -89,7 +89,7 @@ def main(): dp_types = { }
libvirt_version = virsh_version(server, virt) - if libvirt_version < "0.4.1": + if virsh_version_cmp(libvirt_version, "0.4.1") < 0: logger.info("Storage pool creation support is available in Libvirt " "version >= 0.4.1 , hence skipping the test....") return SKIP diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py index 511463b..3b0673f 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py @@ -38,7 +38,8 @@ from XenKvmLib.rasd import libvirt_rasd_storagepool_changes from XenKvmLib import rpcs_service from XenKvmLib.assoc import Associators from XenKvmLib.enumclass import GetInstance, EnumNames -from XenKvmLib.xm_virt_util import virsh_version, vol_list, vol_delete +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp, \ + vol_list, vol_delete from XenKvmLib.classes import get_typed_class, inst_to_mof from XenKvmLib.common_util import destroy_diskpool from XenKvmLib.pool import create_pool, undefine_diskpool, DIR_POOL @@ -186,7 +187,8 @@ def main():
libvirt_ver = virsh_version(server, virt) cim_rev, changeset = get_provider_version(virt, server) - if libvirt_ver < "0.4.1" or cim_rev < libvirt_rasd_storagepool_changes: + if virsh_version_cmp(libvirt_ver, "0.4.1") < 0 or \ + cim_rev < libvirt_rasd_storagepool_changes: logger.info("Storage Volume creation support is available with Libvirt" "version >= 0.4.1 and Libvirt-CIM rev '%s'", libvirt_rasd_storagepool_changes) diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/11_create_dir_storagevolume_errs.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/11_create_dir_storagevolume_errs.py index 76e1e8f..318cfa8 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/11_create_dir_storagevolume_errs.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/11_create_dir_storagevolume_errs.py @@ -36,7 +36,7 @@ from CimTest.Globals import logger from XenKvmLib import rpcs_service from pywbem.cim_types import Uint64 from pywbem import CIM_ERR_FAILED, CIMError -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from CimTest.ReturnCodes import FAIL, PASS, SKIP from XenKvmLib.classes import get_typed_class, inst_to_mof from XenKvmLib.rasd import libvirt_rasd_storagepool_changes @@ -125,7 +125,8 @@ def main():
libvirt_ver = virsh_version(server, virt) cim_rev, changeset = get_provider_version(virt, server) - if libvirt_ver < "0.4.1" or cim_rev < libvirt_rasd_storagepool_changes: + if virsh_version_cmp(libvirt_ver, "0.4.1") < 0 or \ + cim_rev < libvirt_rasd_storagepool_changes: logger.info("Storage Volume creation support is available with Libvirt" "version >= 0.4.1 and Libvirt-CIM rev '%s'", libvirt_rasd_storagepool_changes) diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/12_create_netfs_storagevolume_errs.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/12_create_netfs_storagevolume_errs.py index 004af9f..215727f 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/12_create_netfs_storagevolume_errs.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/12_create_netfs_storagevolume_errs.py @@ -36,7 +36,7 @@ from CimTest.ReturnCodes import FAIL, PASS, SKIP from XenKvmLib.const import do_main, platform_sup, get_provider_version from XenKvmLib.rasd import libvirt_rasd_storagepool_changes from XenKvmLib import rpcs_service -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from XenKvmLib.classes import get_typed_class, inst_to_mof from XenKvmLib.common_util import nfs_netfs_setup, netfs_cleanup from XenKvmLib.pool import create_pool, NETFS_POOL, get_diskpool, \ @@ -123,7 +123,8 @@ def main():
libvirt_ver = virsh_version(server, virt) cim_rev, changeset = get_provider_version(virt, server) - if libvirt_ver < "0.4.1" and cim_rev < libvirt_rasd_storagepool_changes: + if virsh_version_cmp(libvirt_ver, "0.4.1") < 0 or \ + cim_rev < libvirt_rasd_storagepool_changes: logger.info("Storage Volume creation support is available with Libvirt" "version >= 0.4.1 and Libvirt-CIM rev '%s'", libvirt_rasd_storagepool_changes) diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/13_delete_storagevolume.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/13_delete_storagevolume.py index d7a6365..3578532 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/13_delete_storagevolume.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/13_delete_storagevolume.py @@ -29,7 +29,7 @@ import sys from CimTest.Globals import logger from CimTest.ReturnCodes import FAIL, PASS, SKIP -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from XenKvmLib.const import do_main, platform_sup, get_provider_version, \ default_pool_name, _image_dir from XenKvmLib import rpcs_service @@ -50,7 +50,8 @@ def main():
libvirt_ver = virsh_version(server, virt) cim_rev, changeset = get_provider_version(virt, server) - if libvirt_ver < "0.4.1" or cim_rev < libvirt_rasd_spool_del_changes: + if virsh_version_cmp(libvirt_ver, "0.4.1") < 0 or \ + cim_rev < libvirt_rasd_spool_del_changes: logger.info("Storage Volume deletion support is available with Libvirt" "version >= 0.4.1 and Libvirt-CIM rev '%s'", libvirt_rasd_spool_del_changes) diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/14_delete_storagevolume_errs.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/14_delete_storagevolume_errs.py index 9e33215..d7ed5ad 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/14_delete_storagevolume_errs.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/14_delete_storagevolume_errs.py @@ -33,7 +33,7 @@ from VirtLib import utils from CimTest.Globals import logger from pywbem import CIM_ERR_FAILED, CIM_ERR_INVALID_PARAMETER, CIMError from CimTest.ReturnCodes import FAIL, PASS, SKIP -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from XenKvmLib.const import do_main, platform_sup, get_provider_version,\ default_pool_name, _image_dir from XenKvmLib import rpcs_service @@ -119,7 +119,8 @@ def main():
libvirt_ver = virsh_version(server, virt) cim_rev, changeset = get_provider_version(virt, server) - if libvirt_ver < "0.4.1" or cim_rev < libvirt_rasd_spool_del_changes: + if virsh_version_cmp(libvirt_ver, "0.4.1") < 0 or \ + cim_rev < libvirt_rasd_spool_del_changes: logger.info("Storage Volume deletion support is available with Libvirt" "version >= 0.4.1 and Libvirt-CIM rev '%s'", libvirt_rasd_spool_del_changes) diff --git a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/15_DiskPoolAutostart.py b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/15_DiskPoolAutostart.py index b7e72a8..3cf6321 100644 --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/15_DiskPoolAutostart.py +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/15_DiskPoolAutostart.py @@ -26,7 +26,7 @@ import sys import os from pywbem import cim_types from CimTest.Globals import logger -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from CimTest.ReturnCodes import FAIL, PASS, SKIP from XenKvmLib.const import do_main, platform_sup from XenKvmLib.classes import get_typed_class @@ -62,7 +62,7 @@ def main(): dp_types = { }
libvirt_version = virsh_version(server, virt) - if libvirt_version < "0.4.1": + if virsh_version_cmp(libvirt_version, "0.4.1") < 0: logger.info("Storage pool creation support is available in Libvirt " "version >= 0.4.1 , hence skipping the test....") return SKIP diff --git a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py index 555e3c1..dd19ca0 100644 --- a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py +++ b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py @@ -57,7 +57,6 @@ import os from distutils.file_util import move_file from XenKvmLib import assoc from XenKvmLib import enumclass -from XenKvmLib.xm_virt_util import virsh_version from CimTest.ReturnCodes import PASS, FAIL, SKIP from CimTest.Globals import logger, CIM_ERROR_GETINSTANCE, \ CIM_ERROR_ASSOCIATORS diff --git a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py index cf461d1..19294db 100644 --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/19_definenetwork_ers.py @@ -33,7 +33,7 @@ from CimTest.Globals import logger from CimTest.ReturnCodes import FAIL, PASS from XenKvmLib.const import default_network_name, do_main, get_provider_version from XenKvmLib.common_util import create_netpool_conf, destroy_netpool -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp
sup_types = ['Xen', 'KVM', 'XenFV'] default_dom = 'brgtest_domain' @@ -71,7 +71,7 @@ def main(): libvirt_version = virsh_version(options.ip, options.virt) inv_empty_network = "Network not found" if options.virt == "Xen" or options.virt == "XenFV": - if libvirt_version <= "0.3.3": + if virsh_version_cmp(libvirt_version, "0.3.3") <= 0: inv_empty_network = "no network with matching name"
inv_br_str = "POST operation failed: (xend.err 'Device 0 (vif) " + \ @@ -89,10 +89,10 @@ def main():
expected_values['invalid']['bridge'] = inv_br_str else: - if libvirt_version >= "0.7.0": + if virsh_version_cmp(libvirt_version, "0.7.0") >= 0: expected_values['empty']['network'] = inv_empty_network expected_values['invalid']['network'] = inv_empty_network - if libvirt_version >= "0.9.8": + if virsh_version_cmp(libvirt_version, "0.9.8") >= 0: expected_values['invalid']['bridge'] = "Cannot get interface "\ "MTU on 'invalid'" else: diff --git a/suites/libvirt-cim/lib/XenKvmLib/common_util.py b/suites/libvirt-cim/lib/XenKvmLib/common_util.py index 9305c5e..f05fdc0 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/common_util.py +++ b/suites/libvirt-cim/lib/XenKvmLib/common_util.py @@ -38,7 +38,8 @@ from XenKvmLib.classes import get_typed_class from CimTest.Globals import logger, CIM_ERROR_ENUMERATE, \ CIM_ERROR_GETINSTANCE from CimTest.ReturnCodes import PASS, FAIL, XFAIL_RC, SKIP -from XenKvmLib.xm_virt_util import diskpool_list, virsh_version, net_list,\ +from XenKvmLib.xm_virt_util import diskpool_list, virsh_version,\ + virsh_version_cmp, net_list,\ domain_list, virt2uri, net_destroy from XenKvmLib.vxml import PoolXML, NetXML from VirtLib import utils @@ -308,7 +309,7 @@ def cleanup_restore(server, virt): # libvirt_version >= 0.4.1 # Hence Skipping the logic to delete the new conf file # and just returning PASS - if libvirt_version >= '0.4.1': + if virsh_version_cmp(libvirt_version, '0.4.1') >= 0: return status try: if os.path.exists(back_disk_file): @@ -365,7 +366,7 @@ def create_diskpool(server, virt='KVM', dpool=default_pool_name,
def create_diskpool_conf(server, virt, dpool=default_pool_name): libvirt_version = virsh_version(server, virt) - if libvirt_version >= '0.4.1': + if virsh_version_cmp(libvirt_version, '0.4.1') >= 0: status, dpoolname = create_diskpool(server, virt, dpool) diskid = "%s/%s" % ("DiskPool", dpoolname) else: @@ -376,7 +377,7 @@ def create_diskpool_conf(server, virt, dpool=default_pool_name):
def destroy_diskpool(server, virt, dpool): libvirt_version = virsh_version(server, virt) - if libvirt_version >= '0.4.1': + if virsh_version_cmp(libvirt_version, '0.4.1') >= 0: if dpool == None: logger.error("No disk pool specified") return FAIL diff --git a/suites/libvirt-cim/lib/XenKvmLib/pool.py b/suites/libvirt-cim/lib/XenKvmLib/pool.py index a5ca331..1a57aba 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py @@ -35,7 +35,7 @@ from XenKvmLib import rpcs_service import pywbem from CimTest.CimExt import CIMClassMOF from XenKvmLib.vxml import NetXML, PoolXML -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from XenKvmLib.vsms import RASD_TYPE_STOREVOL from XenKvmLib.common_util import destroy_diskpool
@@ -183,7 +183,7 @@ def undefine_netpool(server, virt, net_name):
def undefine_diskpool(server, virt, dp_name): libvirt_version = virsh_version(server, virt) - if libvirt_version >= '0.4.1': + if virsh_version_cmp(libvirt_version, '0.4.1') >= 0: if dp_name == None: return FAIL
@@ -285,7 +285,7 @@ def verify_pool(server, virt, poolname, pool_attr_list, mode_type=0, ret_mode = net_xml.xml_get_netpool_mode() libvirt_version = virsh_version(server, virt) #Forward mode support was added in 0.4.2 - if libvirt_version >= '0.4.2': + if virsh_version_cmp(libvirt_version, '0.4.2') >= 0: if mode_type == 1 and ret_mode != "nat": logger.error("Error when verifying 'nat' type network") return FAIL diff --git a/suites/libvirt-cim/lib/XenKvmLib/rasd.py b/suites/libvirt-cim/lib/XenKvmLib/rasd.py index d65011e..4d4240a 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/rasd.py +++ b/suites/libvirt-cim/lib/XenKvmLib/rasd.py @@ -31,7 +31,7 @@ from XenKvmLib.assoc import Associators from XenKvmLib.const import default_pool_name, default_network_name, \ get_provider_version, default_net_type from XenKvmLib.pool import enum_volumes -from XenKvmLib.xm_virt_util import virsh_version +from XenKvmLib.xm_virt_util import virsh_version, virsh_version_cmp from XenKvmLib.common_util import parse_instance_id
pasd_cn = 'ProcResourceAllocationSettingData' @@ -81,7 +81,8 @@ def rasd_init_list(vsxml, virt, t_disk, t_dom, t_mac, t_mem, server):
libvirt_version = virsh_version(server, virt)
- if virt == 'LXC' or (virt == 'XenFV' and libvirt_version < "0.6.3"): + if virt == 'LXC' or (virt == 'XenFV' and \ + virsh_version_cmp(libvirt_version, "0.6.3") < 0): point_device = "%s/%s" %(t_dom, "mouse:usb") elif virt == 'Xen': point_device = "%s/%s" %(t_dom, "mouse:xen") @@ -357,7 +358,8 @@ def get_exp_disk_rasd_len(virt, ip, rev, id): rev < libvirt_rasd_new_changes: exp_len = exp_base_num + exp_cdrom
- elif rev >= libvirt_rasd_dpool_changes and libvirt_ver >= '0.4.1': + elif rev >= libvirt_rasd_dpool_changes and \ + virsh_version_cmp(libvirt_ver, '0.4.1') >= 0: volumes = enum_volumes(virt, ip) if rev >= libvirt_rasd_floppy_changes: exp_len = ((volumes * exp_base_num) + \ @@ -383,7 +385,7 @@ def get_exp_disk_rasd_len(virt, ip, rev, id): exp_len = (volumes * exp_base_num) + exp_cdrom
- if virt != 'LXC' and libvirt_ver >= '0.4.1': + if virt != 'LXC' and virsh_version_cmp(libvirt_ver, '0.4.1') >= 0: if rev >= libvirt_rasd_storagepool_changes: exp_len += exp_storagevol_rasd
diff --git a/suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py b/suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py index f365a54..7749fb7 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py +++ b/suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py @@ -23,6 +23,7 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import os +import re from VirtLib import utils import socket from VirtLib.live import fv_cap @@ -223,6 +224,11 @@ def network_by_bridge(bridge, server, virt="Xen"):
return None
+def virsh_version_cmp(version1, version2): + def normalize(v): + return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")] + return cmp(normalize(version1), normalize(version2)) + def virsh_version(server, virt="KVM"): cmd = "virsh -c %s -v 2>/dev/null" % virt2uri(virt) ret, out = utils.run_remote(server, cmd) -- 1.8.1.4
_______________________________________________ Libvirt-cim mailing list Libvirt-cim@redhat.com https://www.redhat.com/mailman/listinfo/libvirt-cim

Failing test: HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - Failed to get associatornames according to KVM_AllocationCapabilities ERROR - Exception: list index out of range Investigation discovered that 'enum_volumes()' was not properly handling the case where there were no volumes in 'DiskPool/default'. Also function cannot return 'None' as caller 'get_exp_disk_rasd_len' assumed integer return value and attempted a multiplication resulting in an exception. --- suites/libvirt-cim/lib/XenKvmLib/pool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/suites/libvirt-cim/lib/XenKvmLib/pool.py b/suites/libvirt-cim/lib/XenKvmLib/pool.py index 1a57aba..ddbe532 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py @@ -110,8 +110,8 @@ def enum_volumes(virt, server, pooln=default_pool_name): cmd = 'virsh -c %s vol-list %s 2>/dev/null | sed -e "1,2 d" -e "$ d"' % \ (virt2uri(virt), pooln) ret, out = run_remote(server ,cmd) - if ret != 0: - return None + if ret != 0 or len(out) == 0: + return volume lines = out.split("\n") for line in lines: vol = line.split()[0] -- 1.8.1.4

Reviewed-by: Xu Wang <cngesaint@outlook.com> ----------------------------------------
From: jferlan@redhat.com To: libvirt-cim@redhat.com Date: Wed, 24 Apr 2013 10:09:14 -0400 Subject: [Libvirt-cim] [PATCH v2 12/12] pool: Need to handle when there are no volumes in the default pool
Failing test: HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - Failed to get associatornames according to KVM_AllocationCapabilities ERROR - Exception: list index out of range
Investigation discovered that 'enum_volumes()' was not properly handling the case where there were no volumes in 'DiskPool/default'.
Also function cannot return 'None' as caller 'get_exp_disk_rasd_len' assumed integer return value and attempted a multiplication resulting in an exception. --- suites/libvirt-cim/lib/XenKvmLib/pool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/suites/libvirt-cim/lib/XenKvmLib/pool.py b/suites/libvirt-cim/lib/XenKvmLib/pool.py index 1a57aba..ddbe532 100644 --- a/suites/libvirt-cim/lib/XenKvmLib/pool.py +++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py @@ -110,8 +110,8 @@ def enum_volumes(virt, server, pooln=default_pool_name): cmd = 'virsh -c %s vol-list %s 2>/dev/null | sed -e "1,2 d" -e "$ d"' % \ (virt2uri(virt), pooln) ret, out = run_remote(server ,cmd) - if ret != 0: - return None + if ret != 0 or len(out) == 0: + return volume lines = out.split("\n") for line in lines: vol = line.split()[0] -- 1.8.1.4
_______________________________________________ Libvirt-cim mailing list Libvirt-cim@redhat.com https://www.redhat.com/mailman/listinfo/libvirt-cim

I have run cimtest with these 12 patches and they solved most of bugs before. However, I found some new FAIL cases. 1. -------------------------------------------------------------------- HostSystem - 01_enum.py: FAIL ERROR - Exp KVM_HostSystem, got KVM_HostSystem ERROR - Exp localhost, got RH64wenchao CIM_ERR_INVALID_CLASS: Linux_ComputerSystem -------------------------------------------------------------------- HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - Hostname mismatch -------------------------------------------------------------------- Hostname mismatch because cimtest match localhost with RH64wenchao... Could you find out what cause it and fix it? 2. -------------------------------------------------------------------- SwitchService - 01_enum.py: XFAIL Please check if this is the expected result --- *** VSI NOT supported *** -------------------------------------------------------------------- Why XFAIL but not SKIP here? My computer does not support it. 3. -------------------------------------------------------------------- VirtualSystemManagementService - 22_addmulti_brg_interface.py: XFAIL ERROR - (1, u"CIM_ERR_FAILED: Unable to change (0) device: internal error unable to execute QEMU command 'device_add': Bus 'pci.0' does not support hotplugging") ERROR - Error invoking AddRS: add_net_res ERROR - AddResourceSettings call failed ERROR - Failed to destroy Virtual Network 'my_network1' InvokeMethod(AddResourceSettings): CIM_ERR_FAILED: Unable to change (0) device: internal error unable to execute QEMU command 'device_add': Bus 'pci.0' does not support hotplugging Bug:<00015> -------------------------------------------------------------------- VirtualSystemManagementService - 30_dynamic_disk_mod.py: XFAIL ERROR - (1, u"CIM_ERR_FAILED: Unable to change (0) device: internal error unable to execute QEMU command 'device_add': Bus 'pci.0' does not support hotplugging") ERROR - Error invoking AddRS: add_disk_res ERROR - AddResourceSettings call failed 
InvokeMethod(AddResourceSettings): CIM_ERR_FAILED: Unable to change (0) device: internal error unable to execute QEMU command 'device_add': Bus 'pci.0' does not support hotplugging -------------------------------------------------------------------- I am sorry I don't clear what caused it. 4. -------------------------------------------------------------------- VirtualSystemManagementService - 27_definesystem_macvtap_dev.py: FAIL ERROR - Got CIM error CIM_ERR_FAILED: Unable to start domain: Unable to get index for interface eth1: No such device with return code 1 ERROR - Unable to start rstest_nic InvokeMethod(RequestStateChange): CIM_ERR_FAILED: Unable to start domain: Unable to get index for interface eth1: No such device -------------------------------------------------------------------- VirtualSystemManagementService - 28_definesystem_with_vsi_profile.py: XFAIL ERROR - Got CIM error CIM_ERR_FAILED: Unable to start domain: Unable to get index for interface eth1: No such device with return code 1 ERROR - Unable to start VM *** Is VSI support available on this host? *** InvokeMethod(RequestStateChange): CIM_ERR_FAILED: Unable to start domain: Unable to get index for interface eth1: No such device -------------------------------------------------------------------- This FAIL happens because there is no eth1 interface on my computer. The name of my network interface is "eth3". So I think it had better list all network interfaces and pick available one for testing. For multicard machine there are some rule to choice one or...configure it manually?
From: jferlan@redhat.com To: libvirt-cim@redhat.com Date: Wed, 24 Apr 2013 10:09:02 -0400 Subject: [Libvirt-cim] [PATCH v2 00/12] cimtest updates
This is primarily a repost of the previous series:
https://www.redhat.com/archives/libvirt-cim/2013-March/msg00051.html
and
https://www.redhat.com/archives/libvirt-cim/2013-April/msg00014.html
The primary difference in this patch vs. the previous sets is to fix the version string checking for nfs server checking in common_util.py and to add patch 12/12 which handles a problem in 'enum_volumes()' in pool.py.
I assume patches 1-4 and 6-9 were reviewed without issue. So focus on 5/12 and 10-12/12.
John Ferlan (12): Need to check "slp=true", not just "slp" since "slp=false" is possible Change the MAC from "99:" to "88:" Create a temporary directory for disk pool tests Use symbols as named in libvirt-cim for easier reference Fix nfs-server lookup code Fix os_status passing to reporter functions Resolve issues found in test. On Fedora systems default to using 'em1' instead of 'eth1' 19 - resolve issues found in test vxml: Add which volume could not be found to error message Add and utilize virsh_version_cmp pool: Need to handle when there are no volumes in the default pool
.../Profile/04_verify_libvirt_cim_slp_profiles.py | 2 +- .../08_CreateDiskResourcePool.py | 30 +++++-- .../09_DeleteDiskPool.py | 19 +++-- .../10_create_storagevolume.py | 6 +- .../11_create_dir_storagevolume_errs.py | 5 +- .../12_create_netfs_storagevolume_errs.py | 5 +- .../13_delete_storagevolume.py | 5 +- .../14_delete_storagevolume_errs.py | 5 +- .../15_DiskPoolAutostart.py | 15 +++- .../SettingsDefineCapabilities/01_forward.py | 1 - .../libvirt-cim/cimtest/VSSD/06_duplicate_uuid.py | 2 +- .../06_addresource.py | 2 +- .../08_modifyresource.py | 4 +- .../13_refconfig_additional_devs.py | 4 +- .../15_mod_system_settings.py | 11 ++- .../18_define_sys_bridge.py | 2 +- .../19_definenetwork_ers.py | 23 +++--- .../22_addmulti_brg_interface.py | 2 +- .../27_definesystem_macvtap_dev.py | 19 ++++- .../28_definesystem_with_vsi_profile.py | 15 ++++ suites/libvirt-cim/lib/XenKvmLib/common_util.py | 93 +++++++++++++++++----- suites/libvirt-cim/lib/XenKvmLib/const.py | 6 +- suites/libvirt-cim/lib/XenKvmLib/pool.py | 10 +-- suites/libvirt-cim/lib/XenKvmLib/rasd.py | 10 ++- suites/libvirt-cim/lib/XenKvmLib/test_xml.py | 2 +- suites/libvirt-cim/lib/XenKvmLib/vxml.py | 4 +- suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py | 6 ++ suites/libvirt-cim/main.py | 15 ++++ 28 files changed, 237 insertions(+), 86 deletions(-)
-- 1.8.1.4
_______________________________________________ Libvirt-cim mailing list Libvirt-cim@redhat.com https://www.redhat.com/mailman/listinfo/libvirt-cim

On 04/28/2013 01:19 AM, WangXu wrote:
I have run cimtest with these 12 patches and they solved most of bugs before. However, I found some new FAIL cases. 1. -------------------------------------------------------------------- HostSystem - 01_enum.py: FAIL ERROR - Exp KVM_HostSystem, got KVM_HostSystem ERROR - Exp localhost, got RH64wenchao CIM_ERR_INVALID_CLASS: Linux_ComputerSystem -------------------------------------------------------------------- HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - Hostname mismatch -------------------------------------------------------------------- Hostname mismatch because cimtest match localhost with RH64wenchao... Could you find out what cause it and fix it?
Not sure this has anything to do with my set of changes - it seems more like a system configuration issue that, since it's happening on your system, is probably more easily debugged by you. Start in 01_enum.py and add various print statements in order to figure out where the issue is. My system config has an 'options.ip' = localhost and the return from "full_hostname" set to a "generated" name. I have nothing in my /etc/hosts nor in my /etc/sysconfig/network-scripts/* files.
2. -------------------------------------------------------------------- SwitchService - 01_enum.py: XFAIL Please check if this is the expected result --- *** VSI NOT supported *** -------------------------------------------------------------------- Why XFAIL but not SKIP here? My computer does not support it.
Neither does mine, but XFAIL is the original design.
3. -------------------------------------------------------------------- VirtualSystemManagementService - 22_addmulti_brg_interface.py: XFAIL ERROR - (1, u"CIM_ERR_FAILED: Unable to change (0) device: internal error unable to execute QEMU command 'device_add': Bus 'pci.0' does not support hotplugging") ERROR - Error invoking AddRS: add_net_res ERROR - AddResourceSettings call failed ERROR - Failed to destroy Virtual Network 'my_network1' InvokeMethod(AddResourceSettings): CIM_ERR_FAILED: Unable to change (0) device: internal error unable to execute QEMU command 'device_add': Bus 'pci.0' does not support hotplugging Bug:<00015>
Unrelated to my changes. The only change I made to 22*.py was the 'test_mac' value. See more below though...
-------------------------------------------------------------------- VirtualSystemManagementService - 30_dynamic_disk_mod.py: XFAIL ERROR - (1, u"CIM_ERR_FAILED: Unable to change (0) device: internal error unable to execute QEMU command 'device_add': Bus 'pci.0' does not support hotplugging") ERROR - Error invoking AddRS: add_disk_res ERROR - AddResourceSettings call failed InvokeMethod(AddResourceSettings): CIM_ERR_FAILED: Unable to change (0) device: internal error unable to execute QEMU command 'device_add': Bus 'pci.0' does not support hotplugging
I didn't make any changes here
-------------------------------------------------------------------- I am sorry, I am not clear on what caused it.
Same here :-) Although it seems as though the test is trying to add a disk to a pci.0 device/bus, but that addition is not supported for a running domain in the configuration provided by cimtest. It does make me wonder if these tests ever passed for KVM... Digging into the tests a bit, the failure for 22*.py comes as a result of a call to cimtest method 'add_net_res()'. There are two tests that call it. The other is VirtualSystemManagementService/06_addresource.py; however, there's a check in the 06*.py test which doesn't attempt the call when the domain is started, e.g.: if options.virt == "KVM": test_cases = ["define"] else: test_cases = ["define", "start"] Since test 22* attempts this call when things are started, it's a different path of support - namely 'hotplug'. After a bit of trial and error, I have a possible fix. I found that I had to change the domain configuration in the test to set "acpi=True". Then I had to change the 'add_net_res' code to query "source/@bridge" and not "source/@network" and compare against attr['virt_net'] and not attr['net_name']. The net_name (e.g. my_network1) is a network pool attribute not a domain network attribute. I'm really not sure this test worked or passed previously. I will dive into test 30*.py at some point in time. I did try the simple "acpi=True" change there, but that just caused other failures.
4. -------------------------------------------------------------------- VirtualSystemManagementService - 27_definesystem_macvtap_dev.py: FAIL ERROR - Got CIM error CIM_ERR_FAILED: Unable to start domain: Unable to get index for interface eth1: No such device with return code 1 ERROR - Unable to start rstest_nic InvokeMethod(RequestStateChange): CIM_ERR_FAILED: Unable to start domain: Unable to get index for interface eth1: No such device -------------------------------------------------------------------- VirtualSystemManagementService - 28_definesystem_with_vsi_profile.py: XFAIL ERROR - Got CIM error CIM_ERR_FAILED: Unable to start domain: Unable to get index for interface eth1: No such device with return code 1 ERROR - Unable to start VM *** Is VSI support available on this host? *** InvokeMethod(RequestStateChange): CIM_ERR_FAILED: Unable to start domain: Unable to get index for interface eth1: No such device -------------------------------------------------------------------- This FAIL happens because there is no eth1 interface on my computer. The name of my network interface is "eth3". So I think it had better list all network interfaces and pick available one for testing. For multicard machine there are some rule to choice one or...configure it manually?
Correct - there is no method now to get a/the list of network interfaces and then "choose" a valid one to execute tests. To further muddy the waters, the latest Fedora releases use 'em#' instead of 'eth#' - the commit message from my change has a pointer to the details. For now, though, the values are hardcoded. It's something I'm sure I noted in one of our exchanges already. John
From: jferlan@redhat.com To: libvirt-cim@redhat.com Date: Wed, 24 Apr 2013 10:09:02 -0400 Subject: [Libvirt-cim] [PATCH v2 00/12] cimtest updates
This is primarily a repost of the previous series:
https://www.redhat.com/archives/libvirt-cim/2013-March/msg00051.html
and
https://www.redhat.com/archives/libvirt-cim/2013-April/msg00014.html
The primary difference in this patch vs. the previous sets is to fix the version string checking for nfs server checking in common_util.py and to add patch 12/12 which handles a problem in 'enum_volumes()' in pool.py.
I assume patches 1-4 and 6-9 were reviewed without issue. So focus on 5/12 and 10-12/12.
John Ferlan (12): Need to check "slp=true", not just "slp" since "slp=false" is possible Change the MAC from "99:" to "88:" Create a temporary directory for disk pool tests Use symbols as named in libvirt-cim for easier reference Fix nfs-server lookup code Fix os_status passing to reporter functions Resolve issues found in test. On Fedora systems default to using 'em1' instead of 'eth1' 19 - resolve issues found in test vxml: Add which volume could not be found to error message Add and utilize virsh_version_cmp pool: Need to handle when there are no volumes in the default pool
.../Profile/04_verify_libvirt_cim_slp_profiles.py | 2 +- .../08_CreateDiskResourcePool.py | 30 +++++-- .../09_DeleteDiskPool.py | 19 +++-- .../10_create_storagevolume.py | 6 +- .../11_create_dir_storagevolume_errs.py | 5 +- .../12_create_netfs_storagevolume_errs.py | 5 +- .../13_delete_storagevolume.py | 5 +- .../14_delete_storagevolume_errs.py | 5 +- .../15_DiskPoolAutostart.py | 15 +++- .../SettingsDefineCapabilities/01_forward.py | 1 - .../libvirt-cim/cimtest/VSSD/06_duplicate_uuid.py | 2 +- .../06_addresource.py | 2 +- .../08_modifyresource.py | 4 +- .../13_refconfig_additional_devs.py | 4 +- .../15_mod_system_settings.py | 11 ++- .../18_define_sys_bridge.py | 2 +- .../19_definenetwork_ers.py | 23 +++--- .../22_addmulti_brg_interface.py | 2 +- .../27_definesystem_macvtap_dev.py | 19 ++++- .../28_definesystem_with_vsi_profile.py | 15 ++++ suites/libvirt-cim/lib/XenKvmLib/common_util.py | 93 +++++++++++++++++----- suites/libvirt-cim/lib/XenKvmLib/const.py | 6 +- suites/libvirt-cim/lib/XenKvmLib/pool.py | 10 +-- suites/libvirt-cim/lib/XenKvmLib/rasd.py | 10 ++- suites/libvirt-cim/lib/XenKvmLib/test_xml.py | 2 +- suites/libvirt-cim/lib/XenKvmLib/vxml.py | 4 +- suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py | 6 ++ suites/libvirt-cim/main.py | 15 ++++ 28 files changed, 237 insertions(+), 86 deletions(-)
-- 1.8.1.4
_______________________________________________ Libvirt-cim mailing list Libvirt-cim@redhat.com https://www.redhat.com/mailman/listinfo/libvirt-cim

----------------------------------------
Date: Thu, 2 May 2013 08:28:54 -0400 From: jferlan@redhat.com To: libvirt-cim@redhat.com Subject: Re: [Libvirt-cim] [PATCH v2 00/12] cimtest updates
On 04/28/2013 01:19 AM, WangXu wrote:
I have run cimtest with these 12 patches and they solved most of bugs before. However, I found some new FAIL cases. 1. -------------------------------------------------------------------- HostSystem - 01_enum.py: FAIL ERROR - Exp KVM_HostSystem, got KVM_HostSystem ERROR - Exp localhost, got RH64wenchao CIM_ERR_INVALID_CLASS: Linux_ComputerSystem -------------------------------------------------------------------- HostSystem - 03_hs_to_settdefcap.py: FAIL ERROR - Hostname mismatch -------------------------------------------------------------------- Hostname mismatch because cimtest match localhost with RH64wenchao... Could you find out what cause it and fix it?
Not sure this has anything to do with my set of changes - seems more like a system configuration issue that since it's happening on your system is probably more easily debugged by you. Start in 01_enum.py and do various print's in order to figure out where issue is. My system config has an 'options.ip' = localhost and the return from "full_hostname" set to a "generated" name. I have nothing in my /etc/hosts nor in my /etc/sysconfig/network-scripts/* files.
Yes, it has nothing to do with your updates. These 12 patches seem to work well under testing. I just wanted to point out the problems I met :) I found the cause of this issue: I must write the hostname before "localhost" in /etc/hosts. If they were written as "127.0.0.1 localhost HOSTNAME", this testcase would fail. However, I adjusted the entry to be "127.0.0.1 HOSTNAME localhost" and the testcase passed. My suggestion is: could it become "smarter" at recognizing the difference between 'localhost' and HOSTNAME?
2. -------------------------------------------------------------------- SwitchService - 01_enum.py: XFAIL Please check if this is the expected result --- *** VSI NOT supported *** -------------------------------------------------------------------- Why XFAIL but not SKIP here? My computer does not support it.
Neither does mine, but XFAIL is the original design.
In my original opinion, I thought that if some conditions aren't satisfied (such as the absence of a dependency, lack of support for some hardware or software, etc.) the test would be marked as SKIP. But that may not be accurate. For the same reason I listed the following issue. I'll ignore this issue from now on :-)
3. -------------------------------------------------------------------- VirtualSystemManagementService - 22_addmulti_brg_interface.py: XFAIL ERROR - (1, u"CIM_ERR_FAILED: Unable to change (0) device: internal error unable to execute QEMU command 'device_add': Bus 'pci.0' does not support hotplugging") ERROR - Error invoking AddRS: add_net_res ERROR - AddResourceSettings call failed ERROR - Failed to destroy Virtual Network 'my_network1' InvokeMethod(AddResourceSettings): CIM_ERR_FAILED: Unable to change (0) device: internal error unable to execute QEMU command 'device_add': Bus 'pci.0' does not support hotplugging Bug:<00015>
Unrelated to my changes. The only change I made to 22*.py was the 'test_mac' value. See more below though...
-------------------------------------------------------------------- VirtualSystemManagementService - 30_dynamic_disk_mod.py: XFAIL ERROR - (1, u"CIM_ERR_FAILED: Unable to change (0) device: internal error unable to execute QEMU command 'device_add': Bus 'pci.0' does not support hotplugging") ERROR - Error invoking AddRS: add_disk_res ERROR - AddResourceSettings call failed InvokeMethod(AddResourceSettings): CIM_ERR_FAILED: Unable to change (0) device: internal error unable to execute QEMU command 'device_add': Bus 'pci.0' does not support hotplugging
I didn't make any changes here
-------------------------------------------------------------------- I am sorry I don't clear what caused it.
Same here :-) Although it seems as though the test is trying to add a disk to a pci.0 device/bus, but that addition is not supported for a running domain in the configuration provided by cimtest. It does make me wonder if these tests ever passed for KVM...
Digging into the tests a bit, the failure for 22*.py comes as a result of a call to cimtest method 'add_net_res()'. There are two tests that call it. The other is VirtualSystemManagementService/06_addresource.py; however, there's a check in the 06*.py test which doesn't attempt the call when the domain is started, e.g.:
if options.virt == "KVM": test_cases = ["define"] else: test_cases = ["define", "start"]
Since test 22* attempts this call when things are started, it's a different path of support - namely 'hotplug'. After a bit of trial and error, I have a possible fix. I found that I had to change the domain configuration in the test to set "acpi=True". Then I had to change the 'add_net_res' code to query "source/@bridge" and not "source/@network" and compare against attr['virt_net'] and not attr['net_name']. The net_name (e.g. my_network1) is a network pool attribute not a domain network attribute. I'm really not sure this test worked or passed previously.
I will dive into test 30*.py at some point in time. I did try the simple "acpi=True" change there, but that just caused other failures.
4. -------------------------------------------------------------------- VirtualSystemManagementService - 27_definesystem_macvtap_dev.py: FAIL ERROR - Got CIM error CIM_ERR_FAILED: Unable to start domain: Unable to get index for interface eth1: No such device with return code 1 ERROR - Unable to start rstest_nic InvokeMethod(RequestStateChange): CIM_ERR_FAILED: Unable to start domain: Unable to get index for interface eth1: No such device -------------------------------------------------------------------- VirtualSystemManagementService - 28_definesystem_with_vsi_profile.py: XFAIL ERROR - Got CIM error CIM_ERR_FAILED: Unable to start domain: Unable to get index for interface eth1: No such device with return code 1 ERROR - Unable to start VM *** Is VSI support available on this host? *** InvokeMethod(RequestStateChange): CIM_ERR_FAILED: Unable to start domain: Unable to get index for interface eth1: No such device -------------------------------------------------------------------- This FAIL happens because there is no eth1 interface on my computer. The name of my network interface is "eth3". So I think it had better list all network interfaces and pick available one for testing. For multicard machine there are some rule to choice one or...configure it manually?
Correct - there is no method now to get a/the list of network interfaces and then "choose" a valid one to execute tests. To further muddy the waters, the latest Fedora releases use 'em#' instead of 'eth#' - the commit message from my change has a pointer to the details.
For now, though, the values are hardcoded. It's something I'm sure I noted in one of our exchanges already.
John
From: jferlan@redhat.com To: libvirt-cim@redhat.com Date: Wed, 24 Apr 2013 10:09:02 -0400 Subject: [Libvirt-cim] [PATCH v2 00/12] cimtest updates
This is primarily a repost of the previous series:
https://www.redhat.com/archives/libvirt-cim/2013-March/msg00051.html
and
https://www.redhat.com/archives/libvirt-cim/2013-April/msg00014.html
The primary difference in this patch vs. the previous sets is to fix the version string checking for nfs server checking in common_util.py and to add patch 12/12 which handles a problem in 'enum_volumes()' in pool.py.
I assume patches 1-4 and 6-9 were reviewed without issue. So focus on 5/12 and 10-12/12.
John Ferlan (12): Need to check "slp=true", not just "slp" since "slp=false" is possible Change the MAC from "99:" to "88:" Create a temporary directory for disk pool tests Use symbols as named in libvirt-cim for easier reference Fix nfs-server lookup code Fix os_status passing to reporter functions Resolve issues found in test. On Fedora systems default to using 'em1' instead of 'eth1' 19 - resolve issues found in test vxml: Add which volume could not be found to error message Add and utilize virsh_version_cmp pool: Need to handle when there are no volumes in the default pool
.../Profile/04_verify_libvirt_cim_slp_profiles.py | 2 +- .../08_CreateDiskResourcePool.py | 30 +++++-- .../09_DeleteDiskPool.py | 19 +++-- .../10_create_storagevolume.py | 6 +- .../11_create_dir_storagevolume_errs.py | 5 +- .../12_create_netfs_storagevolume_errs.py | 5 +- .../13_delete_storagevolume.py | 5 +- .../14_delete_storagevolume_errs.py | 5 +- .../15_DiskPoolAutostart.py | 15 +++- .../SettingsDefineCapabilities/01_forward.py | 1 - .../libvirt-cim/cimtest/VSSD/06_duplicate_uuid.py | 2 +- .../06_addresource.py | 2 +- .../08_modifyresource.py | 4 +- .../13_refconfig_additional_devs.py | 4 +- .../15_mod_system_settings.py | 11 ++- .../18_define_sys_bridge.py | 2 +- .../19_definenetwork_ers.py | 23 +++--- .../22_addmulti_brg_interface.py | 2 +- .../27_definesystem_macvtap_dev.py | 19 ++++- .../28_definesystem_with_vsi_profile.py | 15 ++++ suites/libvirt-cim/lib/XenKvmLib/common_util.py | 93 +++++++++++++++++----- suites/libvirt-cim/lib/XenKvmLib/const.py | 6 +- suites/libvirt-cim/lib/XenKvmLib/pool.py | 10 +-- suites/libvirt-cim/lib/XenKvmLib/rasd.py | 10 ++- suites/libvirt-cim/lib/XenKvmLib/test_xml.py | 2 +- suites/libvirt-cim/lib/XenKvmLib/vxml.py | 4 +- suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py | 6 ++ suites/libvirt-cim/main.py | 15 ++++ 28 files changed, 237 insertions(+), 86 deletions(-)
-- 1.8.1.4
_______________________________________________ Libvirt-cim mailing list Libvirt-cim@redhat.com https://www.redhat.com/mailman/listinfo/libvirt-cim
_______________________________________________ Libvirt-cim mailing list Libvirt-cim@redhat.com https://www.redhat.com/mailman/listinfo/libvirt-cim

On 04/24/2013 10:09 AM, John Ferlan wrote:
This is primarily a repost of the previous series:
https://www.redhat.com/archives/libvirt-cim/2013-March/msg00051.html
and
https://www.redhat.com/archives/libvirt-cim/2013-April/msg00014.html
The primary difference in this series, compared to the previous sets, is to fix the version string checking for the nfs server checks in common_util.py and to add patch 12/12, which handles a problem in 'enum_volumes()' in pool.py.
I assume patches 1-4 and 6-9 were reviewed without issue, so please focus on 5/12 and 10-12/12.
John Ferlan (12): Need to check "slp=true", not just "slp" since "slp=false" is possible Change the MAC from "99:" to "88:" Create a temporary directory for disk pool tests Use symbols as named in libvirt-cim for easier reference Fix nfs-server lookup code Fix os_status passing to reporter functions Resolve issues found in test. On Fedora systems default to using 'em1' instead of 'eth1' 19 - resolve issues found in test vxml: Add which volume could not be found to error message Add and utilize virsh_version_cmp pool: Need to handle when there are no volumes in the default pool
.../Profile/04_verify_libvirt_cim_slp_profiles.py | 2 +- .../08_CreateDiskResourcePool.py | 30 +++++-- .../09_DeleteDiskPool.py | 19 +++-- .../10_create_storagevolume.py | 6 +- .../11_create_dir_storagevolume_errs.py | 5 +- .../12_create_netfs_storagevolume_errs.py | 5 +- .../13_delete_storagevolume.py | 5 +- .../14_delete_storagevolume_errs.py | 5 +- .../15_DiskPoolAutostart.py | 15 +++- .../SettingsDefineCapabilities/01_forward.py | 1 - .../libvirt-cim/cimtest/VSSD/06_duplicate_uuid.py | 2 +- .../06_addresource.py | 2 +- .../08_modifyresource.py | 4 +- .../13_refconfig_additional_devs.py | 4 +- .../15_mod_system_settings.py | 11 ++- .../18_define_sys_bridge.py | 2 +- .../19_definenetwork_ers.py | 23 +++--- .../22_addmulti_brg_interface.py | 2 +- .../27_definesystem_macvtap_dev.py | 19 ++++- .../28_definesystem_with_vsi_profile.py | 15 ++++ suites/libvirt-cim/lib/XenKvmLib/common_util.py | 93 +++++++++++++++++----- suites/libvirt-cim/lib/XenKvmLib/const.py | 6 +- suites/libvirt-cim/lib/XenKvmLib/pool.py | 10 +-- suites/libvirt-cim/lib/XenKvmLib/rasd.py | 10 ++- suites/libvirt-cim/lib/XenKvmLib/test_xml.py | 2 +- suites/libvirt-cim/lib/XenKvmLib/vxml.py | 4 +- suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py | 6 ++ suites/libvirt-cim/main.py | 15 ++++ 28 files changed, 237 insertions(+), 86 deletions(-)
I have pushed the series to cimtest.git. I have a fix for the issues mentioned in the follow-up exchanges regarding 22_addmulti_brg_interface.py, which I will post shortly. John
participants (2)
-
John Ferlan
-
WangXu