
# HG changeset patch
# User Kaitlin Rupert <karupert@us.ibm.com>
# Date 1243630742 25200
# Node ID 88d210d65d3e405deb4f4693643f8e1ec7d7c1e8
# Parent 9a2db4596db33348b860a0e2337e377f237b0692
[TEST] #3 Add support for DiskPoolRASD / NetPoolRASD to get_exp_template_rasd_len()

Also update HostSystem 03 to support DiskPoolRASD / NetPoolRASD.

Updates from 2 to 3:
 -Template DiskPoolRASDs now exist for disk and iscsi type pools, so the
  number of pool types should now be 5, not 3.
 -Also, pass the pool name to enum_volumes()
 -Fix error in enum_volumes()

Updates from 1 to 2:
 -Add the instance id to the log message at HostSystem/03_hs_to_settdefcap.py:223
 -Rework the DiskRASD portion of get_exp_disk_rasd_len() - only check for
  volumes if the libvirt version is greater than 0.4.1.

Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>

diff -r 9a2db4596db3 -r 88d210d65d3e suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py
--- a/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py	Wed Jun 03 13:00:09 2009 -0700
+++ b/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py	Fri May 29 13:59:02 2009 -0700
@@ -204,8 +204,10 @@
     else:
         rtype = { "%s_DiskResourceAllocationSettingData" % virt : 17, \
+                  "%s_DiskPoolResourceAllocationSettingData" % virt : 17, \
                   "%s_MemResourceAllocationSettingData" % virt :  4, \
                   "%s_NetResourceAllocationSettingData" % virt : 10, \
+                  "%s_NetPoolResourceAllocationSettingData" % virt : 10, \
                   "%s_ProcResourceAllocationSettingData" % virt :  3
                 }
 
     try:
@@ -218,8 +220,8 @@
 
             exp_len = get_exp_template_rasd_len(virt, server, ap['InstanceID'])
             if len(assoc_info) != exp_len:
-                logger.error("'%s' returned %i RASD objects instead of %i",
-                             an, len(assoc_info), exp_len)
+                logger.error("%s returned %i RASD objects instead of %i for %s",
+                             an, len(assoc_info), exp_len, ap['InstanceID'])
                 return FAIL
 
             for inst in assoc_info:
diff -r 9a2db4596db3 -r 88d210d65d3e suites/libvirt-cim/lib/XenKvmLib/pool.py
--- a/suites/libvirt-cim/lib/XenKvmLib/pool.py	Wed Jun 03 13:00:09 2009 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py	Fri May 29 13:59:02 2009 -0700
@@ -92,7 +92,7 @@
 def enum_volumes(virt, server, pooln=default_pool_name):
     volume = 0
     cmd = "virsh -c %s vol-list %s | sed -e '1,2 d' -e '$ d'" % \
-          (virt2uri(virt), default_pool_name)
+          (virt2uri(virt), pooln)
     ret, out = run_remote(server ,cmd)
     if ret != 0:
         return None
diff -r 9a2db4596db3 -r 88d210d65d3e suites/libvirt-cim/lib/XenKvmLib/rasd.py
--- a/suites/libvirt-cim/lib/XenKvmLib/rasd.py	Wed Jun 03 13:00:09 2009 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/rasd.py	Fri May 29 13:59:02 2009 -0700
@@ -31,6 +31,8 @@
 from XenKvmLib.const import default_pool_name, default_network_name, \
                             get_provider_version
 from XenKvmLib.pool import enum_volumes
+from XenKvmLib.xm_virt_util import virsh_version
+from XenKvmLib.common_util import parse_instance_id
 
 pasd_cn = 'ProcResourceAllocationSettingData'
 nasd_cn = 'NetResourceAllocationSettingData'
@@ -304,45 +306,77 @@
 
     return rasd_insts, PASS
 
-def get_exp_template_rasd_len(virt, ip, id):
+def get_exp_disk_rasd_len(virt, ip, rev, id):
     libvirt_rasd_template_changes = 707
     libvirt_rasd_new_changes = 805
     libvirt_rasd_dpool_changes = 839
 
-    curr_cim_rev, changeset = get_provider_version(virt, ip)
+    libvirt_ver = virsh_version(ip, virt)
 
     # For Diskpool, we have info 1 for each of Min, Max, Default, and Incr
     exp_base_num = 4
     exp_cdrom = 4
 
-    exp_len = exp_base_num
+    exp_len = exp_base_num
 
-    if 'DiskPool' in id:
-        if virt == 'Xen' or virt == 'XenFV':
-            # For Xen and XenFV, there is a template for PV and FV, so you
-            # end up with double the number of templates
-            xen_multi = 2
+    if id == "DiskPool/0":
+        pool_types = 5
+        return exp_base_num * pool_types
+
+    if virt == 'Xen' or virt == 'XenFV':
+        # For Xen and XenFV, there is a template for PV and FV, so you
+        # end up with double the number of templates
+        xen_multi = 2
 
-            if curr_cim_rev >= libvirt_rasd_template_changes and \
-               curr_cim_rev < libvirt_rasd_new_changes:
-                exp_len = exp_base_num + exp_cdrom
+        if rev >= libvirt_rasd_template_changes and \
+           rev < libvirt_rasd_new_changes:
+            exp_len = exp_base_num + exp_cdrom
 
-            elif curr_cim_rev >= libvirt_rasd_new_changes and \
-                 curr_cim_rev < libvirt_rasd_dpool_changes:
-                exp_len = (exp_base_num + exp_cdrom) * xen_multi
+        elif rev >= libvirt_rasd_dpool_changes and libvirt_ver >= '0.4.1':
+            volumes = enum_volumes(virt, ip)
+            exp_len = ((volumes * exp_base_num) + exp_cdrom) * xen_multi
 
-            elif curr_cim_rev >= libvirt_rasd_dpool_changes:
-                volumes = enum_volumes(virt, ip)
-                exp_len = ((volumes * exp_base_num) + exp_cdrom) * xen_multi
+        else:
+            exp_len = (exp_base_num + exp_cdrom) * xen_multi
 
-        elif virt == 'KVM':
-            if curr_cim_rev >= libvirt_rasd_new_changes and \
-               curr_cim_rev < libvirt_rasd_dpool_changes:
-                exp_len = exp_base_num + exp_cdrom
+    elif virt == 'KVM':
+        if rev >= libvirt_rasd_new_changes and \
+           rev < libvirt_rasd_dpool_changes:
+            exp_len = exp_base_num + exp_cdrom
 
-            elif curr_cim_rev >= libvirt_rasd_dpool_changes:
-                volumes = enum_volumes(virt, ip)
-                exp_len = (volumes * exp_base_num) + exp_cdrom
+        elif rev >= libvirt_rasd_dpool_changes:
+            id = parse_instance_id(id)
+            volumes = enum_volumes(virt, ip, id[1])
+            exp_len = (volumes * exp_base_num) + exp_cdrom
 
     return exp_len
 
+def get_exp_net_rasd_len(virt, rev, id):
+    net_rasd_template_changes = 861
+
+    exp_base_num = 4
+
+    if id == "NetworkPool/0":
+        pool_types = 3
+        forward_modes = 2
+
+        return (exp_base_num * pool_types) + (exp_base_num * forward_modes)
+
+    if rev >= net_rasd_template_changes:
+        dev_types = 2
+
+        return exp_base_num * dev_types
+
+def get_exp_template_rasd_len(virt, ip, id):
+    curr_cim_rev, changeset = get_provider_version(virt, ip)
+
+    exp_len = 4
+
+    if 'DiskPool' in id:
+        exp_len = get_exp_disk_rasd_len(virt, ip, curr_cim_rev, id)
+
+    elif 'NetworkPool' in id:
+        exp_len = get_exp_net_rasd_len(virt, curr_cim_rev, id)
+
+    return exp_len
+
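
For reviewers, a standalone sketch of the counting the new helpers do for the
top-level pools (illustrative only; the function and parameter names below are
local stand-ins, not part of XenKvmLib, and the constants simply mirror the
patch):

# Each template RASD is reported with 4 setting instances: Min, Max,
# Default, and Increment (exp_base_num in the patch).

def expected_diskpool_zero_len(settings_per_template=4, pool_types=5):
    # DiskPool/0 advertises one template per pool type; adding the disk
    # and iscsi pool templates brings the count of pool types to 5.
    return settings_per_template * pool_types

def expected_netpool_zero_len(settings_per_template=4, pool_types=3,
                              forward_modes=2):
    # NetworkPool/0 advertises one template per pool type plus one per
    # forward mode, again with 4 setting instances each.
    return (settings_per_template * pool_types) + \
           (settings_per_template * forward_modes)

if __name__ == "__main__":
    print(expected_diskpool_zero_len())    # 20 DiskPool/0 template RASDs
    print(expected_netpool_zero_len())     # 20 NetworkPool/0 template RASDs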