[PATCH 0 of 2] Create function to determine exp # of template RASDs

# HG changeset patch # User Kaitlin Rupert <karupert@us.ibm.com> # Date 1240183799 25200 # Node ID 7ce2862e03dc20cd1e6d3ec9845912285d64eef1 # Parent 5706fea95239f1633296e2fbce13ada882848e13 [TEST] Add get_exp_template_rasd_len() - returns the expected # of template RASD Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com> diff -r 5706fea95239 -r 7ce2862e03dc suites/libvirt-cim/lib/XenKvmLib/rasd.py --- a/suites/libvirt-cim/lib/XenKvmLib/rasd.py Fri Apr 17 14:11:23 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/rasd.py Sun Apr 19 16:29:59 2009 -0700 @@ -28,7 +28,9 @@ from XenKvmLib.classes import get_typed_class, get_class_type from XenKvmLib.enumclass import GetInstance, EnumInstances from XenKvmLib.assoc import Associators -from XenKvmLib.const import default_pool_name, default_network_name +from XenKvmLib.const import default_pool_name, default_network_name, \ + get_provider_version +from XenKvmLib.pool import enum_volumes pasd_cn = 'ProcResourceAllocationSettingData' nasd_cn = 'NetResourceAllocationSettingData' @@ -302,3 +304,45 @@ return rasd_insts, PASS +def get_exp_template_rasd_len(virt, ip, id): + libvirt_rasd_template_changes = 707 + libvirt_rasd_new_changes = 805 + libvirt_rasd_dpool_changes = 839 + + curr_cim_rev, changeset = get_provider_version(virt, ip) + + # For Diskpool, we have info 1 for each of Min, Max, Default, and Incr + exp_base_num = 4 + exp_cdrom = 4 + + exp_len = exp_base_num + + if 'DiskPool' in id: + if virt == 'Xen' or virt == 'XenFV': + # For Xen and XenFV, there is a template for PV and FV, so you + # end up with double the number of templates + xen_multi = 2 + + if curr_cim_rev >= libvirt_rasd_template_changes and \ + curr_cim_rev < libvirt_rasd_new_changes: + exp_len = exp_base_num + exp_cdrom + + elif curr_cim_rev >= libvirt_rasd_new_changes and \ + curr_cim_rev < libvirt_rasd_dpool_changes: + exp_len = (exp_base_num + exp_cdrom) * xen_multi + + elif curr_cim_rev >= libvirt_rasd_dpool_changes: + volumes = enum_volumes(virt, ip) + 
exp_len = (volumes * exp_base_num) * xen_multi + (exp_cdrom * 2) + + elif virt == 'KVM': + if curr_cim_rev >= libvirt_rasd_new_changes and \ + curr_cim_rev < libvirt_rasd_dpool_changes: + exp_len = exp_base_num + exp_cdrom + + elif curr_cim_rev >= libvirt_rasd_dpool_changes: + volumes = enum_volumes(virt, ip) + exp_len = (volumes * exp_base_num) + exp_cdrom + + return exp_len +

Kaitlin Rupert wrote:
# HG changeset patch # User Kaitlin Rupert <karupert@us.ibm.com> # Date 1240183799 25200 # Node ID 7ce2862e03dc20cd1e6d3ec9845912285d64eef1 # Parent 5706fea95239f1633296e2fbce13ada882848e13 [TEST] Add get_exp_template_rasd_len() - returns the expected # of template RASD
Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>
diff -r 5706fea95239 -r 7ce2862e03dc suites/libvirt-cim/lib/XenKvmLib/rasd.py --- a/suites/libvirt-cim/lib/XenKvmLib/rasd.py Fri Apr 17 14:11:23 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/rasd.py Sun Apr 19 16:29:59 2009 -0700 @@ -28,7 +28,9 @@ from XenKvmLib.classes import get_typed_class, get_class_type from XenKvmLib.enumclass import GetInstance, EnumInstances from XenKvmLib.assoc import Associators -from XenKvmLib.const import default_pool_name, default_network_name +from XenKvmLib.const import default_pool_name, default_network_name, \ + get_provider_version +from XenKvmLib.pool import enum_volumes
pasd_cn = 'ProcResourceAllocationSettingData' nasd_cn = 'NetResourceAllocationSettingData' @@ -302,3 +304,45 @@
return rasd_insts, PASS
+def get_exp_template_rasd_len(virt, ip, id): + libvirt_rasd_template_changes = 707 + libvirt_rasd_new_changes = 805 + libvirt_rasd_dpool_changes = 839 + + curr_cim_rev, changeset = get_provider_version(virt, ip) + + # For Diskpool, we have info 1 for each of Min, Max, Default, and Incr + exp_base_num = 4 + exp_cdrom = 4 + + exp_len = exp_base_num + + if 'DiskPool' in id: + if virt == 'Xen' or virt == 'XenFV': + # For Xen and XenFV, there is a template for PV and FV, so you + # end up with double the number of templates + xen_multi = 2 + + if curr_cim_rev >= libvirt_rasd_template_changes and \ + curr_cim_rev < libvirt_rasd_new_changes: + exp_len = exp_base_num + exp_cdrom + + elif curr_cim_rev >= libvirt_rasd_new_changes and \ + curr_cim_rev < libvirt_rasd_dpool_changes: + exp_len = (exp_base_num + exp_cdrom) * xen_multi + + elif curr_cim_rev >= libvirt_rasd_dpool_changes: + volumes = enum_volumes(virt, ip) + exp_len = (volumes * exp_base_num) * xen_multi + (exp_cdrom * 2)
I did not understand why we have exp_cdrom * 2. Can you brief on what is the expected cdrom records now ?? With the new changes what is the expected diff DiskPool informations ?
+ + elif virt == 'KVM': + if curr_cim_rev >= libvirt_rasd_new_changes and \ + curr_cim_rev < libvirt_rasd_dpool_changes: + exp_len = exp_base_num + exp_cdrom + + elif curr_cim_rev >= libvirt_rasd_dpool_changes: + volumes = enum_volumes(virt, ip) + exp_len = (volumes * exp_base_num) + exp_cdrom + + return exp_len +
_______________________________________________ Libvirt-cim mailing list Libvirt-cim@redhat.com https://www.redhat.com/mailman/listinfo/libvirt-cim
-- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik@linux.vnet.ibm.com

+ + exp_len = exp_base_num + + if 'DiskPool' in id: + if virt == 'Xen' or virt == 'XenFV': + # For Xen and XenFV, there is a template for PV and FV, so you + # end up with double the number of templates + xen_multi = 2 + + if curr_cim_rev >= libvirt_rasd_template_changes and \ + curr_cim_rev < libvirt_rasd_new_changes: + exp_len = exp_base_num + exp_cdrom + + elif curr_cim_rev >= libvirt_rasd_new_changes and \ + curr_cim_rev < libvirt_rasd_dpool_changes: + exp_len = (exp_base_num + exp_cdrom) * xen_multi + + elif curr_cim_rev >= libvirt_rasd_dpool_changes: + volumes = enum_volumes(virt, ip) + exp_len = (volumes * exp_base_num) * xen_multi + (exp_cdrom * 2)
I did not understand why we have exp_cdrom * 2.
Oops.. this should have been exp_cdrom * xen_multi (4 CDROM instances for PV guests, 4 CDROM instances for FV guests). I'll resend with a fix for this.
Can you briefly explain what the expected cdrom records are now?
The CDROM template generation code in libvirt hasn't changed. 4 instances for CDROM are generated (max, min, incr, def). For Xen, we generate a set of max, min, incr, def for PV and for FV. So you see 8 templates in the Xen case.
With the new changes, what is the expected difference in the DiskPool information?
Are you talking about the recent provider changes? Here are the steps: 1) For min, max, incr, def: a) For each storage volume in the storage pool do: i) Check to see if we can get info on the storage volume. ii) If libvirt is able to get the info, generate an instance b) Generate a CDROM inst (for Xen, generate one for PV and one for FV)
+ + elif virt == 'KVM': + if curr_cim_rev >= libvirt_rasd_new_changes and \ + curr_cim_rev < libvirt_rasd_dpool_changes: + exp_len = exp_base_num + exp_cdrom + + elif curr_cim_rev >= libvirt_rasd_dpool_changes: + volumes = enum_volumes(virt, ip) + exp_len = (volumes * exp_base_num) + exp_cdrom + + return exp_len +
-- Kaitlin Rupert IBM Linux Technology Center kaitlin@linux.vnet.ibm.com

# HG changeset patch # User Kaitlin Rupert <karupert@us.ibm.com> # Date 1240183800 25200 # Node ID 7bac72ee3582b0bb822b9062234ae1989a464f60 # Parent 7ce2862e03dc20cd1e6d3ec9845912285d64eef1 [TEST] Update tests to use get_exp_template_rasd_len() This way, the template RASD calculation only needs to be updated in one place if it changes in the future. Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com> diff -r 7ce2862e03dc -r 7bac72ee3582 suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py --- a/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py Sun Apr 19 16:29:59 2009 -0700 +++ b/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py Sun Apr 19 16:30:00 2009 -0700 @@ -29,13 +29,15 @@ # Steps: # 1. Create a guest. # 2. Enumerate the HostSystem . -# 3. Using the HostedResourcePool association, get the HostSystem instances on the system -# 4. Using the ElementCapabilities association get the ProcessorPool, MemPool, DiskPool & -# NetPool instances on the system. -# 5. Using the SettingsDefineCapabilities association on the AllocationCapabilities, get -# the (Default, Minimum, Maximum and Increment) instances for ProcRASD. -# 6. Similarly for the MemRASD, DiskRASD & NetRASD get the SettingDefineCap assocn and \ -# get the instances for (Def, Min, Max and Inc). +# 3. Using the HostedResourcePool association, get the HostSystem instances +# on the system +# 4. Using the ElementCapabilities association get the ProcessorPool, +# MemPool, DiskPool & NetPool instances on the system. +# 5. Using the SettingsDefineCapabilities association on the +# AllocationCapabilities, get the (Default, Minimum, Maximum and +# Increment) instances for ProcRASD. +# 6. Similarly for the MemRASD, DiskRASD & NetRASD get the SettingDefineCap +# assocn and get the instances for (Def, Min, Max and Inc). 
# # Feb 13 2008 @@ -50,15 +52,11 @@ from CimTest.ReturnCodes import PASS, FAIL from XenKvmLib.test_xml import testxml from XenKvmLib.test_doms import destroy_and_undefine_all -from XenKvmLib.const import get_provider_version -from XenKvmLib.pool import enum_volumes +from XenKvmLib.rasd import get_exp_template_rasd_len sup_types = ['Xen', 'KVM', 'XenFV', 'LXC'] test_dom = "domgst_test" test_vcpus = 1 -libvirt_rasd_template_changes = 707 -libvirt_rasd_new_changes = 805 -libvirt_rasd_dpool_changes = 839 def setup_env(server, virt="Xen"): status = PASS @@ -173,7 +171,8 @@ InstanceID = inst['InstanceID']) if len(assoc_info) < 1: - logger.error("'%s' has returned %i objects", an, len(assoc_info)) + logger.error("'%s' has returned %i objects", an, + len(assoc_info)) status = FAIL return status, alloccap @@ -181,10 +180,10 @@ if c != inst.classname: continue status, setdefcap = get_inst_from_list(an, - c, - assoc_info, - filter, - rt ) + c, + assoc_info, + filter, + rt ) if status != FAIL: alloccap.append(setdefcap) @@ -216,25 +215,7 @@ ccn, InstanceID = ap['InstanceID']) - curr_cim_rev, changeset = get_provider_version(virt, server) - exp_len = 4 - if 'DiskPool' in ap['InstanceID']: - # For Diskpool, we have info 1 for each of Min, Max, - # default, Increment and 1 for each of PV and FV - # hence 4 * 2 = 8 records - if virt == 'Xen': - if curr_cim_rev >= libvirt_rasd_template_changes and \ - curr_cim_rev < libvirt_rasd_new_changes: - exp_len = 8 - if curr_cim_rev >= libvirt_rasd_new_changes: - exp_len = 16 - if virt == 'KVM': - if curr_cim_rev >= libvirt_rasd_new_changes and \ - curr_cim_rev < libvirt_rasd_dpool_changes: - exp_len = 8 - if curr_cim_rev >= libvirt_rasd_dpool_changes: - volumes = enum_volumes(virt, server) - exp_len = volumes * 4 + exp_len = get_exp_template_rasd_len(virt, server, ap['InstanceID']) if len(assoc_info) != exp_len: logger.error("'%s' returned %i RASD objects instead of %i", diff -r 7ce2862e03dc -r 7bac72ee3582 
suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py --- a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Sun Apr 19 16:29:59 2009 -0700 +++ b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Sun Apr 19 16:30:00 2009 -0700 @@ -63,13 +63,9 @@ from XenKvmLib.const import do_main, default_pool_name, default_network_name from XenKvmLib.classes import get_typed_class from XenKvmLib.common_util import print_field_error -from XenKvmLib.const import get_provider_version -from XenKvmLib.pool import enum_volumes +from XenKvmLib.rasd import get_exp_template_rasd_len platform_sup = ['Xen', 'KVM', 'XenFV', 'LXC'] -libvirt_rasd_template_changes = 707 -libvirt_rasd_new_changes = 805 -libvirt_rasd_dpool_changes = 839 memid = "MemoryPool/0" procid = "ProcessorPool/0" @@ -173,25 +169,7 @@ assoc_info = assoc.Associators(server, assoc_cname, cn, InstanceID = instid) - curr_cim_rev, changeset = get_provider_version(virt, server) - exp_len = 4 - if 'DiskPool' in instid: - # For Diskpool, we have info 1 for each of Min, Max, - # default, Increment and 1 for each of PV and FV - # hence 4 * 2 = 8 records - if virt == 'Xen' or virt == 'XenFV': - if curr_cim_rev >= libvirt_rasd_template_changes and \ - curr_cim_rev < libvirt_rasd_new_changes: - exp_len = 8 - if curr_cim_rev >= libvirt_rasd_new_changes: - exp_len = 16 - if virt == 'KVM': - if curr_cim_rev >= libvirt_rasd_new_changes and \ - curr_cim_rev < libvirt_rasd_dpool_changes: - exp_len = 8 - if curr_cim_rev >= libvirt_rasd_dpool_changes: - volumes = enum_volumes(virt, server) - exp_len = volumes * 4 + exp_len = get_exp_template_rasd_len(virt, server, instid) if len(assoc_info) != exp_len: logger.error("%s returned %i ResourcePool objects instead"

+1 for this patch set. Best, Regards Daisy (运国莲) VSM Team, China Systems & Technology Labs (CSTL) E-mail: yunguol@cn.ibm.com TEL: (86)-21-60922403 Building 10, 399 Ke Yuan Rd, Pudong Shanghai, 201203 libvirt-cim-bounces@redhat.com wrote on 2009-04-20 07:32:52:
_______________________________________________ Libvirt-cim mailing list Libvirt-cim@redhat.com https://www.redhat.com/mailman/listinfo/libvirt-cim
participants (3)
-
Deepti B Kalakeri
-
Guo Lian Yun
-
Kaitlin Rupert