[PATCH] [TEST] #3 Add support for DiskPoolRASD / NetPoolRASD to get_exp_template_rasd_len()

# HG changeset patch
# User Kaitlin Rupert <karupert@us.ibm.com>
# Date 1243630742 25200
# Node ID 88d210d65d3e405deb4f4693643f8e1ec7d7c1e8
# Parent  9a2db4596db33348b860a0e2337e377f237b0692
[TEST] #3 Add support for DiskPoolRASD / NetPoolRASD to get_exp_template_rasd_len()

Also update HostSystem 03 to support DiskPoolRASD / NetPoolRASD.

Updates from 2 to 3:
  - Template DiskPoolRASDs now exist for disk and iscsi type pools -
    the number of pools should now be 5, not 3.
  - Also, pass the pool name to enum_volumes().
  - Fix an error in enum_volumes().

Updates from 1 to 2:
  - Add the instance ID to the log message in HostSystem/03_hs_to_settdefcap.py:223.
  - Rework the DiskRASD portion of get_exp_disk_rasd_len() - only check for
    volumes if the libvirt version is greater than 0.4.1.

Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>

diff -r 9a2db4596db3 -r 88d210d65d3e suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py
--- a/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py	Wed Jun 03 13:00:09 2009 -0700
+++ b/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py	Fri May 29 13:59:02 2009 -0700
@@ -204,8 +204,10 @@
     else:
         rtype = { "%s_DiskResourceAllocationSettingData" % virt : 17, \
+                  "%s_DiskPoolResourceAllocationSettingData" % virt : 17, \
                   "%s_MemResourceAllocationSettingData" % virt : 4, \
                   "%s_NetResourceAllocationSettingData" % virt : 10, \
+                  "%s_NetPoolResourceAllocationSettingData" % virt : 10, \
                   "%s_ProcResourceAllocationSettingData" % virt : 3 }
 
     try:
@@ -218,8 +220,8 @@
             exp_len = get_exp_template_rasd_len(virt, server, ap['InstanceID'])
 
             if len(assoc_info) != exp_len:
-                logger.error("'%s' returned %i RASD objects instead of %i",
-                             an, len(assoc_info), exp_len)
+                logger.error("%s returned %i RASD objects instead of %i for %s",
+                             an, len(assoc_info), exp_len, ap['InstanceID'])
                 return FAIL
 
             for inst in assoc_info:
diff -r 9a2db4596db3 -r 88d210d65d3e suites/libvirt-cim/lib/XenKvmLib/pool.py
--- a/suites/libvirt-cim/lib/XenKvmLib/pool.py	Wed Jun 03 13:00:09 2009 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py	Fri May 29 13:59:02 2009 -0700
@@ -92,7 +92,7 @@
 def enum_volumes(virt, server, pooln=default_pool_name):
     volume = 0
     cmd = "virsh -c %s vol-list %s | sed -e '1,2 d' -e '$ d'" % \
-          (virt2uri(virt), default_pool_name)
+          (virt2uri(virt), pooln)
     ret, out = run_remote(server ,cmd)
     if ret != 0:
         return None
diff -r 9a2db4596db3 -r 88d210d65d3e suites/libvirt-cim/lib/XenKvmLib/rasd.py
--- a/suites/libvirt-cim/lib/XenKvmLib/rasd.py	Wed Jun 03 13:00:09 2009 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/rasd.py	Fri May 29 13:59:02 2009 -0700
@@ -31,6 +31,8 @@
 from XenKvmLib.const import default_pool_name, default_network_name, \
                             get_provider_version
 from XenKvmLib.pool import enum_volumes
+from XenKvmLib.xm_virt_util import virsh_version
+from XenKvmLib.common_util import parse_instance_id
 
 pasd_cn = 'ProcResourceAllocationSettingData'
 nasd_cn = 'NetResourceAllocationSettingData'
@@ -304,45 +306,77 @@
 
     return rasd_insts, PASS
 
-def get_exp_template_rasd_len(virt, ip, id):
+def get_exp_disk_rasd_len(virt, ip, rev, id):
     libvirt_rasd_template_changes = 707
     libvirt_rasd_new_changes = 805
     libvirt_rasd_dpool_changes = 839
 
-    curr_cim_rev, changeset = get_provider_version(virt, ip)
+    libvirt_ver = virsh_version(ip, virt)
 
     # For Diskpool, we have info 1 for each of Min, Max, Default, and Incr
     exp_base_num = 4
     exp_cdrom = 4
 
     exp_len = exp_base_num
 
-    if 'DiskPool' in id:
-        if virt == 'Xen' or virt == 'XenFV':
-            # For Xen and XenFV, there is a template for PV and FV, so you
-            # end up with double the number of templates
-            xen_multi = 2
+    if id == "DiskPool/0":
+        pool_types = 5
+        return exp_base_num * pool_types
+
+    if virt == 'Xen' or virt == 'XenFV':
+        # For Xen and XenFV, there is a template for PV and FV, so you
+        # end up with double the number of templates
+        xen_multi = 2
 
-        if curr_cim_rev >= libvirt_rasd_template_changes and \
-           curr_cim_rev < libvirt_rasd_new_changes:
-            exp_len = exp_base_num + exp_cdrom
+        if rev >= libvirt_rasd_template_changes and \
+           rev < libvirt_rasd_new_changes:
+            exp_len = exp_base_num + exp_cdrom
 
-        elif curr_cim_rev >= libvirt_rasd_new_changes and \
-             curr_cim_rev < libvirt_rasd_dpool_changes:
-            exp_len = (exp_base_num + exp_cdrom) * xen_multi
+        elif rev >= libvirt_rasd_dpool_changes and libvirt_ver >= '0.4.1':
+            volumes = enum_volumes(virt, ip)
+            exp_len = ((volumes * exp_base_num) + exp_cdrom) * xen_multi
 
-        elif curr_cim_rev >= libvirt_rasd_dpool_changes:
-            volumes = enum_volumes(virt, ip)
-            exp_len = ((volumes * exp_base_num) + exp_cdrom) * xen_multi
+        else:
+            exp_len = (exp_base_num + exp_cdrom) * xen_multi
 
-    elif virt == 'KVM':
-        if curr_cim_rev >= libvirt_rasd_new_changes and \
-           curr_cim_rev < libvirt_rasd_dpool_changes:
-            exp_len = exp_base_num + exp_cdrom
+    elif virt == 'KVM':
+        if rev >= libvirt_rasd_new_changes and \
+           rev < libvirt_rasd_dpool_changes:
+            exp_len = exp_base_num + exp_cdrom
 
-        elif curr_cim_rev >= libvirt_rasd_dpool_changes:
-            volumes = enum_volumes(virt, ip)
-            exp_len = (volumes * exp_base_num) + exp_cdrom
+        elif rev >= libvirt_rasd_dpool_changes:
+            id = parse_instance_id(id)
+            volumes = enum_volumes(virt, ip, id[1])
+            exp_len = (volumes * exp_base_num) + exp_cdrom
 
     return exp_len
 
+def get_exp_net_rasd_len(virt, rev, id):
+    net_rasd_template_changes = 861
+
+    exp_base_num = 4
+
+    if id == "NetworkPool/0":
+        pool_types = 3
+        forward_modes = 2
+
+        return (exp_base_num * pool_types) + (exp_base_num * forward_modes)
+
+    if rev >= net_rasd_template_changes:
+        dev_types = 2
+
+        return exp_base_num * dev_types
+
+def get_exp_template_rasd_len(virt, ip, id):
+    curr_cim_rev, changeset = get_provider_version(virt, ip)
+
+    exp_len = 4
+
+    if 'DiskPool' in id:
+        exp_len = get_exp_disk_rasd_len(virt, ip, curr_cim_rev, id)
+
+    elif 'NetworkPool' in id:
+        exp_len = get_exp_net_rasd_len(virt, curr_cim_rev, id)
+
+    return exp_len

This test fails with a seg fault on Xen with current sources with the following error:

Testing Xen hypervisor
--------------------------------------------------------------------
HostSystem - 03_hs_to_settdefcap.py: FAIL
ERROR - Got CIM error The web server returned a bad status line: '' with return code 0
ERROR - Failed to define the dom: domgst_test
InvokeMethod(DefineSystem): The web server returned a bad status line: ''
--------------------------------------------------------------------

--
Thanks and Regards,
Deepti B. Kalakeri
IBM Linux Technology Center
deeptik@linux.vnet.ibm.com

Deepti B Kalakeri wrote:
This test fails with a seg fault on Xen with current sources with the following error:
Testing Xen hypervisor
--------------------------------------------------------------------
HostSystem - 03_hs_to_settdefcap.py: FAIL
ERROR - Got CIM error The web server returned a bad status line: '' with return code 0
ERROR - Failed to define the dom: domgst_test
InvokeMethod(DefineSystem): The web server returned a bad status line: ''
--------------------------------------------------------------------
Here is the debug message for the HostSystem/03_hs_to_settdefcap.py failure:

xmlgen.c(713): New UUID
xmlgen.c(145): Disk: 2 /tmp/default-xen-dimage xvda
Virt_VirtualSystemManagementService.c(1281): System XML:
<domain type="xen">
  <name>domgst_test</name>
  <on_poweroff>destroy</on_poweroff>
  <on_crash>destroy</on_crash>
  <uuid>1928ab2c-e51b-44a8-a533-2b2d93bf93cb</uuid>
  <os>
    <type>linux</type>
    <kernel>/tmp/default-xen-kernel</kernel>
    <initrd>/tmp/default-xen-initrd</initrd>
    <cmdline/>
  </os>
  <currentMemory>131072</currentMemory>
  <memory>131072</memory>
  <vcpu>1</vcpu>
  <devices>
    <disk type="file" device="disk">
      <source file="/tmp/default-xen-dimage"/>
      <target dev="xvda"/>
    </disk>
    <interface type="network">
      <mac address="11:22:33:aa:bb:cc"/>
      <source network="cimtest-networkpool"/>
    </interface>
    <input type="mouse" bus="xen"/>
    <graphics type="vnc" port="-1" listen="127.0.0.1" keymap="en-us"/>
  </devices>
</domain>
misc_util.c(75): Connecting to libvirt with uri `xen'
misc_util.c(75): Connecting to libvirt with uri `xen'
misc_util.c(202): URI of connection is: xen:///
misc_util.c(202): URI of connection is: xen:///
device_parsing.c(276): Disk node: disk
infostore.c(88): Path is /etc/libvirt/cim/Xen_domgst_test
infostore.c(88): Path is /etc/libvirt/cim/Xen_domgst_test
infostore.c(250): Deleted /etc/libvirt/cim/Xen_domgst_test
infostore.c(88): Path is /etc/libvirt/cim/Xen_domgst_test
infostore.c(356): Creating new node uuid=1928ab2c-e51b-44a8-a533-2b2d93bf93cb
Segmentation fault

Hmmm! I tried debugging this. I put in a lot of debug messages, and I found that the problem lies in the cleanup_dominfo() function in libxkutil/device_parsing.c. It faults when we try to free(dom->os_info.pv.initrd). The struct domain does not seem to have an initrd property; see below:

============
struct domain {
        enum { DOMAIN_XENPV,
               DOMAIN_XENFV,
               DOMAIN_KVM,
               DOMAIN_QEMU,
               DOMAIN_LXC } type;
        char *name;
        char *typestr; /* xen, kvm, etc */
        char *uuid;
        char *bootloader;
        char *bootloader_args;
        char *clock;

        union {
                struct pv_os_info pv;
                struct fv_os_info fv;
                struct lxc_os_info lxc;
        } os_info;

        int on_poweroff;
        int on_reboot;
        int on_crash;

        struct virt_device *dev_graphics;
        int dev_graphics_ct;

        struct virt_device *dev_emu;

        struct virt_device *dev_input;
        int dev_input_ct;

        struct virt_device *dev_mem;
        int dev_mem_ct;

        struct virt_device *dev_net;
        int dev_net_ct;

        struct virt_device *dev_disk;
        int dev_disk_ct;

        struct virt_device *dev_vcpu;
        int dev_vcpu_ct;
};

I commented out the "free(dom->os_info.pv.initrd)" part of the code and the test case passed.

--
Thanks and Regards,
Deepti B. Kalakeri
IBM Linux Technology Center
deeptik@linux.vnet.ibm.com
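[Editor's note: to make the failure mode concrete, here is a minimal, self-contained C sketch of the union aliasing at play. Only the trimmed struct domain shape is taken from the thread; the pv_os_info/fv_os_info field layouts are invented for illustration (the real definitions live in libxkutil), so treat this as a sketch, not the provider code.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Field layouts invented for illustration; see libxkutil for the real ones. */
struct pv_os_info { char *kernel; char *initrd; };
struct fv_os_info { char *loader; char **bootlist; };

struct domain {
        enum { DOMAIN_XENPV, DOMAIN_XENFV } type;
        union {
                struct pv_os_info pv;
                struct fv_os_info fv;
        } os_info;
};

int main(void)
{
        struct domain dom;
        static char *bootdevs[] = { "hd", NULL };

        memset(&dom, 0, sizeof(dom));

        /* A PV guest: only the pv view of the union is meaningful. */
        dom.type = DOMAIN_XENPV;
        dom.os_info.pv.kernel = strdup("/tmp/default-xen-kernel");
        dom.os_info.pv.initrd = strdup("/tmp/default-xen-initrd");

        /* The bug: writing an FV-only field for a PV guest.  pv and fv
         * share storage, so this silently clobbers pv.initrd. */
        dom.os_info.fv.bootlist = bootdevs;

        printf("pv.initrd is now %p -- no longer the strdup()'d pointer\n",
               (void *)dom.os_info.pv.initrd);

        /* cleanup_dominfo() then does the equivalent of the line below,
         * handing free() a pointer that never came from malloc():
         * undefined behavior, which here showed up as the segfault.
         * Left commented out so this sketch runs cleanly. */
        /* free(dom.os_info.pv.initrd); */

        free(dom.os_info.pv.kernel);
        return 0;
}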

Deepti B Kalakeri wrote:
Deepti B Kalakeri wrote:
This test fails with a seg fault on Xen with current sources with the following error:
Testing Xen hypervisor
--------------------------------------------------------------------
HostSystem - 03_hs_to_settdefcap.py: FAIL
ERROR - Got CIM error The web server returned a bad status line: '' with return code 0
ERROR - Failed to define the dom: domgst_test
InvokeMethod(DefineSystem): The web server returned a bad status line: ''
--------------------------------------------------------------------
Here is the debug message for the HostSystem/03_hs_to_settdefcap.py
Thanks Deepti! I'll be sending a patch to fix this issue in the provider. The problem is that we were assigning the bootlist variable for PV guests, which isn't valid since they don't use a bootlist.

--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com
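[Editor's note: the provider patch itself isn't shown in this thread. As a hedged sketch of the shape of the fix, a guard like the following keeps FV-only assignments away from PV and LXC guests; set_bootlist() and the field layout are invented for illustration.]

#include <stdlib.h>

enum dom_type { DOMAIN_XENPV, DOMAIN_XENFV, DOMAIN_KVM, DOMAIN_QEMU, DOMAIN_LXC };

struct fv_os_info { char *loader; char **bootlist; };

/* Hypothetical guard: PV and LXC guests boot from kernel/initrd (or an
 * init process), so writing the FV-only bootlist would clobber the
 * union storage they actually use. */
static void set_bootlist(enum dom_type type, struct fv_os_info *fv,
                         char **bootlist)
{
        if (type == DOMAIN_XENPV || type == DOMAIN_LXC)
                return;

        fv->bootlist = bootlist;
}

int main(void)
{
        struct fv_os_info fv = { NULL, NULL };
        char *devs[] = { "hd", NULL };

        set_bootlist(DOMAIN_XENPV, &fv, devs);   /* correctly ignored */
        set_bootlist(DOMAIN_XENFV, &fv, devs);   /* applied for FV */

        return fv.bootlist == devs ? EXIT_SUCCESS : EXIT_FAILURE;
}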

Kaitlin Rupert wrote:
Deepti B Kalakeri wrote:
Deepti B Kalakeri wrote:
This test fails with a seg fault on Xen with current sources with the following error:
Testing Xen hypervisor
--------------------------------------------------------------------
HostSystem - 03_hs_to_settdefcap.py: FAIL
ERROR - Got CIM error The web server returned a bad status line: '' with return code 0
ERROR - Failed to define the dom: domgst_test
InvokeMethod(DefineSystem): The web server returned a bad status line: ''
--------------------------------------------------------------------
Here is the debug message for the HostSystem/03_hs_to_settdefcap.py
Thanks Deepti! I'll be sending a patch to fix this issue in the provider. The problem is that we were assigning the bootlist variable for PV guests, which isn't valid since they don't use a bootlist.
+1. The tc now passes with the "Don't set bootlist for PV and LXC guests" patch. But I see the following messages, which I think need a look:

misc_util.c(202): URI of connection is: xen:///
Virt_DevicePool.c(272): Failed to statvfs((null)): Bad address
Virt_DevicePool.c(936): Failed to set capacity for disk pool: 0
std_association.c(304): Handler returned CMPI_RC_OK.

Also, I get the following messages:

Testing Xen hypervisor
--------------------------------------------------------------------
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
HostSystem - 03_hs_to_settdefcap.py: PASS
--------------------------------------------------------------------

--
Thanks and Regards,
Deepti B. Kalakeri
IBM Linux Technology Center
deeptik@linux.vnet.ibm.com
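[Editor's note: the statvfs((null)) line suggests the disk pool's path is NULL when Virt_DevicePool.c computes capacity. A minimal C sketch reproducing just that failure mode follows; it relies on glibc-specific behavior (printf renders a NULL string as "(null)", and the kernel rejects a NULL path with EFAULT, which strerror() reports as "Bad address"), and the variable names are invented.]

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/statvfs.h>

int main(void)
{
        const char *path = NULL;   /* stands in for the missing pool path */
        struct statvfs buf;

        if (statvfs(path, &buf) != 0)
                /* glibc prints the NULL string as "(null)" and errno is
                 * EFAULT ("Bad address") -- matching the log above. */
                printf("Failed to statvfs(%s): %s\n", path, strerror(errno));

        return 0;
}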

The tc now passes with the "Don't set bootlist for PV and LXC guests" patch. But I see the following messages, which I think need a look:
misc_util.c(202): URI of connection is: xen:///
Virt_DevicePool.c(272): Failed to statvfs((null)): Bad address
Virt_DevicePool.c(936): Failed to set capacity for disk pool: 0
std_association.c(304): Handler returned CMPI_RC_OK.
Also, I get the following messages:
Testing Xen hypervisor
--------------------------------------------------------------------
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
libvir: error : invalid argument in __virGetDomain
HostSystem - 03_hs_to_settdefcap.py: PASS
--------------------------------------------------------------------
This looks like there is an invalid domain on your system. I've seen issues with other versions of Xen where the domain config gets corrupted, and libvirt can see the domain, but xend can't. You can try restarting xend and libvirtd. That might allow you to remove the offending guest.

--
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com