[PATCH] [TEST] Update VSMigrationS.01 for XenFV support, also convert test_xml call to vxml
by yunguol@cn.ibm.com
# HG changeset patch
# User Guolian Yun <yunguol(a)cn.ibm.com>
# Date 1216720500 25200
# Node ID 51375198603807e18dd34c986a03acbd2e751b32
# Parent 3703b7be5a107c67e901546978e974546b3d5562
[TEST] Update VSMigrationS.01 for XenFV support, also convert test_xml call to vxml
Signed-off-by: Guolian Yun <yunguol(a)cn.ibm.com>
diff -r 3703b7be5a10 -r 513751986038 suites/libvirt-cim/cimtest/VirtualSystemMigrationService/01_migratable_host.py
--- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/01_migratable_host.py Wed Jul 16 07:23:32 2008 -0700
+++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/01_migratable_host.py Tue Jul 22 02:55:00 2008 -0700
@@ -27,8 +27,8 @@
import pywbem
from pywbem.cim_obj import CIMInstanceName
from VirtLib import utils
-from XenKvmLib.test_doms import define_test_domain, start_test_domain, destroy_and_undefine_domain
-from XenKvmLib.test_xml import *
+from XenKvmLib.test_doms import destroy_and_undefine_domain
+from XenKvmLib import vxml
from XenKvmLib import computersystem
from XenKvmLib import vsmigrations
from XenKvmLib.vsmigrations import check_possible_host_migration, migrate_guest_to_host, check_migration_job
@@ -36,25 +36,18 @@
from CimTest.Globals import logger, CIM_ERROR_ENUMERATE, do_main
from CimTest.ReturnCodes import PASS, FAIL, XFAIL
-sup_types = ['Xen']
+sup_types = ['Xen', 'XenFV']
dom_name = 'dom_migrate'
-def start_guest_get_ref(ip, guest_name):
- try:
- xmlfile = testxml(guest_name)
- ret = define_test_domain(xmlfile, ip)
- if not ret:
- return FAIL, None
+def start_guest_get_ref(ip, guest_name, virt='Xen'):
+ virt_xml = vxml.get_class(virt)
+ cxml = virt_xml(guest_name)
+ ret = cxml.create(ip)
+ if not ret:
+ logger.error("Error create domain %s" % guest_name)
+ return FAIL
- ret = start_test_domain(guest_name, ip)
- if not ret:
- return FAIL, None
-
- time.sleep(10)
- except Exception:
- logger.error("Error creating domain %s" % guest_name)
- return FAIL, None
-
+ time.sleep(10)
classname = 'Xen_ComputerSystem'
cs_ref = CIMInstanceName(classname, keybindings = {
'Name':guest_name,
@@ -85,9 +78,9 @@
else:
local_migrate = 0
- status, cs_ref = start_guest_get_ref(options.ip, dom_name)
+ status, cs_ref = start_guest_get_ref(options.ip, dom_name, options.virt)
if status != PASS:
- destroy_and_undefine_domain(guest_name, options.ip)
+ destroy_and_undefine_domain(dom_name, options.ip)
return FAIL
guest_name = cs_ref['Name']
16 years, 5 months
[PATCH] [TEST] Update VSMigrationS.05 for XenFV support, also convert test_xml call to vxml
by yunguol@cn.ibm.com
# HG changeset patch
# User Guolian Yun <yunguol(a)cn.ibm.com>
# Date 1216719536 25200
# Node ID 5368f189cb2196ac260e07c4f2ea958ffdf68807
# Parent 3703b7be5a107c67e901546978e974546b3d5562
[TEST] Update VSMigrationS.05 for XenFV support, also convert test_xml call to vxml
Signed-off-by: Guolian Yun <yunguol(a)cn.ibm.com>
diff -r 3703b7be5a10 -r 5368f189cb21 suites/libvirt-cim/cimtest/VirtualSystemMigrationService/05_migratable_host_errs.py
--- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/05_migratable_host_errs.py Wed Jul 16 07:23:32 2008 -0700
+++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/05_migratable_host_errs.py Tue Jul 22 02:38:56 2008 -0700
@@ -24,14 +24,14 @@
import pywbem
from pywbem.cim_obj import CIMInstanceName
from VirtLib import utils
-from XenKvmLib.test_doms import define_test_domain, start_test_domain, destroy_and_undefine_domain
-from XenKvmLib.test_xml import *
+from XenKvmLib.test_doms import destroy_and_undefine_domain
+from XenKvmLib import vxml
from XenKvmLib import computersystem
from XenKvmLib import vsmigrations
from CimTest.Globals import logger, do_main
from CimTest.ReturnCodes import PASS, FAIL, XFAIL
-sup_types = ['Xen']
+sup_types = ['Xen', 'XenFV']
test_dom = 'dom_migration'
exp_rc = 1 #CIM_ERR_FAILED
@@ -40,23 +40,17 @@
@do_main(sup_types)
def main():
options = main.options
- xmlfile = testxml(test_dom)
+
+ virt_xml = vxml.get_class(options.virt)
+ cxml = virt_xml(test_dom)
+ ret = cxml.create(options.ip)
+ if not ret:
+ logger.error("Error create domain %s" % test_dom )
+ return FAIL
status = FAIL
rc = -1
- try:
- define_test_domain(xmlfile, options.ip)
- except Exception:
- logger.error("Error define domain %s" % test_dom)
- return FAIL
-
- try:
- start_test_domain(test_dom, options.ip)
- except Exception:
- logger.error("Error start domain %s" % test_dom)
- return FAIL
-
try:
service = vsmigrations.Xen_VirtualSystemMigrationService(options.ip)
except Exception:
16 years, 5 months
[PATCH] [TEST] #3 Update VSSDC.01 for KVM/XenFV/LXC support
by yunguol@cn.ibm.com
# HG changeset patch
# User Guolian Yun <yunguol(a)cn.ibm.com>
# Date 1216706533 25200
# Node ID e32675a6d5ca7d92d0ebcd826dbf1332c3b6690f
# Parent 3703b7be5a107c67e901546978e974546b3d5562
[TEST] #3 Update VSSDC.01 for KVM/XenFV/LXC support
The test defines non-bootloader guests for all platform types, also remove the
part of the test that verifies the bootloader
Updates from 2 to 3:
Remove build_vssd_info() and call compare_all_prop() in assoc_values()
Signed-off-by: Guolian Yun <yunguol(a)cn.ibm.com>
diff -r 3703b7be5a10 -r e32675a6d5ca suites/libvirt-cim/cimtest/VirtualSystemSettingDataComponent/01_forward.py
--- a/suites/libvirt-cim/cimtest/VirtualSystemSettingDataComponent/01_forward.py Wed Jul 16 07:23:32 2008 -0700
+++ b/suites/libvirt-cim/cimtest/VirtualSystemSettingDataComponent/01_forward.py Mon Jul 21 23:02:13 2008 -0700
@@ -52,32 +52,31 @@
import sys
from XenKvmLib import enumclass
from VirtLib import utils
-from XenKvmLib.test_doms import test_domain_function, destroy_and_undefine_all
-from XenKvmLib.test_xml import testxml_bl
-from XenKvmLib.test_xml import xml_get_dom_bootloader
+from XenKvmLib.test_doms import destroy_and_undefine_all
+from XenKvmLib.assoc import compare_all_prop
from CimTest import Globals
from XenKvmLib import assoc
+from XenKvmLib import vxml
+from XenKvmLib.classes import get_typed_class
from CimTest.Globals import logger, do_main
from CimTest.ReturnCodes import FAIL, PASS
-sup_types = ['Xen']
+sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
test_dom = "VSSDC_dom"
test_vcpus = 2
test_mac = "00:11:22:33:44:aa"
test_disk = 'xvda'
-status = 0
-VSType = "Xen"
-def init_list():
+def init_list(test_disk, test_mac, virt='Xen'):
"""
Creating the lists that will be used for comparisons.
"""
- rlist = ['Xen_DiskResourceAllocationSettingData',
- 'Xen_MemResourceAllocationSettingData',
- 'Xen_NetResourceAllocationSettingData',
- 'Xen_ProcResourceAllocationSettingData'
+ rlist = [get_typed_class(virt, 'DiskResourceAllocationSettingData'),
+ get_typed_class(virt, 'MemResourceAllocationSettingData'),
+ get_typed_class(virt, 'NetResourceAllocationSettingData'),
+ get_typed_class(virt, 'ProcResourceAllocationSettingData')
]
prop_list = {rlist[0] : "%s/%s" % (test_dom, test_disk),
@@ -85,32 +84,13 @@
rlist[2] : "%s/%s" % (test_dom, test_mac),
rlist[3] : "%s/%s" % (test_dom, "proc")
}
+ if virt == 'LXC':
+ rlist = [get_typed_class(virt, 'MemResourceAllocationSettingData')]
+ prop_list = {rlist[0] : "%s/%s" % (test_dom, "mem")}
return prop_list
-def build_vssd_info(ip, vssd):
- """
- Creating the vssd fileds lists that will be used for comparisons.
- """
-
- if vssd.Bootloader == "" or vssd.Caption == "" or \
- vssd.InstanceID == "" or vssd.ElementName == "" or \
- vssd.VirtualSystemIdentifier == "" or vssd.VirtualSystemType == "":
- logger.error("One of the required VSSD details seems to be empty")
- test_domain_function(test_dom, ip, "undefine")
- return FAIL
-
- vssd_vals = {'Bootloader' : vssd.Bootloader,
- 'Caption' : vssd.Caption,
- 'InstanceID' : vssd.InstanceID,
- 'ElementName' : vssd.ElementName,
- 'VirtualSystemIdentifier' : vssd.VirtualSystemIdentifier,
- 'VirtualSystemType' : vssd.VirtualSystemType
- }
-
- return vssd_vals
-
-def assoc_values(ip, assoc_info, cn, an, vals):
+def assoc_values(ip, assoc_info, cn, an, vssd):
"""
The association info of
Xen_VirtualSystemSettingDataComponent with every RASDclass is
@@ -124,15 +104,8 @@
Globals.logger.error("%s returned %i resource objects for '%s'" % \
(an, len(assoc_info), cn))
return FAIL
-
- for prop, val in vals.iteritems():
- if assoc_info[0][prop] != val:
- Globals.logger.error("%s mismatch: returned %s instead of %s" %\
- (prop, assoc_info[0][prop], val))
- return FAIL
-
- return PASS
-
+ status = compare_all_prop(assoc_info[0], vssd)
+ return status
except Exception, detail :
logger.error("Exception in assoc_values function: %s" % detail)
return FAIL
@@ -143,49 +116,53 @@
status = FAIL
destroy_and_undefine_all(options.ip)
- test_xml = testxml_bl(test_dom, vcpus = test_vcpus, \
- mac = test_mac, disk = test_disk, \
- server = options.ip,\
- gtype = 0)
- ret = test_domain_function(test_xml, options.ip, cmd = "define")
+ prop_list = init_list(test_disk, test_mac, options.virt)
+ virt_xml = vxml.get_class(options.virt)
+ if options.virt == 'LXC':
+ cxml = virt_xml(test_dom)
+ else:
+ cxml = virt_xml(test_dom, vcpus = test_vcpus, \
+ mac = test_mac, disk = test_disk)
+ ret = cxml.define(options.ip)
if not ret:
logger.error("Failed to define the dom: %s", test_dom)
return FAIL
- instIdval = "%s:%s" % (VSType, test_dom)
+ if options.virt == 'XenFV':
+ instIdval = "Xen:%s" % test_dom
+ else:
+ instIdval = "%s:%s" % (options.virt, test_dom)
+
keyname = "InstanceID"
+ key_list = { 'InstanceID' : instIdval }
+ vssd_cn = get_typed_class(options.virt, 'VirtualSystemSettingData')
- key_list = { 'InstanceID' : instIdval }
try:
vssd = enumclass.getInstance(options.ip, \
- enumclass.Xen_VirtualSystemSettingData, \
- key_list)
+ 'VirtualSystemSettingData', \
+ key_list, \
+ options.virt)
if vssd is None:
logger.error("VSSD instance for %s not found" % test_dom)
- test_domain_function(test_dom, options.ip, "undefine")
+ cxml.undefine(options.ip)
return FAIL
-
- vssd_vals = build_vssd_info(options.ip, vssd)
-
except Exception, detail :
- logger.error(Globals.CIM_ERROR_GETINSTANCE, \
- 'Xen_VirtualSystemSettingData')
+ logger.error(Globals.CIM_ERROR_GETINSTANCE, vssd_cn)
logger.error("Exception : %s" % detail)
- test_domain_function(test_dom, options.ip, "undefine")
+ cxml.undefine(options.ip)
return FAIL
- prop_list = init_list()
try:
# Looping through the RASD_cllist, call association
# Xen_VirtualSystemSettingDataComponent with each class in RASD_cllist
- an = 'Xen_VirtualSystemSettingDataComponent'
+ an = get_typed_class(options.virt, 'VirtualSystemSettingDataComponent')
for rasd_cname, prop in prop_list.iteritems():
assoc_info = assoc.Associators(options.ip, an, rasd_cname,
- InstanceID = prop)
+ options.virt, InstanceID = prop)
# Verify the association fields returned for particular rasd_cname.
status = assoc_values(options.ip, assoc_info, rasd_cname, an,
- vssd_vals)
+ vssd)
if status != PASS:
break
@@ -194,7 +171,7 @@
logger.error("Exception : %s" % detail)
status = FAIL
- test_domain_function(test_dom, options.ip, "undefine")
+ cxml.undefine(options.ip)
return status
if __name__ == "__main__":
16 years, 5 months
[PATCH] [TEST] #2 Fix potential false positive in AC 01
by Kaitlin Rupert
# HG changeset patch
# User Kaitlin Rupert <karupert(a)us.ibm.com>
# Date 1216502229 25200
# Node ID de486893703609719ea79d53ee64828537826211
# Parent 3703b7be5a107c67e901546978e974546b3d5562
[TEST] #2 Fix potential false positive in AC 01.
This test needs to verify enum of AC returns the same number of instances as the number of instances returned by enum of MemoryPool + ProcessorPool + DiskPool + NetworkPool.
Also, cleaned up the logic to verify that the ResourceType and the InstanceIDs of the AC instances match the Pool instances they're describing.
Updates from 1 to 2:
-Create a disk pool and network pool to ensure there is an instance for each pool type.
-Add comment explaining the purpose of the test.
Signed-off-by: Kaitlin Rupert <karupert(a)us.ibm.com>
diff -r 3703b7be5a10 -r de4868937036 suites/libvirt-cim/cimtest/AllocationCapabilities/01_enum.py
--- a/suites/libvirt-cim/cimtest/AllocationCapabilities/01_enum.py Wed Jul 16 07:23:32 2008 -0700
+++ b/suites/libvirt-cim/cimtest/AllocationCapabilities/01_enum.py Sat Jul 19 14:17:09 2008 -0700
@@ -20,63 +20,97 @@
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
+# This test verifies the enum of AC returns the same number of instances as
+# the number of instances returned by enum of:
+# MemoryPool + ProcessorPool + DiskPool + NetworkPool.
+#
import sys
+from VirtLib.live import virsh_version
from XenKvmLib import enumclass
from CimTest.Globals import do_main
from CimTest.Globals import logger, CIM_ERROR_ENUMERATE, platform_sup
from CimTest.ReturnCodes import PASS, FAIL
+from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf, \
+ create_netpool_conf, destroy_netpool
sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
+
+def enum_pools_and_ac(ip, virt, cn):
+ pools = {}
+ ac = []
+
+ pt = ['MemoryPool', 'ProcessorPool', 'DiskPool', 'NetworkPool']
+
+ try:
+ key = ["InstanceID"]
+ ac = enumclass.enumerate(ip, cn, key, virt)
+
+ for p in pt:
+ enum_list = enumclass.enumerate(ip, p, key, virt)
+
+ if len(enum_list) < 1:
+ logger.error("%s did not return any instances" % p)
+ return pools, ac
+
+ for pool in enum_list:
+ pools[pool.InstanceID] = pool
+
+ except Exception, details:
+ logger.error(CIM_ERROR_ENUMERATE, cn)
+ logger.error(details)
+ return pools, ac
+
+ if len(ac) != len(pools):
+ logger.error("%s returned %s instances, expected %s" % (cn, len(ac),
+ len(pools)))
+ return pools, ac
+
+def compare_pool_to_ac(ac, pools, cn):
+ try:
+ for inst in ac:
+ id = inst.InstanceID
+ if pools[id].ResourceType != inst.ResourceType:
+ logger.error("%s ResourceType %s, Pool ResourceType %s" % (cn,
+ inst.ResourceType, pools[id].ResourceType))
+ return FAIL
+
+ except Exception, details:
+ logger.error("%s returned instance with unexpected InstanceID %s" % (cn,
+ details))
+ return FAIL
+
+ return PASS
+
@do_main(sup_types)
def main():
options = main.options
- pools = {}
- pt = ['MemoryPool', 'ProcessorPool', 'DiskPool', 'NetworkPool']
- try:
- key_list = ["InstanceID"]
- ac = enumclass.enumerate(options.ip,
- "AllocationCapabilities",
- key_list,
- options.virt)
- pools['MemoryPool'] = enumclass.enumerate(options.ip,
- "MemoryPool",
- key_list,
- options.virt)
- pools['ProcessorPool'] = enumclass.enumerate(options.ip,
- "ProcessorPool",
- key_list,
- options.virt)
- pools['DiskPool'] = enumclass.enumerate(options.ip,
- "DiskPool",
- key_list,
- options.virt)
- pools['NetworkPool'] = enumclass.enumerate(options.ip,
- "NetworkPool",
- key_list,
- options.virt)
- except Exception:
- logger.error(CIM_ERROR_ENUMERATE, '%s_AllocationCapabilities' % options.virt)
- return FAIL
-
- acset = set([(x.InstanceID, x.ResourceType) for x in ac])
- poolset = set()
- for pl in pools.values():
- for x in pl:
- poolset.add((x.InstanceID, x.ResourceType))
+ cn = 'AllocationCapabilities'
- if len(acset) != len(poolset):
- logger.error(
- 'AllocationCapabilities return %i instances, excepted %i'
- % (ac_size, pool_size))
- return FAIL
- zeroset = acset - poolset
- if len(zeroset) != 0:
- logger.error('AC is inconsistent with pools')
+ status, diskid = create_diskpool_conf(options.ip, options.virt)
+ if status != PASS:
+ cleanup_restore(options.ip, options.virt)
+ return FAIL
+
+ status, test_network = create_netpool_conf(options.ip, options.virt)
+ if status != PASS:
+ cleanup_restore(options.ip, options.virt)
+ destroy_netpool(options.ip, options.virt, test_network)
+ return FAIL
+
+ pools, ac = enum_pools_and_ac(options.ip, options.virt, cn)
+ if len(pools) < 1:
+ cleanup_restore(options.ip, options.virt)
+ destroy_netpool(options.ip, options.virt, test_network)
return FAIL
- return PASS
+ status = compare_pool_to_ac(ac, pools, cn)
+
+ cleanup_restore(options.ip, options.virt)
+ destroy_netpool(options.ip, options.virt, test_network)
+
+ return status
if __name__ == "__main__":
sys.exit(main())
16 years, 5 months
last problem
by 072021096
Registering class LXC_VirtualSystemMigrationService
Registering class Xen_ElementConformsToProfile
Registering class KVM_ElementConformsToProfile
Registering class Xen_VirtualSystemMigrationSettingData
Registering class KVM_VirtualSystemMigrationSettingData
Registering class LXC_VirtualSystemMigrationSettingData
Registering class Xen_VirtualSystemSnapshotService
Registering class KVM_VirtualSystemSnapshotService
Registering class LXC_VirtualSystemSnapshotService
Registering class Xen_VirtualSystemSnapshotServiceCapabilities
Registering class KVM_VirtualSystemSnapshotServiceCapabilities
Registering class LXC_VirtualSystemSnapshotServiceCapabilities
Staging provider registration.
Rebuilding repository.
sfcbmof: error while loading shared libraries: libsfcBrokerCore.so.0: cannot open shared object file: No such file or directory
Failed compiling the MOF files.
make: *** [postinstall] Error 1
[root@seanggz libvirt-cim-0.4]#
Hello, that's the last problem. Do I need some additional configuration?
I just used the command line from the README: make postinstall
Thanks!
16 years, 5 months
[PATCH] [TEST] #3 Fix CS 22 to use providers instead of virsh
by Kaitlin Rupert
# HG changeset patch
# User Kaitlin Rupert <karupert(a)us.ibm.com>
# Date 1216661611 25200
# Node ID abcd4c8a873656b6f12c4416832d185f8c9eb151
# Parent 18d3c235f0c893f934aaf61f9a1de22ed6f3dd60
[TEST] #3 Fix CS 22 to use providers instead of virsh.
This test was defining a guest with virsh and then suspending it with virsh, which doesn't touch the providers in any way. Now the test calls DefineSystem() and RequestStateChange().
Updates from 1 to 2:
-Add check to verify guest is the expected state after the RequestStateChange() call.
-Create a network pool because the VSMS provider requires a network pool to exist in order to create a guest.
Updates from 2 to 3:
-Remove destroy_netpool() if create_netpool_conf() fails. This is not needed.
Signed-off-by: Kaitlin Rupert <karupert(a)us.ibm.com>
diff -r 18d3c235f0c8 -r abcd4c8a8736 suites/libvirt-cim/cimtest/ComputerSystem/22_define_suspend.py
--- a/suites/libvirt-cim/cimtest/ComputerSystem/22_define_suspend.py Mon Jul 21 09:51:33 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ComputerSystem/22_define_suspend.py Mon Jul 21 10:33:31 2008 -0700
@@ -32,54 +32,71 @@
from XenKvmLib import computersystem
from VirtLib import utils
from XenKvmLib import vxml
-from XenKvmLib.test_doms import destroy_and_undefine_all
-from CimTest.Globals import do_main
-from CimTest import Globals
+from XenKvmLib.test_doms import destroy_and_undefine_domain
+from CimTest.Globals import do_main, logger
from CimTest.ReturnCodes import PASS, FAIL
+from XenKvmLib.common_util import create_using_definesystem, \
+ call_request_state_change, get_cs_instance, \
+ create_netpool_conf, destroy_netpool
sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
test_dom = "domgst"
+DEFINE_STATE = 3
+SUSPND_STATE = 9
+TIME = "00000000000000.000000:000"
+
+def chk_state(domain_name, ip, en_state, virt):
+ rc, cs = get_cs_instance(domain_name, ip, virt)
+ if rc != 0:
+ return rc
+
+ if cs.EnabledState != en_state:
+ logger.error("EnabledState should be %d not %d",
+ en_state, cs.EnabledState)
+ return FAIL
+
+ return PASS
+
@do_main(sup_types)
def main():
options = main.options
- status = FAIL
-
- cxml = vxml.get_class(options.virt)(test_dom)
-#define VS
+ status, test_network = create_netpool_conf(options.ip, options.virt)
+ if status != PASS:
+ return FAIL
+
try:
- ret = cxml.define(options.ip)
- if not ret:
- Globals.logger.error(Globals.VIRSH_ERROR_DEFINE % test_dom)
- return status
-
- cs = computersystem.get_cs_class(options.virt)(options.ip, test_dom)
- if not (cs.Name == test_dom) :
- Globals.logger.error("Error: VS %s not found" % test_dom)
- cxml.undefine(options.ip)
+ # define the vs
+ status = create_using_definesystem(test_dom, options.ip,
+ virt=options.virt)
+ if status != PASS:
+ logger.error("Unable to define %s using DefineSystem()" % test_dom)
+ destroy_netpool(options.ip, options.virt, test_network)
return status
- except Exception, detail:
- Globals.logger.error("Errors: %s" % detail)
+ # suspend the vs
+ status = call_request_state_change(test_dom, options.ip, SUSPND_STATE,
+ TIME, virt=options.virt)
+ if status != PASS:
+ logger.info("Suspending defined %s failed, as expected" % test_dom)
+ status = PASS
-#Suspend the defined VS
-
- try:
- ret = cxml.run_virsh_cmd(options.ip, "suspend")
- if not ret :
- Globals.logger.info("Suspending defined VS %s failed, as expected" \
-% test_dom)
- status = PASS
+ status = chk_state(test_dom, options.ip, DEFINE_STATE, options.virt)
+ if status != PASS:
+ logger.error("%s not in defined state as expected." % test_dom)
+ status = FAIL
+
else :
- Globals.logger.info("Error: Suspending defined VS %s should not \
-have been allowed" % test_dom)
+ logger.error("Suspending defined %s should have failed" % test_dom)
status = FAIL
except Exception, detail:
- Globals.logger.error("Error: %s" % detail)
+ logger.error("Error: %s" % detail)
+ status = FAIL
- ret = cxml.undefine(options.ip)
+ destroy_netpool(options.ip, options.virt, test_network)
+ destroy_and_undefine_domain(test_dom, options.ip, options.virt)
return status
if __name__ == "__main__":
16 years, 5 months
[PATCH] [TEST] #2 Enabling 02_reverse.py with XenFV, KVM, LXC and modified logicaldevices.py to work for all virt types
by Deepti B. Kalakeri
# HG changeset patch
# User Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1216734147 25200
# Node ID 6d1ac5c3497b95a230217f8c5f8b226c07fb1b31
# Parent 3703b7be5a107c67e901546978e974546b3d5562
[TEST] #2 Enabling 02_reverse.py with XenFV, KVM, LXC and modified logicaldevices.py to work for all virt types.
Changes:
-------
Patch 2:
-------
1) Added the missing cleanup_restore() fn in the tc.
In logicaldevices library:
--------------------------
1) Moved the common code from verify_proc_values(), verify_net_values(), verify_disk_values(), verify_mem_values() to verify_device_values().
2) Eliminated verify_proc_values() since it did not require any processor specific checks apart from the ones included in verify_device_values().
Patch 1:
--------
1) Removed unnecessary import statements.
2) Updated verify_proc_values, verify_mem_values, verify_net_values, verify_disk_values function of logicaldevices to work with all virt types.
3) Used verify_proc_values, verify_mem_values, verify_net_values, verify_disk_values fn.
4) Removed conf_file(), clean_up_restore(), get_or_bail(), print_error(), init_list(), get_spec_fields_list(), assoc_values() fns.
5) Removed global status variable.
6) Added verify_eafp_values(), init_pllist(), create_diskpool_conf() fn.
7) Included create_diskpool_conf(), cleanup_restore() fn.
Tested on KVM on rpm, KVM current sources, LXC, XenFV, Xen.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
diff -r 3703b7be5a10 -r 6d1ac5c3497b suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py Wed Jul 16 07:23:32 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py Tue Jul 22 06:42:27 2008 -0700
@@ -20,9 +20,8 @@
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
-
# This tc is used to verify the classname, InstanceID and certian prop are
-# are appropriately set for the domains when verified using the
+# appropriately set for the domains when verified using the
# Xen_ElementAllocatedFromPool asscoiation.
#
# Example command for LogicalDisk w.r.t to Xen_ElementAllocatedFromPool \
@@ -47,111 +46,79 @@
import sys
import os
-from distutils.file_util import move_file
import pywbem
-from VirtLib import utils
-from VirtLib import live
-from XenKvmLib import assoc
-from XenKvmLib import enumclass
-from CimTest import Globals
-from CimTest.Globals import do_main
-from CimTest.ReturnCodes import PASS, FAIL, SKIP
-from XenKvmLib.test_xml import testxml_bridge
-from XenKvmLib.test_doms import test_domain_function, destroy_and_undefine_all
-from VirtLib.live import network_by_bridge
+from XenKvmLib.assoc import Associators
+from XenKvmLib.vxml import get_class
+from CimTest.Globals import do_main, logger, CIM_ERROR_ASSOCIATORS
+from CimTest.ReturnCodes import PASS, FAIL
+from XenKvmLib.test_doms import destroy_and_undefine_all
+from XenKvmLib.classes import get_typed_class
+from XenKvmLib.common_util import create_diskpool_conf, cleanup_restore
+from XenKvmLib.logicaldevices import verify_device_values
-sup_types = ['Xen']
+sup_types = ['Xen' , 'KVM', 'XenFV', 'LXC']
-status = PASS
-test_dom = "hd_domain"
+test_dom = "eafp_domain"
test_mac = "00:11:22:33:44:aa"
test_mem = 128
test_vcpus = 4
-test_disk = "xvdb"
-test_dpath = "foo"
-disk_file = '/tmp/diskpool.conf'
-back_disk_file = disk_file + "." + "02_reverse"
-diskid = "%s/%s" % ("DiskPool", test_dpath)
-memid = "%s/%s" % ("MemoryPool", 0)
-procid = "%s/%s" % ("ProcessorPool", 0)
-def conf_file():
- """
- Creating diskpool.conf file.
- """
- try:
- f = open(disk_file, 'w')
- f.write('%s %s' % (test_dpath, '/'))
- f.close()
- except Exception,detail:
- Globals.logger.error("Exception: %s", detail)
- status = SKIP
- sys.exit(status)
+def init_pllist(virt, vsxml, diskid):
+ keys = {
+ 'MemoryPool' : 'MemoryPool/0',
+ }
+ if virt != 'LXC':
+ virt_network = vsxml.xml_get_net_network()
+ keys['DiskPool'] = diskid
+ keys['ProcessorPool'] = 'ProcessorPool/0'
+ keys['NetworkPool'] = 'NetworkPool/%s' %virt_network
-def clean_up_restore(ip):
- """
- Restoring back the original diskpool.conf
- file.
- """
- try:
- if os.path.exists(back_disk_file):
- os.remove(disk_file)
- move_file(back_disk_file, disk_file)
- except Exception, detail:
- Globals.logger.error("Exception: %s", detail)
- status = SKIP
- ret = test_domain_function(test_dom, ip, \
- cmd = "destroy")
- sys.exit(status)
-
+ pllist = { }
+ for cn, k in keys.iteritems():
+ cn = get_typed_class(virt, cn)
+ pllist[cn] = k
-def get_or_bail(ip, id, pool_class):
- """
- Getinstance for the CLass and return instance on success, otherwise
- exit after cleanup_restore and destroying the guest.
- """
- key_list = { 'InstanceID' : id }
- try:
- instance = enumclass.getInstance(ip, pool_class, key_list)
- except Exception, detail:
- Globals.logger.error(Globals.CIM_ERROR_GETINSTANCE, '%s', pool_class)
- Globals.logger.error("Exception: %s", detail)
- clean_up_restore(ip)
- status = FAIL
- ret = test_domain_function(test_dom, ip, \
- cmd = "destroy")
- sys.exit(status)
- return instance
+ return pllist
-def print_error(field, ret_val, req_val):
- Globals.logger.error("%s Mismatch", field)
- Globals.logger.error("Returned %s instead of %s", ret_val, req_val)
+def eafp_list(virt, test_disk):
+ mcn = get_typed_class(virt, "Memory")
+ mem = {
+ 'SystemName' : test_dom,
+ 'CreationClassName' : mcn,
+ 'DeviceID' : "%s/%s" % (test_dom, "mem"),
+ 'NumberOfBlocks' : test_mem * 1024
+ }
-def init_list(ip, disk, mem, net, proc):
- """
- Creating the lists that will be used for comparisons.
- """
+ eaf_values = { mcn : mem }
- pllist = {
- "Xen_DiskPool" : disk.InstanceID, \
- "Xen_MemoryPool" : mem.InstanceID, \
- "Xen_NetworkPool" : net.InstanceID, \
- "Xen_ProcessorPool": proc.InstanceID
- }
- cllist = [
- "Xen_LogicalDisk", \
- "Xen_Memory", \
- "Xen_NetworkPort", \
- "Xen_Processor"
- ]
- prop_list = ["%s/%s" % (test_dom, test_disk), test_disk, \
- "%s/%s" % (test_dom, "mem"), test_mem, \
- "%s/%s" % (test_dom, test_mac), test_mac
- ]
- proc_prop = []
- for i in range(test_vcpus):
- proc_prop.append("%s/%s" % (test_dom, i))
- return pllist, cllist, prop_list, proc_prop
+ if virt != 'LXC':
+ dcn = get_typed_class(virt, "LogicalDisk")
+ pcn = get_typed_class(virt, "Processor")
+ ncn = get_typed_class(virt, "NetworkPort")
+
+ disk = {
+ 'SystemName' : test_dom,
+ 'CreationClassName' : dcn,
+ 'DeviceID' : "%s/%s" % (test_dom, test_disk),
+ 'Name' : test_disk
+ }
+ proc = {
+ 'SystemName' : test_dom,
+ 'CreationClassName' : pcn,
+ 'DeviceID' : None
+ }
+ net = {
+ 'SystemName' : test_dom,
+ 'CreationClassName' : ncn,
+ 'DeviceID' : "%s/%s" % (test_dom, test_mac),
+ 'NetworkAddresses' : test_mac
+ }
+
+ eaf_values[pcn] = proc
+ eaf_values[dcn] = disk
+ eaf_values[ncn] = net
+
+ return eaf_values
def get_inst_for_dom(assoc_val):
list = []
@@ -162,196 +129,85 @@ def get_inst_for_dom(assoc_val):
return list
-def get_spec_fields_list(inst_list, field_name):
- global status
- specific_fields = { }
- if (len(inst_list)) != 1:
- Globals.logger.error("Got %s record for Memory/Network/LogicalDisk instead of \
-1", len(inst_list))
- status = FAIL
- return
-# verifying the Name field for LogicalDisk
- try:
- if inst_list[0]['CreationClassName'] != 'Xen_Memory':
- field_value = inst_list[0][field_name]
- if field_name == 'NetworkAddresses':
-# For network we NetworkAddresses is a list of addresses, since we
-# are assigning only one address we are taking field_value[0]
- field_value = field_value[0]
- else:
- field_value = ((int(inst_list[0]['NumberOfBlocks'])*4096)/1024)
- specific_fields = {
- "field_name" : field_name,\
- "field_value" : field_value
- }
- except Exception, detail:
- Globals.logger.error("Exception in get_spec_fields_list(): %s", detail)
- status = FAIL
- return specific_fields
+def verify_eafp_values(server, virt, in_pllist, test_disk):
+ # Looping through the in_pllist to get association for various pools.
+ eafp_values = eafp_list(virt, test_disk)
+ an = get_typed_class(virt, "ElementAllocatedFromPool")
+ for cn, instid in sorted(in_pllist.iteritems()):
+ try:
+ assoc_info = Associators(server, an, cn, virt, InstanceID = instid)
+ assoc_inst_list = get_inst_for_dom(assoc_info)
+ if len(assoc_inst_list) < 1 :
+ logger.error("'%s' with '%s' did not return any records for"
+ " domain: '%s'", an, cn, test_dom)
+ return FAIL
-def assoc_values(assoc_list, field , list, index, specific_fields_list=""):
- """
- Verifying the records retruned by the associations.
- """
- global status
- if field == "CreationClassName":
- for i in range(len(assoc_list)):
- if assoc_list[i][field] != list[index]:
- print_error(field, assoc_list[i][field], list[index])
- status = FAIL
+ assoc_eafp_info = assoc_inst_list[0]
+ CCName = assoc_eafp_info['CreationClassName']
+ if CCName == get_typed_class(virt, 'Processor'):
+ if len(assoc_inst_list) != test_vcpus:
+ logger.error("'%s' should have returned '%i' Processor"
+ " details, got '%i'", an, test_vcpus,
+ len(assoc_inst_list))
+ return FAIL
+
+ for i in range(test_vcpus):
+ eafp_values[CCName]['DeviceID'] = "%s/%s" % (test_dom,i)
+ status = verify_device_values(assoc_inst_list[i],
+ eafp_values, virt)
+ else:
+ status = verify_device_values(assoc_eafp_info,
+ eafp_values, virt)
+
if status != PASS:
- break
- elif field == "DeviceID":
- if assoc_list[0]['CreationClassName'] == 'Xen_Processor':
-# Verifying the list of DeviceId returned by the association
-# against the list created intially .
- for i in range(len(list)):
- if assoc_list[i]['DeviceID'] != list[i]:
- print_error(field, assoc_list[i]['DeviceID'], list[i])
- status = FAIL
- else:
-# Except for Xen_Processor, we get only once record for a domain for
-# other classes.
- if assoc_list[0]['DeviceID'] != list[index]:
- print_error(field, assoc_list[0]['DeviceID'] , list[index])
- status = FAIL
- else:
- # other specific fields verification
- if assoc_list[0]['CreationClassName'] != 'Xen_Processor':
- spec_field_name = specific_fields_list['field_name']
- spec_field_value = specific_fields_list['field_value']
- if spec_field_value != list[index]:
- print_error(field, spec_field_value, list[index])
- status = FAIL
-
+ return status
+ except Exception, detail:
+ logger.error(CIM_ERROR_ASSOCIATORS, an)
+ logger.error("Exception: %s", detail)
+ status = FAIL
+ return status
@do_main(sup_types)
def main():
options = main.options
- global status
loop = 0
server = options.ip
- destroy_and_undefine_all(options.ip)
- test_xml, bridge = testxml_bridge(test_dom, mem = test_mem, vcpus = test_vcpus, \
- mac = test_mac, disk = test_disk, server = options.ip)
- if bridge == None:
- Globals.logger.error("Unable to find virtual bridge")
- return SKIP
+ virt = options.virt
+ if virt == 'Xen':
+ test_disk = 'xvdb'
+ else:
+ test_disk = 'hda'
- if test_xml == None:
- Globals.logger.error("Guest xml was not created properly")
+ # Getting the VS list and deleting the test_dom if it already exists.
+ destroy_and_undefine_all(server)
+ virt_type = get_class(virt)
+ if virt == 'LXC':
+ vsxml = virt_type(test_dom, vcpus = test_vcpus)
+ else:
+ vsxml = virt_type(test_dom, mem = test_mem, vcpus = test_vcpus,
+ mac = test_mac, disk = test_disk)
+
+ # Verify DiskPool on machine
+ status, diskid = create_diskpool_conf(server, virt)
+ if status != PASS:
+ return status
+
+ ret = vsxml.create(server)
+ if not ret:
+ logger.error("Failed to Create the dom: '%s'", test_dom)
+ cleanup_restore(server, virt)
return FAIL
- virt_network = network_by_bridge(bridge, server)
- if virt_network == None:
- Globals.logger.error("No virtual network found for bridge %s", bridge)
- return SKIP
+ # Get pool list against which the EAFP should be queried
+ pllist = init_pllist(virt, vsxml, diskid)
- ret = test_domain_function(test_xml, server, cmd = "create")
- if not ret:
- Globals.logger.error("Failed to Create the dom: %s", test_dom)
- return FAIL
+
+ status = verify_eafp_values(server, virt, pllist, test_disk)
+ vsxml.destroy(server)
+ cleanup_restore(server, virt)
+ return status
- # Taking care of already existing diskconf file
- # Creating diskpool.conf if it does not exist
- # Otherwise backing up the prev file and create new one.
- os.system("rm -f %s" % back_disk_file )
- if not (os.path.exists(disk_file)):
- conf_file()
- else:
- move_file(disk_file, back_disk_file)
- conf_file()
- try :
- disk = get_or_bail(server, id=diskid, \
- pool_class=enumclass.Xen_DiskPool)
- mem = get_or_bail(server, id = memid, \
- pool_class=enumclass.Xen_MemoryPool)
- netid = "%s/%s" % ("NetworkPool", virt_network)
- net = get_or_bail(server, id = netid, \
- pool_class=enumclass.Xen_NetworkPool)
- proc = get_or_bail(server, id = procid, \
- pool_class=enumclass.Xen_ProcessorPool)
-
- except Exception, detail:
- Globals.logger.error("Exception: %s", detail)
- clean_up_restore(server)
- status = FAIL
- ret = test_domain_function(test_dom, server, \
- cmd = "destroy")
- return status
-
- pllist, cllist, prop_list, proc_prop = init_list(server, disk, mem, net, proc)
-
-# Looping through the pllist to get association for various pools.
- for cn, instid in sorted(pllist.items()):
- try:
- assoc_info = assoc.Associators(server, \
- "Xen_ElementAllocatedFromPool", \
- cn, \
- InstanceID = instid)
-# Verifying the Creation Class name for all the records returned for each
-# pool class queried
- inst_list = get_inst_for_dom(assoc_info)
- if (len(inst_list)) == 0:
- Globals.logger.error("Association did not return any records for \
-the specified domain: %s", test_dom)
- status = FAIL
- break
-
- assoc_values(assoc_list=inst_list, field="CreationClassName", \
- list=cllist, \
- index=loop)
-# verifying the DeviceID
- if inst_list[0]['CreationClassName'] == 'Xen_Processor':
-# The DeviceID for the processor varies from 0 to (vcpu - 1 )
- list_index = 0
- assoc_values(assoc_list=inst_list, field="DeviceID", \
- list=proc_prop, \
- index=list_index)
- else:
-# For LogicalDisk, Memory and NetworkPort
- if inst_list[0]['CreationClassName'] == 'Xen_LogicalDisk':
- list_index = 0
- elif inst_list[0]['CreationClassName'] == 'Xen_Memory':
- list_index = 2
- else:
- list_index = 4 # NetworkPort
- assoc_values(assoc_list=inst_list, field="DeviceID", \
- list=prop_list, \
- index=list_index)
- if inst_list[0]['CreationClassName'] == 'Xen_LogicalDisk':
-# verifying the Name field for LogicalDisk
- specific_fields = get_spec_fields_list(inst_list,field_name="Name")
- list_index = 1
- elif inst_list[0]['CreationClassName'] == 'Xen_Memory':
-# verifying the NumberOfBlocks allocated for Memory
- specific_fields = get_spec_fields_list(inst_list,field_name="NumberOfBlocks")
- list_index = 3
- else:
-# verifying the NetworkAddresses for the NetworkPort
- specific_fields = get_spec_fields_list(inst_list,field_name="NetworkAddresses")
- list_index = 5 # NetworkPort
- assoc_values(assoc_list=inst_list, field="Other", \
- list=prop_list, \
- index=list_index, \
- specific_fields_list=specific_fields)
- if status != PASS:
- break
- else:
-# The loop variable is used to index the cllist to verify the creationclassname
- loop = loop + 1
- except Exception, detail:
- Globals.logger.error(Globals.CIM_ERROR_ASSOCIATORS, \
- 'Xen_ElementAllocatedFromPool')
- Globals.logger.error("Exception: %s", detail)
- clean_up_restore(server)
- status = FAIL
-
- ret = test_domain_function(test_dom, server, \
- cmd = "destroy")
- clean_up_restore(server)
- return status
if __name__ == "__main__":
sys.exit(main())
diff -r 3703b7be5a10 -r 6d1ac5c3497b suites/libvirt-cim/lib/XenKvmLib/logicaldevices.py
--- a/suites/libvirt-cim/lib/XenKvmLib/logicaldevices.py Wed Jul 16 07:23:32 2008 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/logicaldevices.py Tue Jul 22 06:42:27 2008 -0700
@@ -23,6 +23,7 @@ import os
import os
from CimTest.Globals import logger
from CimTest.ReturnCodes import PASS, FAIL, SKIP
+from XenKvmLib.vxml import get_typed_class
# The list_values that is passed should be of the type ..
# disk = {
@@ -53,70 +54,58 @@ def spec_err(fieldvalue, field_list, fie
logger.error("%s Mismatch", fieldname)
logger.error("Returned %s instead of %s", fieldvalue, field_list[fieldname])
-def verify_proc_values(assoc_info, list_values):
- proc_values = list_values['Xen_Processor']
- if assoc_info['CreationClassName'] != proc_values['CreationClassName']:
- field_err(assoc_info, proc_values, fieldname = 'CreationClassName')
+
+def verify_device_values(assoc_info, list_values, virt='Xen'):
+ dev_cnames = ['LogicalDisk', 'Memory', 'NetworkPort', 'Processor']
+ for i in range(len(dev_cnames)):
+ dev_cnames[i] = get_typed_class(virt, dev_cnames[i])
+
+ CCName = assoc_info['CreationClassName']
+ if not CCName in dev_cnames:
+ logger.error("'%s' seem to be an invalid classname", CCName)
+ return FAIL
+
+ dev_values = list_values[CCName]
+ if assoc_info['CreationClassName'] != dev_values['CreationClassName']:
+ field_err(assoc_info, dev_values, fieldname = 'CreationClassName')
+ return FAIL
+
+ if assoc_info['DeviceID'] != dev_values['DeviceID']:
+ field_err(assoc_info, dev_values, fieldname = 'DeviceID')
+ return FAIL
+
+ sysname = assoc_info['SystemName']
+ if sysname != dev_values['SystemName']:
+ spec_err(sysname, list_values, fieldname = 'SystemName')
return FAIL
- if assoc_info['DeviceID'] != proc_values['DeviceID']:
- field_err(assoc_info, proc_values, fieldname = 'DeviceID')
- return FAIL
- sysname = assoc_info['SystemName']
- if sysname != proc_values['SystemName']:
- spec_err(sysname, proc_values, fieldname = 'SystemName')
- return FAIL
- return PASS
-def verify_mem_values(assoc_info, list_values):
- mem_values = list_values['Xen_Memory']
- if assoc_info['CreationClassName'] != mem_values['CreationClassName']:
- field_err(assoc_info, mem_values, fieldname = 'CreationClassName')
- return FAIL
- if assoc_info['DeviceID'] != mem_values['DeviceID']:
- field_err(assoc_info, mem_values, fieldname = 'DeviceID')
- return FAIL
- sysname = assoc_info['SystemName']
- if sysname != mem_values['SystemName']:
- spec_err(sysname, mem_values, fieldname = 'SystemName')
- return FAIL
+ # Checking Device specific values.
+ if CCName == dev_cnames[0]: # Verifying disk values
+ return verify_disk_values(assoc_info, dev_values, virt)
+ elif CCName == dev_cnames[1]: # Verifying mem values
+ return verify_mem_values(assoc_info, dev_values, virt)
+ elif CCName == dev_cnames[2]: # Verifying net values
+ return verify_net_values(assoc_info, dev_values, virt)
+ elif CCName == dev_cnames[3]: # Verifying processor values
+ return PASS
+
+def verify_mem_values(assoc_info, mem_values, virt='Xen'):
blocks = ((int(assoc_info['NumberOfBlocks'])*4096)/1024)
if blocks != mem_values['NumberOfBlocks']:
spec_err(blocks, mem_values, fieldname = 'NumberOfBlocks')
return FAIL
return PASS
-def verify_net_values(assoc_info, list_values):
- net_values = list_values['Xen_NetworkPort']
- if assoc_info['CreationClassName'] != net_values['CreationClassName']:
- field_err(assoc_info, net_values, fieldname = 'CreationClassName')
- return FAIL
- if assoc_info['DeviceID'] != net_values['DeviceID']:
- field_err(assoc_info, net_values, fieldname = 'DeviceID')
- return FAIL
- sysname = assoc_info['SystemName']
- if sysname != net_values['SystemName']:
- spec_err(sysname, net_values, fieldname = 'SystemName')
- return FAIL
-# We are assinging only one mac address and hence we expect only one
-# address in the list
+def verify_net_values(assoc_info, net_values, virt='Xen'):
+ # We are assinging only one mac address and hence we expect only one
+ # address in the list
netadd = assoc_info['NetworkAddresses'][0]
if netadd != net_values['NetworkAddresses']:
spec_err(netadd, net_values, fieldname = 'NetworkAddresses')
return FAIL
return PASS
-def verify_disk_values(assoc_info, list_values):
- disk_values = list_values['Xen_LogicalDisk']
- if assoc_info['CreationClassName'] != disk_values['CreationClassName']:
- field_err(assoc_info, disk_values, fieldname = 'CreationClassName')
- return FAIL
- if assoc_info['DeviceID'] != disk_values['DeviceID']:
- field_err(assoc_info, disk_values, fieldname = 'DeviceID')
- return FAIL
- sysname = assoc_info['SystemName']
- if sysname != disk_values['SystemName']:
- spec_err(sysname, disk_values, fieldname = 'SystemName')
- return FAIL
+def verify_disk_values(assoc_info, disk_values, virt='Xen'):
devname = assoc_info['Name']
if devname != disk_values['Name']:
spec_err(devname, disk_values, fieldname = 'Name')
16 years, 5 months
Cimtest Report for Xen on RHEL5.2 (2008/07/21)
by Guo Lian Yun
Distro : RHEL 5.2 Beta
Kernel : kernel-2.6.18-92.el5
Xen version : xen-3.0.3-64.el5
Libvirt : libvirt-0.3.3-7.el5
CIMOM : pegasus
PyWBEM : pywbem-3.14
CIM Schema : cimv216Experimental
LibCMPIutil : 83
LibVirtCIM : 637
CIMTEST : 249
=========================================================
PASS : 113
FAILED : 9
XFAIL : 7
SKIP : 1
Total : 130
=======================FAILED==============================
ElementAllocatedFromPool - 02_reverse.py: FAIL
ERROR - AttributeError : 'NoneType' object has no attribute 'InstanceID'
CIM_ERR_NOT_FOUND: No such instance (foo)
LogicalDisk - 02_nodevs.py: FAIL
ERROR - LogicalDisk returned 1 instead of empty list
LogicalDisk - 03_ld_gi_errs.py: FAIL
ERROR - Failed to get instance by the class of Xen_LogicalDisk
ERROR - Exception: (6, u'CIM_ERR_NOT_FOUND: No such instance
(hd_domain/xvda)')
Memory - 01_memory.py: FAIL
ERROR - Capacity should be 262144 MB instead of 131072 MB
NetworkPort - 01_netport.py: FAIL
ERROR - Exception: (6, u'CIM_ERR_NOT_FOUND: No such instance
(test_domain/00:11:22:33:44:55)')
RASD - 02_enum.py: FAIL
ERROR - Xen_DiskResourceAllocationSettingData with VSSDC_dom was not
returned
VSSD - 02_bootldr.py: FAIL
ERROR - NameError : global name 'BaseException' is not defined
CIM_ERR_NOT_FOUND: No such instance (dom)
VSSD - 04_vssd_to_rasd.py: FAIL
ERROR - Xen_VirtualSystemSettingData with VSSDC_dom was not returned
ERROR - Xen_VirtualSystemSettingData returned 0 VSSD objects, expected
only 1
VirtualSystemManagementService - 09_procrasd_persist.py: FAIL
ERROR - limit is 0, expected 256
ERROR - rstest_domain CPU scheduling not set properly
=======================CIMTEST REPORT=======================
AllocationCapabilities - 01_enum.py: PASS
AllocationCapabilities - 02_alloccap_gi_errs.py: PASS
ComputerSystem - 01_enum.py: PASS
ComputerSystem - 02_nosystems.py: PASS
ComputerSystem - 03_defineVS.py: PASS
ComputerSystem - 04_defineStartVS.py: PASS
ComputerSystem - 05_activate_defined_start.py: XFAIL Bug: 00002
ERROR - ERROR: VS DomST1 transition from Defined State to Activate state
was not Successful
Bug:<00002>
ComputerSystem - 06_paused_active_suspend.py: XFAIL Bug: 00002
ERROR - ERROR: VS DomST1 transition from suspend State to Activate state
was not Successful
Bug:<00002>
ComputerSystem - 22_define_suspend.py: PASS
ComputerSystem - 23_suspend_suspend.py: XFAIL Bug: 00002
ERROR - RequestedState should be 2 not 12
ERROR - Attributes for dom test_domain not set as expected.
Bug:<00002>
ComputerSystem - 27_define_suspend_errs.py: PASS
ComputerSystem - 32_start_reboot.py: XFAIL Bug: 00002
ERROR - RequestedState should be 2 not 12
ERROR - Attributes for dom test_domain not set as expected.
Bug:<00002>
ComputerSystem - 33_suspend_reboot.py: XFAIL Bug: 00002
ERROR - RequestedState should be 2 not 12
ERROR - Attributes for dom test_domain not set as expected.
Bug:<00002>
ComputerSystem - 35_start_reset.py: XFAIL Bug: 00002
ERROR - RequestedState should be 2 not 12
ERROR - Attributes for dom test_domain not set as expected.
Bug:<00002>
ComputerSystem - 40_RSC_start.py: XFAIL Bug: 00002
ERROR - RequestedState should be 2 not 12
ERROR - Exception: Attributes were not set as expected for domain:
'test_domain'
Bug:<00002>
ComputerSystem - 41_cs_to_settingdefinestate.py: PASS
ComputerSystem - 42_cs_gi_errs.py: PASS
ComputerSystemIndication - 01_created_indication.py: PASS
ElementAllocatedFromPool - 01_forward.py: PASS
ElementAllocatedFromPool - 02_reverse.py: FAIL
ERROR - AttributeError : 'NoneType' object has no attribute 'InstanceID'
CIM_ERR_NOT_FOUND: No such instance (foo)
ElementAllocatedFromPool - 03_reverse_errs.py: PASS
ElementAllocatedFromPool - 04_forward_errs.py: PASS
ElementCapabilities - 01_forward.py: PASS
ElementCapabilities - 02_reverse.py: PASS
ElementCapabilities - 03_forward_errs.py: PASS
ElementCapabilities - 04_reverse_errs.py: PASS
ElementCapabilities - 05_hostsystem_cap.py: PASS
ElementConforms - 01_forward.py: PASS
ElementConforms - 02_reverse.py: PASS
ElementConforms - 03_ectp_fwd_errs.py: PASS
ElementConforms - 04_ectp_rev_errs.py: PASS
ElementSettingData - 01_forward.py: PASS
ElementSettingData - 03_esd_assoc_with_rasd_errs.py: PASS
EnabledLogicalElementCapabilities - 01_enum.py: PASS
EnabledLogicalElementCapabilities - 02_elecap_gi_errs.py: PASS
HostSystem - 01_enum.py: PASS
HostSystem - 02_hostsystem_to_rasd.py: PASS
HostSystem - 03_hs_to_settdefcap.py: PASS
HostSystem - 04_hs_to_EAPF.py: PASS
HostSystem - 05_hs_gi_errs.py: PASS
HostSystem - 06_hs_to_vsms.py: PASS
HostedDependency - 01_forward.py: PASS
HostedDependency - 02_reverse.py: PASS
HostedDependency - 03_enabledstate.py: PASS
HostedDependency - 04_reverse_errs.py: PASS
HostedResourcePool - 01_forward.py: PASS
HostedResourcePool - 02_reverse.py: PASS
HostedResourcePool - 03_forward_errs.py: PASS
HostedResourcePool - 04_reverse_errs.py: PASS
HostedService - 01_forward.py: PASS
HostedService - 02_reverse.py: PASS
HostedService - 03_forward_errs.py: PASS
HostedService - 04_reverse_errs.py: PASS
LogicalDisk - 01_disk.py: PASS
LogicalDisk - 02_nodevs.py: FAIL
ERROR - LogicalDisk returned 1 instead of empty list
LogicalDisk - 03_ld_gi_errs.py: FAIL
ERROR - Failed to get instance by the class of Xen_LogicalDisk
ERROR - Exception: (6, u'CIM_ERR_NOT_FOUND: No such instance
(hd_domain/xvda)')
Memory - 01_memory.py: FAIL
ERROR - Capacity should be 262144 MB instead of 131072 MB
Memory - 02_defgetmem.py: PASS
Memory - 03_mem_gi_errs.py: PASS
NetworkPort - 01_netport.py: FAIL
ERROR - Exception: (6, u'CIM_ERR_NOT_FOUND: No such instance
(test_domain/00:11:22:33:44:55)')
NetworkPort - 02_np_gi_errors.py: PASS
NetworkPort - 03_user_netport.py: SKIP
Processor - 01_processor.py: PASS
Processor - 02_definesys_get_procs.py: PASS
Processor - 03_proc_gi_errs.py: PASS
Profile - 01_enum.py: PASS
Profile - 02_profile_to_elec.py: PASS
Profile - 03_rprofile_gi_errs.py: PASS
RASD - 01_verify_rasd_fields.py: PASS
RASD - 02_enum.py: FAIL
ERROR - Xen_DiskResourceAllocationSettingData with VSSDC_dom was not
returned
RASD - 03_rasd_errs.py: PASS
ReferencedProfile - 01_verify_refprof.py: PASS
ReferencedProfile - 02_refprofile_errs.py: PASS
ResourceAllocationFromPool - 01_forward.py: PASS
ResourceAllocationFromPool - 02_reverse.py: PASS
ResourceAllocationFromPool - 03_forward_errs.py: PASS
ResourceAllocationFromPool - 04_reverse_errs.py: PASS
ResourceAllocationFromPool - 05_RAPF_err.py: PASS
ResourcePool - 01_enum.py: PASS
ResourcePool - 02_rp_gi_errors.py: PASS
ResourcePoolConfigurationCapabilities - 01_enum.py: PASS
ResourcePoolConfigurationCapabilities - 02_rpcc_gi_errs.py: PASS
ResourcePoolConfigurationService - 01_enum.py: PASS
ResourcePoolConfigurationService - 02_rcps_gi_errors.py: PASS
ResourcePoolConfigurationService - 03_CreateResourcePool.py: PASS
ResourcePoolConfigurationService - 04_CreateChildResourcePool.py: PASS
ResourcePoolConfigurationService - 05_AddResourcesToResourcePool.py: PASS
ResourcePoolConfigurationService - 06_RemoveResourcesFromResourcePool.py:
PASS
ResourcePoolConfigurationService - 07_DeleteResourcePool.py: PASS
SettingsDefine - 01_forward.py: PASS
SettingsDefine - 02_reverse.py: PASS
SettingsDefine - 03_sds_fwd_errs.py: PASS
SettingsDefine - 04_sds_rev_errs.py: PASS
SettingsDefineCapabilities - 01_forward.py: PASS
SettingsDefineCapabilities - 03_forward_errs.py: PASS
SettingsDefineCapabilities - 04_forward_vsmsdata.py: PASS
SettingsDefineCapabilities - 05_reverse_vsmcap.py: PASS
SystemDevice - 01_forward.py: PASS
SystemDevice - 02_reverse.py: PASS
SystemDevice - 03_fwderrs.py: PASS
VSSD - 01_enum.py: PASS
VSSD - 02_bootldr.py: FAIL
ERROR - NameError : global name 'BaseException' is not defined
CIM_ERR_NOT_FOUND: No such instance (dom)
VSSD - 03_vssd_gi_errs.py: PASS
VSSD - 04_vssd_to_rasd.py: FAIL
ERROR - Xen_VirtualSystemSettingData with VSSDC_dom was not returned
ERROR - Xen_VirtualSystemSettingData returned 0 VSSD objects, expected
only 1
VirtualSystemManagementCapabilities - 01_enum.py: PASS
VirtualSystemManagementCapabilities - 02_vsmcap_gi_errs.py: PASS
VirtualSystemManagementService - 01_definesystem_name.py: PASS
VirtualSystemManagementService - 02_destroysystem.py: PASS
VirtualSystemManagementService - 03_definesystem_ess.py: PASS
VirtualSystemManagementService - 04_definesystem_ers.py: PASS
VirtualSystemManagementService - 05_destroysystem_neg.py: PASS
VirtualSystemManagementService - 06_addresource.py: PASS
VirtualSystemManagementService - 07_addresource_neg.py: PASS
VirtualSystemManagementService - 08_modifyresource.py: PASS
VirtualSystemManagementService - 09_procrasd_persist.py: FAIL
ERROR - limit is 0, expected 256
ERROR - rstest_domain CPU scheduling not set properly
VirtualSystemMigrationCapabilities - 01_enum.py: PASS
VirtualSystemMigrationCapabilities - 02_vsmc_gi_errs.py: PASS
VirtualSystemMigrationService - 01_migratable_host.py: PASS
VirtualSystemMigrationService - 02_host_migrate_type.py: PASS
VirtualSystemMigrationService - 05_migratable_host_errs.py: PASS
VirtualSystemMigrationSettingData - 01_enum.py: PASS
VirtualSystemMigrationSettingData - 02_vsmsd_gi_errs.py: PASS
VirtualSystemSettingDataComponent - 01_forward.py: PASS
VirtualSystemSettingDataComponent - 02_reverse.py: PASS
VirtualSystemSettingDataComponent - 03_vssdc_fwd_errs.py: PASS
VirtualSystemSettingDataComponent - 04_vssdc_rev_errs.py: PASS
VirtualSystemSnapshotService - 01_enum.py: PASS
VirtualSystemSnapshotService - 02_vs_sservice_gi_errs.py: PASS
VirtualSystemSnapshotServiceCapabilities - 01_enum.py: PASS
VirtualSystemSnapshotServiceCapabilities - 02_vs_sservicecap_gi_errs.py:
PASS
Best,
Regards
Daisy (运国莲)
VSM Team, China Systems & Technology Labs (CSTL)
E-mail: yunguol(a)cn.ibm.com
TEL: (86)-21-60922403
Building 10, 399 Ke Yuan Rd, Pudong Shanghai, 201203
16 years, 5 months
[PATCH] [TEST] #2 Fix CS 22 to use providers instead of virsh
by Kaitlin Rupert
# HG changeset patch
# User Kaitlin Rupert <karupert(a)us.ibm.com>
# Date 1216507114 25200
# Node ID 26723e615b002509d1a91b5669d463b8132fadfc
# Parent 0ebcf05bf74b7a798e99afc8c4361b183fbfc6ee
[TEST] #2 Fix CS 22 to use providers instead of virsh.
This test was defining a guest with virsh and then suspending it with virsh, which doesn't touch the providers in any way. Now the test calls DefineSystem() and RequestStateChange().
Updates from 1 to 2:
-Add check to verify guest is the expected state after the RequestStateChange() call.
-Create a network pool because the VSMS provider requires a network pool to exist in order to create a guest.
Signed-off-by: Kaitlin Rupert <karupert(a)us.ibm.com>
diff -r 0ebcf05bf74b -r 26723e615b00 suites/libvirt-cim/cimtest/ComputerSystem/22_define_suspend.py
--- a/suites/libvirt-cim/cimtest/ComputerSystem/22_define_suspend.py Sat Jul 19 15:14:33 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ComputerSystem/22_define_suspend.py Sat Jul 19 15:38:34 2008 -0700
@@ -32,54 +32,72 @@
from XenKvmLib import computersystem
from VirtLib import utils
from XenKvmLib import vxml
-from XenKvmLib.test_doms import destroy_and_undefine_all
-from CimTest.Globals import do_main
-from CimTest import Globals
+from XenKvmLib.test_doms import destroy_and_undefine_domain
+from CimTest.Globals import do_main, logger
from CimTest.ReturnCodes import PASS, FAIL
+from XenKvmLib.common_util import create_using_definesystem, \
+ call_request_state_change, get_cs_instance, \
+ create_netpool_conf, destroy_netpool
sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
test_dom = "domgst"
+DEFINE_STATE = 3
+SUSPND_STATE = 9
+TIME = "00000000000000.000000:000"
+
+def chk_state(domain_name, ip, en_state, virt):
+ rc, cs = get_cs_instance(domain_name, ip, virt)
+ if rc != 0:
+ return rc
+
+ if cs.EnabledState != en_state:
+ logger.error("EnabledState should be %d not %d",
+ en_state, cs.EnabledState)
+ return FAIL
+
+ return PASS
+
@do_main(sup_types)
def main():
options = main.options
- status = FAIL
-
- cxml = vxml.get_class(options.virt)(test_dom)
-#define VS
+ status, test_network = create_netpool_conf(options.ip, options.virt)
+ if status != PASS:
+ destroy_netpool(options.ip, options.virt, test_network)
+ return FAIL
+
try:
- ret = cxml.define(options.ip)
- if not ret:
- Globals.logger.error(Globals.VIRSH_ERROR_DEFINE % test_dom)
- return status
-
- cs = computersystem.get_cs_class(options.virt)(options.ip, test_dom)
- if not (cs.Name == test_dom) :
- Globals.logger.error("Error: VS %s not found" % test_dom)
- cxml.undefine(options.ip)
+ # define the vs
+ status = create_using_definesystem(test_dom, options.ip,
+ virt=options.virt)
+ if status != PASS:
+ logger.error("Unable to define %s using DefineSystem()" % test_dom)
+ destroy_netpool(options.ip, options.virt, test_network)
return status
- except Exception, detail:
- Globals.logger.error("Errors: %s" % detail)
+ # suspend the vs
+ status = call_request_state_change(test_dom, options.ip, SUSPND_STATE,
+ TIME, virt=options.virt)
+ if status != PASS:
+ logger.info("Suspending defined %s failed, as expected" % test_dom)
+ status = PASS
-#Suspend the defined VS
-
- try:
- ret = cxml.run_virsh_cmd(options.ip, "suspend")
- if not ret :
- Globals.logger.info("Suspending defined VS %s failed, as expected" \
-% test_dom)
- status = PASS
+ status = chk_state(test_dom, options.ip, DEFINE_STATE, options.virt)
+ if status != PASS:
+ logger.error("%s not in defined state as expected." % test_dom)
+ status = FAIL
+
else :
- Globals.logger.info("Error: Suspending defined VS %s should not \
-have been allowed" % test_dom)
+ logger.error("Suspending defined %s should have failed" % test_dom)
status = FAIL
except Exception, detail:
- Globals.logger.error("Error: %s" % detail)
+ logger.error("Error: %s" % detail)
+ status = FAIL
- ret = cxml.undefine(options.ip)
+ destroy_netpool(options.ip, options.virt, test_network)
+ destroy_and_undefine_domain(test_dom, options.ip, options.virt)
return status
if __name__ == "__main__":
16 years, 5 months
[PATCH] [TEST] #2 Fix potential false positive in CS 02
by Kaitlin Rupert
# HG changeset patch
# User Kaitlin Rupert <karupert(a)us.ibm.com>
# Date 1216654535 25200
# Node ID 29ede6f3ff473886fa928a83fd62a0a192493b7e
# Parent de486893703609719ea79d53ee64828537826211
[TEST] #2 Fix potential false positive in CS 02.
Also make it so that the test skips if domain_list() returns anything > 0.
This test used to be a Xen only test, so the test was only being run if there were no other guests besides Dom0. However, CS EnumInstances treats Dom0 as any other guest, so it only makes sense to run this test on KVM and LXC.
If computersystem.enumerate() encounters an exception, raise an exception so that the calling test case fail appropriately. We could return FAIL in this case, but raising an exception is more descriptive. Also, other tests that call computersystem.enumerate() don't need to be modified because the number of arguments returned doesn't change.
Updates from 1 to 2:
-Remove Xen and XenFV from the supported platforms list.
Signed-off-by: Kaitlin Rupert <karupert(a)us.ibm.com>
diff -r de4868937036 -r 29ede6f3ff47 suites/libvirt-cim/cimtest/ComputerSystem/02_nosystems.py
--- a/suites/libvirt-cim/cimtest/ComputerSystem/02_nosystems.py Sat Jul 19 14:17:09 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ComputerSystem/02_nosystems.py Mon Jul 21 08:35:35 2008 -0700
@@ -27,15 +27,15 @@
from XenKvmLib import computersystem
from VirtLib import live
from VirtLib import utils
-from CimTest.Globals import logger
+from CimTest.Globals import logger, CIM_ERROR_ENUMERATE
from CimTest.Globals import do_main
from CimTest.ReturnCodes import PASS, FAIL, SKIP
-sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
+sup_types = ['KVM', 'LXC']
def clean_system(host, virt):
l = live.domain_list(host, virt)
- if len(l) > 1:
+ if len(l) > 0:
return False
else:
return True
@@ -43,19 +43,28 @@
@do_main(sup_types)
def main():
options = main.options
+
if not clean_system(options.ip, options.virt):
logger.error("System has defined domains; unable to run")
return SKIP
- status = PASS
+ cn = "%s_ComputerSystem" % options.virt
- cs = computersystem.enumerate(options.ip, options.virt)
+ try:
+ cs = computersystem.enumerate(options.ip, options.virt)
- if cs.__class__ == str:
- logger.error("Got error instead of empty list: %s" % cs)
+ except Exception, details:
+ logger.error(CIM_ERROR_ENUMERATE, cn)
+ logger.error(details)
+ return FAIL
+
+ if len(cs) != 0:
+ logger.error("%s returned %d instead of empty list" % (cn, len(cs)))
status = FAIL
+ else:
+ status = PASS
- return status
+ return status
if __name__ == "__main__":
sys.exit(main())
diff -r de4868937036 -r 29ede6f3ff47 suites/libvirt-cim/lib/XenKvmLib/computersystem.py
--- a/suites/libvirt-cim/lib/XenKvmLib/computersystem.py Sat Jul 19 14:17:09 2008 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/computersystem.py Mon Jul 21 08:35:35 2008 -0700
@@ -85,7 +85,7 @@
try:
instances = conn.EnumerateInstances(classname)
except pywbem.CIMError, arg:
- print arg[1]
+ raise Exception(arg[1])
return []
list = []
16 years, 5 months