# HG changeset patch
# User Kaitlin Rupert <karupert@us.ibm.com>
# Date 1220411704 25200
# Node ID 41ee8a3bcd8d950602659a59b6186e71d09c2144
# Parent 2eddf83bfeaec1e4729ef4209d76dc39155f629b
[TEST] Remove diskpool creation from AC and EAFP tests.
The diskpool is now created before the tests are run.
Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>
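With this change the AC and EAFP tests no longer call create_diskpool_conf()/create_diskpool_file() or cleanup_restore(); they reference the pre-created pool through default_pool_name from XenKvmLib.const. A minimal sketch of the pool-ID construction the updated tests rely on (taken from the hunks below; the surrounding test scaffolding is omitted):

    from XenKvmLib.const import default_pool_name

    # Build pool InstanceIDs against the pre-created diskpool instead of
    # creating and restoring a pool inside each test.
    diskid = "%s/%s" % ("DiskPool", default_pool_name)
    memid  = "%s/%s" % ("MemoryPool", 0)
    procid = "%s/%s" % ("ProcessorPool", 0)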
diff -r 2eddf83bfeae -r 41ee8a3bcd8d suites/libvirt-cim/cimtest/AllocationCapabilities/01_enum.py
--- a/suites/libvirt-cim/cimtest/AllocationCapabilities/01_enum.py Thu Sep 04 13:14:01 2008 -0700
+++ b/suites/libvirt-cim/cimtest/AllocationCapabilities/01_enum.py Tue Sep 02 20:15:04 2008 -0700
@@ -31,7 +31,7 @@
from XenKvmLib.const import do_main, platform_sup
from CimTest.Globals import logger, CIM_ERROR_ENUMERATE
from CimTest.ReturnCodes import PASS, FAIL
-from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf
+from XenKvmLib.const import default_pool_name
sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
@@ -87,11 +87,6 @@
cn = 'AllocationCapabilities'
- status, diskid = create_diskpool_conf(options.ip, options.virt)
- if status != PASS:
- cleanup_restore(options.ip, options.virt)
- return FAIL
-
pools, ac = enum_pools_and_ac(options.ip, options.virt, cn)
if len(pools) < 4:
logger.error("Only %d pools returned, expected at least 4" %
len(pools))
@@ -100,8 +95,6 @@
status = compare_pool_to_ac(ac, pools, cn)
- cleanup_restore(options.ip, options.virt)
-
return status
if __name__ == "__main__":
diff -r 2eddf83bfeae -r 41ee8a3bcd8d suites/libvirt-cim/cimtest/AllocationCapabilities/02_alloccap_gi_errs.py
--- a/suites/libvirt-cim/cimtest/AllocationCapabilities/02_alloccap_gi_errs.py Thu Sep 04 13:14:01 2008 -0700
+++ b/suites/libvirt-cim/cimtest/AllocationCapabilities/02_alloccap_gi_errs.py Tue Sep 02 20:15:04 2008 -0700
@@ -55,17 +55,15 @@
from XenKvmLib import assoc
from VirtLib import utils
from CimTest.Globals import logger, CIM_USER, CIM_PASS, CIM_NS
-from CimTest.ReturnCodes import PASS, SKIP
+from CimTest.ReturnCodes import PASS, SKIP, FAIL
from XenKvmLib.common_util import try_getinstance
from VirtLib.live import net_list
from XenKvmLib.test_xml import netxml
from XenKvmLib.test_doms import create_vnet
-from XenKvmLib.const import do_main, platform_sup
+from XenKvmLib.const import do_main, platform_sup, default_pool_name
from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, test_dpath, \
-create_diskpool_file
-diskid = "%s/%s" % ("DiskPool", test_dpath)
+diskid = "%s/%s" % ("DiskPool", default_pool_name)
memid = "%s/%s" % ("MemoryPool", 0)
procid = "%s/%s" % ("ProcessorPool", 0)
@@ -76,10 +74,6 @@
options = main.options
server = options.ip
virt = options.virt
- # Verify DiskPool on machine
- status = create_diskpool_file()
- if status != PASS:
- return status
#Verify the virtual Network on the machine
vir_network = net_list(server)
@@ -115,20 +109,19 @@
expr_values=exp['invalid_keyvalue'],
bug_no="")
if ret_value != PASS:
logger.error("------ FAILED: Invalid InstanceID Key Value.------")
- status = ret_value
+ return ret_value
field = 'INVALID_Instid_KeyName'
+ status = FAIL
for i in range(len(instid_list)):
keys = { field : instid_list[i] }
- ret_value = try_getinstance(conn, classname, keys, field_name=field,
+ status = try_getinstance(conn, classname, keys, field_name=field,
expr_values=exp['invalid_keyname'],
bug_no="")
- if ret_value != PASS:
+ if status != PASS:
logger.error("------ FAILED: Invalid InstanceID Key Name.------")
- status = ret_value
- if status != PASS:
break
- cleanup_restore(server, virt)
+
return status
if __name__ == "__main__":
sys.exit(main())
diff -r 2eddf83bfeae -r 41ee8a3bcd8d suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py Thu Sep 04 13:14:01 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py Tue Sep 02 20:15:04 2008 -0700
@@ -48,11 +48,10 @@
from XenKvmLib.enumclass import getInstance
from CimTest.Globals import CIM_ERROR_ASSOCIATORS, CIM_ERROR_GETINSTANCE
from XenKvmLib.vxml import get_class
-from XenKvmLib.common_util import create_diskpool_conf, cleanup_restore
from XenKvmLib.classes import get_typed_class
from XenKvmLib.logicaldevices import field_err
from CimTest.Globals import logger
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name
from CimTest.ReturnCodes import PASS, FAIL
sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
@@ -82,7 +81,7 @@
if virt != 'LXC':
virt_network = vsxml.xml_get_net_network()
keys = {
- 'DiskPool' : diskid,
+ 'DiskPool' : 'DiskPool/%s' % diskid,
'ProcessorPool' : 'ProcessorPool/0' ,
'MemoryPool' : 'MemoryPool/0',
'NetworkPool' : 'NetworkPool/%s' %virt_network
@@ -96,7 +95,6 @@
key_list = {"InstanceID" : k}
inst = get_inst(server, virt, cn, key_list)
if inst is None:
- cleanup_restore(server, virt)
vsxml.destroy(server)
return FAIL, gi_inst_list
cn = get_typed_class(virt, cn)
@@ -134,7 +132,6 @@
except Exception, detail:
logger.error(CIM_ERROR_ASSOCIATORS, an)
logger.error("Exception: %s", detail)
- cleanup_restore(server, virt)
return FAIL
return PASS
@@ -158,10 +155,6 @@
vsxml = virt_type(test_dom, vcpus = test_vcpus, mac = test_mac,
disk = test_disk)
- # Verify DiskPool on machine
- status, diskid = create_diskpool_conf(server, virt)
- if status != PASS:
- return status
ret = vsxml.create(server)
if not ret:
logger.error("Failed to Create the dom: '%s'", test_dom)
@@ -180,12 +173,12 @@
ldlist[net_cn] = "%s/%s" % (test_dom, test_mac)
ldlist[proc_cn] = "%s/%s" % (test_dom, "0")
- status, gi_inst_list = get_pool_details(server, virt, vsxml, diskid)
+ status, gi_inst_list = get_pool_details(server, virt, vsxml,
+ default_pool_name)
if status != PASS:
return status
status = verify_eafp_values(server, virt, ldlist, gi_inst_list)
- cleanup_restore(server, virt)
vsxml.destroy(server)
return status
diff -r 2eddf83bfeae -r 41ee8a3bcd8d suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py Thu Sep 04 13:14:01 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py Tue Sep 02 20:15:04 2008 -0700
@@ -54,8 +54,8 @@
from CimTest.ReturnCodes import PASS, FAIL
from XenKvmLib.test_doms import destroy_and_undefine_all
from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import create_diskpool_conf, cleanup_restore
from XenKvmLib.logicaldevices import verify_device_values
+from XenKvmLib.const import default_pool_name
sup_types = ['Xen' , 'KVM', 'XenFV', 'LXC']
@@ -70,7 +70,7 @@
}
if virt != 'LXC':
virt_network = vsxml.xml_get_net_network()
- keys['DiskPool'] = diskid
+ keys['DiskPool'] = 'DiskPool/%s' % default_pool_name
keys['ProcessorPool'] = 'ProcessorPool/0'
keys['NetworkPool'] = 'NetworkPool/%s' %virt_network
@@ -190,24 +190,17 @@
vsxml = virt_type(test_dom, mem = test_mem, vcpus = test_vcpus,
mac = test_mac, disk = test_disk)
- # Verify DiskPool on machine
- status, diskid = create_diskpool_conf(server, virt)
- if status != PASS:
- return status
-
ret = vsxml.create(server)
if not ret:
logger.error("Failed to Create the dom: '%s'", test_dom)
- cleanup_restore(server, virt)
return FAIL
# Get pool list against which the EAFP should be queried
- pllist = init_pllist(virt, vsxml, diskid)
+ pllist = init_pllist(virt, vsxml, default_pool_name)
status = verify_eafp_values(server, virt, pllist, test_disk)
vsxml.destroy(server)
- cleanup_restore(server, virt)
return status
if __name__ == "__main__":
diff -r 2eddf83bfeae -r 41ee8a3bcd8d suites/libvirt-cim/cimtest/ElementAllocatedFromPool/03_reverse_errs.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/03_reverse_errs.py Thu Sep 04 13:14:01 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/03_reverse_errs.py Tue Sep 02 20:15:04 2008 -0700
@@ -36,17 +36,15 @@
from XenKvmLib.common_util import try_assoc
from CimTest.ReturnCodes import PASS, FAIL
from CimTest.Globals import logger
-from XenKvmLib.const import do_main, platform_sup
+from XenKvmLib.const import do_main, platform_sup, default_pool_name
from XenKvmLib.vxml import get_class
from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, test_dpath, \
-create_diskpool_file
bug_no = "88651"
test_dom = "hd_domain"
test_mac = "00:11:22:33:44:aa"
test_vcpus = 1
-id1 = "DiskPool/%s" %test_dpath
+id1 = "DiskPool/%s" % default_pool_name
id2 = "MemoryPool/0"
id3 = "NetworkPool/xenbr0"
id4 = "ProcessorPool/0"
@@ -167,7 +165,6 @@
def clean_and_exit(server, virt, msg):
logger.error("------FAILED: Invalid %s.------", msg)
- cleanup_restore(server, virt)
vsxml.undefine(server)
@do_main(platform_sup)
@@ -188,10 +185,6 @@
vsxml = virt_type (test_dom, vcpus = test_vcpus, mac = test_mac,
disk = test_disk)
- # Verify DiskPool on machine
- status = create_diskpool_file()
- if status != PASS:
- return status
ret = vsxml.define(options.ip)
if not ret:
logger.error("Failed to define the dom: %s", test_dom)
@@ -215,7 +208,6 @@
clean_and_exit(options.ip, virt, "CCName")
return ret
- cleanup_restore(options.ip, virt)
vsxml.undefine(options.ip)
return PASS
if __name__ == "__main__":
diff -r 2eddf83bfeae -r 41ee8a3bcd8d suites/libvirt-cim/cimtest/ElementAllocatedFromPool/04_forward_errs.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/04_forward_errs.py Thu Sep 04 13:14:01 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/04_forward_errs.py Tue Sep 02 20:15:04 2008 -0700
@@ -36,12 +36,10 @@
from CimTest import Globals
from CimTest.Globals import logger
from CimTest.ReturnCodes import PASS, FAIL, XFAIL_RC
-from XenKvmLib.const import do_main, platform_sup
+from XenKvmLib.const import do_main, platform_sup, default_network_name, \
+ default_pool_name
from XenKvmLib.vxml import get_class
from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, test_dpath, \
-create_diskpool_file
-from XenKvmLib.const import default_network_name
sup_types = ['Xen', 'KVM', 'XenFV']
bug_no = "88651"
@@ -480,7 +478,6 @@
def clean_and_exit(server, virt, msg):
logger.error("------FAILED: Invalid %s.------", msg)
- cleanup_restore(server, virt)
vsxml.undefine(server)
@do_main(platform_sup)
@@ -503,10 +500,6 @@
destroy_and_undefine_all(options.ip)
vsxml = get_class(virt)(test_dom, vcpus = test_vcpus, mac = test_mac, \
disk = test_disk)
- # Verify DiskPool on machine
- status = create_diskpool_file()
- if status != PASS:
- return status
bridge = vsxml.set_vbridge(options.ip, default_network_name)
ret = vsxml.define(options.ip)
@@ -563,7 +556,6 @@
clean_and_exit(options.ip, virt, "System creationclassname Keyvalue")
return ret
- cleanup_restore(options.ip, virt)
vsxml.undefine(options.ip)
return PASS
if __name__ == "__main__":