[PATCH 0 of 5] Create diskpool before running any tests.

In addition to creating a network pool before running the tests, this patch set creates a diskpool.
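For readers new to the suite: the "diskpool" here is a libvirt storage pool.
The suite's PoolXML class generates the pool XML and hands it to virsh
pool-create through a temp file (see the vxml.py hunk in patch 1).  Roughly,
the setup amounts to the following sketch; the 'dir' pool type, target path,
and connection URI below are illustrative assumptions, not necessarily what
vxml.py generates:

    # Rough sketch only -- the real XML comes from vxml.PoolXML, and the
    # pool type, path, and URI here are assumptions for illustration.
    import tempfile
    from subprocess import call

    pool_xml = """<pool type='dir'>
      <name>cimtest-diskpool</name>
      <target>
        <path>/var/lib/libvirt/images</path>
      </target>
    </pool>"""

    ntf = tempfile.NamedTemporaryFile('w')
    ntf.write(pool_xml)
    ntf.flush()

    # pool-create starts a transient pool from an XML file
    call(['virsh', '-c', 'qemu:///system', 'pool-create', ntf.name])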

# HG changeset patch
# User Kaitlin Rupert <karupert@us.ibm.com>
# Date 1220559239 25200
# Node ID 4b80cfce163204588ff4195debe8fdbe94209a0c
# Parent 8abcd820b6b37e5fbe8ccc30734cefa908dfab78
[TEST] Add destroy_diskpool()...

Also:
Modify create_diskpool_conf() to take a disk pool name.
Give default_pool_name a unique name.
Modify create_diskpool() so that it will use the existing diskpool only if
specified.  Also, have it check to see if the specified diskpool exists
before attempting to create it.

Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>

diff -r 8abcd820b6b3 -r 4b80cfce1632 suites/libvirt-cim/lib/XenKvmLib/common_util.py
--- a/suites/libvirt-cim/lib/XenKvmLib/common_util.py	Fri Sep 05 02:47:24 2008 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/common_util.py	Thu Sep 04 13:13:59 2008 -0700
@@ -39,9 +39,8 @@
 from XenKvmLib.vxml import PoolXML, NetXML
 from XenKvmLib.enumclass import getInstance
 from VirtLib import utils
-from XenKvmLib import const
+from XenKvmLib.const import default_pool_name, default_network_name
 
-test_dpath = "foo"
 disk_file = '/etc/libvirt/diskpool.conf'
 back_disk_file = disk_file + "." + "backup"
@@ -308,7 +307,7 @@
     logger.info("Disk conf file : %s", disk_file)
     try:
         f = open(disk_file, 'w')
-        f.write('%s %s' % (test_dpath, '/'))
+        f.write('%s %s' % (default_pool_name, '/'))
         f.close()
     except Exception,detail:
         logger.error("Exception: %s", detail)
@@ -354,19 +353,28 @@
 
     return conf_file()
 
-def create_diskpool(server, virt='KVM'):
+def create_diskpool(server, virt='KVM', dpool=default_pool_name,
+                    useExisting=False):
     status = PASS
     dpoolname = None
     try:
-        dpool_list = diskpool_list(server, virt='KVM')
-        if len(dpool_list) > 0:
-            dpoolname=dpool_list[0]
-        else:
-            diskxml = PoolXML(server, virt=virt)
+        if useExisting == True:
+            dpool_list = diskpool_list(server, virt='KVM')
+            if len(dpool_list) > 0:
+                dpoolname=dpool_list[0]
+
+        if dpoolname == None:
+            cmd = "virsh -c %s pool-list --all | grep %s" % \
+                  (utils.virt2uri(virt), dpool)
+            ret, out = utils.run_remote(server, cmd)
+            if out != "":
+                logger.error("Disk pool with name '%s' already exists", dpool)
+                return FAIL, "Unknown"
+
+            diskxml = PoolXML(server, virt=virt, poolname=dpool)
             ret = diskxml.create_vpool()
             if not ret:
-                logger.error('Failed to create the disk pool "%s"',
-                             dpoolname)
+                logger.error('Failed to create the disk pool "%s"', dpool)
                 status = FAIL
             else:
                 dpoolname=diskxml.xml_get_diskpool_name()
@@ -375,20 +383,40 @@
         status=FAIL
     return status, dpoolname
 
-def create_diskpool_conf(server, virt):
+def create_diskpool_conf(server, virt, dpool=default_pool_name):
     libvirt_version = virsh_version(server, virt)
     if libvirt_version >= '0.4.1':
-        status, dpoolname = create_diskpool(server, virt=virt)
+        status, dpoolname = create_diskpool(server, virt, dpool)
         diskid = "%s/%s" % ("DiskPool", dpoolname)
     else:
         status = create_diskpool_file()
-        diskid = "%s/%s" % ("DiskPool", test_dpath)
+        diskid = "DiskPool/%s" % default_pool_name
 
     return status, diskid
 
+def destroy_diskpool(server, virt, dpool):
+    libvirt_version = virsh_version(server, virt)
+    if libvirt_version >= '0.4.1':
+        if dpool == None:
+            logger.error("No disk pool specified")
+            return FAIL
+
+        pool_xml = PoolXML(server, virt=virt, poolname=dpool)
+        ret = pool_xml.destroy_vpool()
+        if not ret:
+            logger.error("Failed to destroy disk pool '%s'", dpool)
+            return FAIL
+
+    else:
+        status = cleanup_restore(server, virt)
+        if status != PASS:
+            logger.error("Failed to restore original disk pool file")
+            return status
+
+    return PASS
+
 def create_netpool_conf(server, virt, use_existing=False,
-                        net_name=const.default_network_name):
+                        net_name=default_network_name):
     status = PASS
     test_network = None
     try:

diff -r 8abcd820b6b3 -r 4b80cfce1632 suites/libvirt-cim/lib/XenKvmLib/const.py
--- a/suites/libvirt-cim/lib/XenKvmLib/const.py	Fri Sep 05 02:47:24 2008 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/const.py	Thu Sep 04 13:13:59 2008 -0700
@@ -37,7 +37,7 @@
 default_net_type = 'network'
 
 #vxml.PoolXML
-default_pool_name = 'testpool'
+default_pool_name = 'cimtest-diskpool'
 
 # vxml.VirtXML
 default_domname = 'domU1'

diff -r 8abcd820b6b3 -r 4b80cfce1632 suites/libvirt-cim/lib/XenKvmLib/vxml.py
--- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py	Fri Sep 05 02:47:24 2008 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py	Thu Sep 04 13:13:59 2008 -0700
@@ -145,14 +145,12 @@
         self.vuri = 'lxc:///system'
 
     def run(self, ip, vcmd, param):
-        file_arg_cmds = ['define', 'create', 'net-create', 'pool-create', 'pool-destroy']
+        file_arg_cmds = ['define', 'create', 'net-create', 'pool-create']
        if vcmd in file_arg_cmds:
            ntf = tempfile.NamedTemporaryFile('w')
            ntf.write(param)
            ntf.flush()
            name = ntf.name
-        elif vcmd == 'pool-destroy':
-            name = param
        elif param is None:
            name = ""
        else:
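For reference, the calling convention these helpers establish looks roughly
like this (a sketch only; the server/virt values are placeholders, and the
setup/teardown pairing mirrors what patch 2 of this series does in main.py):

    # Sketch of intended usage; 'myhost' and 'KVM' are placeholders.
    from XenKvmLib.common_util import create_diskpool_conf, destroy_diskpool
    from XenKvmLib.const import default_pool_name
    from CimTest.ReturnCodes import PASS

    server = 'myhost'
    virt = 'KVM'

    # Returns (status, diskid); diskid is the DiskPool InstanceID,
    # e.g. "DiskPool/cimtest-diskpool".
    status, diskid = create_diskpool_conf(server, virt, dpool=default_pool_name)
    if status == PASS:
        # ... run tests that reference diskid ...
        status = destroy_diskpool(server, virt, default_pool_name)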

Kaitlin Rupert wrote:
> +        if dpoolname == None:
> +            cmd = "virsh -c %s pool-list --all | grep %s" % \
> +                  (utils.virt2uri(virt), dpool)
> +            ret, out = utils.run_remote(server, cmd)
> +            if out != "":
> +                logger.error("Disk pool with name '%s' already exists", dpool)
> +                return FAIL, "Unknown"

If the diskpool cimtest-diskpool already exists on the machine, then the tc
execution won't proceed unless we delete the pool manually and then re-run
the tc.  I think we should not pass FAIL as the status value; instead,
supply PASS as the status value.  The same comment applies for the network
pool also.

> If the diskpool cimtest-diskpool already exists on the machine, then the
> tc execution won't proceed unless we delete the pool manually and then
> re-run the tc.  I think we should not pass FAIL as the status value;
> instead, supply PASS as the status value.  The same comment applies for
> the network pool also.

I'm not sure I understand why we should return PASS if the diskpool already
exists?

The purpose of this function is to create a diskpool with a specific XML.
If a diskpool with the same name already exists on the system, we cannot
guarantee that it was created with the same XML.  That is, the pool might
have the same name, but it might be entirely different from the pool we
want to create.

If the caller wants to use an existing pool, the caller can use the
useExisting param to do so.

Is there a scenario you were thinking of where returning PASS if the pool
already exists would be useful?

-- 
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com
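To make the two modes concrete, the call patterns after this patch are
(a sketch; server is a placeholder):

    # Default: fail if a pool named dpool already exists, since its XML
    # may not match what the suite would generate.
    status, dpoolname = create_diskpool(server, virt='KVM')

    # Opt-in reuse: take whatever diskpool is already on the system.
    status, dpoolname = create_diskpool(server, virt='KVM', useExisting=True)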

Kaitlin Rupert wrote:
>> If the diskpool cimtest-diskpool already exists on the machine, then the
>> tc execution won't proceed unless we delete the pool manually and then
>> re-run the tc.  I think we should not pass FAIL as the status value;
>> instead, supply PASS as the status value.  The same comment applies for
>> the network pool also.
>
> I'm not sure I understand why we should return PASS if the diskpool
> already exists?
>
> The purpose of this function is to create a diskpool with a specific XML.
> If a diskpool with the same name already exists on the system, we cannot
> guarantee that it was created with the same XML.  That is, the pool might
> have the same name, but it might be entirely different from the pool we
> want to create.

Ok, this is a valid point which I did not consider.

> If the caller wants to use an existing pool, the caller can use the
> useExisting param to do so.
>
> Is there a scenario you were thinking of where returning PASS if the pool
> already exists would be useful?

I wanted to return a PASS value in case the pool with the same name already
existed.  But since you gave a valid scenario, the above comment from me
does not hold.

Thanks and Regards,
Deepti.

>> Is there a scenario you were thinking of where returning PASS if the
>> pool already exists would be useful?
>
> I wanted to return a PASS value in case the pool with the same name
> already existed.  But since you gave a valid scenario, the above comment
> from me does not hold.

Okay, just wanted to make sure there wasn't a scenario I was missing.  Is
this patch set ready to go in?

-- 
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com

Kaitlin Rupert wrote:
> Okay, just wanted to make sure there wasn't a scenario I was missing.  Is
> this patch set ready to go in?

+1 for me.

# HG changeset patch
# User Kaitlin Rupert <karupert@us.ibm.com>
# Date 1220559241 25200
# Node ID 2eddf83bfeaec1e4729ef4209d76dc39155f629b
# Parent 4b80cfce163204588ff4195debe8fdbe94209a0c
[TEST] Create a disk pool before running any tests.

Be sure to clean this disk pool up after all the tests are done running.

Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>

diff -r 4b80cfce1632 -r 2eddf83bfeae suites/libvirt-cim/main.py
--- a/suites/libvirt-cim/main.py	Thu Sep 04 13:13:59 2008 -0700
+++ b/suites/libvirt-cim/main.py	Thu Sep 04 13:14:01 2008 -0700
@@ -31,11 +31,13 @@
 from VirtLib import groups
 import ConfigParser
 sys.path.append('./lib')
-from XenKvmLib.const import platform_sup, default_network_name
+from XenKvmLib.const import platform_sup, default_network_name, \
+                            default_pool_name
 from XenKvmLib.reporting import gen_report, send_report
 from VirtLib import utils
 from CimTest.ReturnCodes import PASS, FAIL
-from XenKvmLib.common_util import create_netpool_conf, destroy_netpool
+from XenKvmLib.common_util import create_netpool_conf, destroy_netpool, \
+                                  create_diskpool_conf, destroy_diskpool
 
 parser = OptionParser()
 parser.add_option("-i", "--ip", dest="ip", default="localhost",
@@ -131,12 +133,25 @@
         print "\nUnable to create network pool %s" % default_network_name
         return status
 
+    status, dpool = create_diskpool_conf(ip, virt, dpool=default_pool_name)
+    if status != PASS:
+        print "\nUnable to create disk pool %s" % default_pool_name
+        status = destroy_netpool(ip, virt, default_network_name)
+        if status != PASS:
+            print "\nUnable to destroy network pool %s." % default_network_name
+        return FAIL
+
     return PASS
 
 def cleanup_env(ip, virt):
     status = destroy_netpool(ip, virt, default_network_name)
     if status != PASS:
         print "Unable to destroy network pool %s." % default_network_name
+        return status
+
+    status = destroy_diskpool(ip, virt, default_pool_name)
+    if status != PASS:
+        print "Unable to destroy disk pool %s." % default_pool_name
         return status
 
     return PASS
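The ordering here is worth noting: if the disk pool cannot be created, the
setup path destroys the network pool it just created before bailing out, so
a failed run does not leave a stray network pool behind.  Schematically (a
sketch; the setup function's name is not visible in the hunk, so prepare_env
is an assumption):

    # 'prepare_env' is an assumed name for cleanup_env's counterpart.
    def prepare_env(ip, virt):
        status = create_netpool_conf(ip, virt, False)
        if status != PASS:
            return status

        status, dpool = create_diskpool_conf(ip, virt, dpool=default_pool_name)
        if status != PASS:
            # Unwind the network pool created above before bailing.
            destroy_netpool(ip, virt, default_network_name)
            return FAIL

        return PASS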

Kaitlin Rupert wrote:
> +    status, dpool = create_diskpool_conf(ip, virt, dpool=default_pool_name)
> +    if status != PASS:
> +        print "\nUnable to create disk pool %s" % default_pool_name
> +        status = destroy_netpool(ip, virt, default_network_name)
> +        if status != PASS:
> +            print "\nUnable to destroy network pool %s." % default_network_name
> +        return FAIL

The test won't proceed if the cimtest-diskpool already exists on the
machine, unless we manually delete it.

> The test won't proceed if the cimtest-diskpool already exists on the
> machine, unless we manually delete it.

Yes, that's the intended behavior here.  The test suite prints out a
message telling the user that a diskpool with the same name already exists.
The user will need to remove the pool manually or rename it.

Instead of attempting to guess a pool name that is not in use, we require a
specific name to be available.  I think this is reasonable because the name
we are attempting to use is fairly unique.  Also, if the test suite is
unable to clean up pools properly, the system becomes littered with tons of
pools the suite has generated.

I'd rather use a known, specific name for the test suite.  If the pool
already exists, the user can then modify their environment so that the test
suite can run.

Does this seem reasonable?

-- 
Kaitlin Rupert
IBM Linux Technology Center
kaitlin@linux.vnet.ibm.com

Kaitlin Rupert wrote:
> I'd rather use a known, specific name for the test suite.  If the pool
> already exists, the user can then modify their environment so that the
> test suite can run.
>
> Does this seem reasonable?

I agree with the above.  Sounds valid.

Thanks and Regards,
Deepti.

# HG changeset patch
# User Kaitlin Rupert <karupert@us.ibm.com>
# Date 1220411704 25200
# Node ID 41ee8a3bcd8d950602659a59b6186e71d09c2144
# Parent 2eddf83bfeaec1e4729ef4209d76dc39155f629b
[TEST] Remove diskpool creation from AC and EAFP tests.

The diskpool is now being created before the tests are run.

Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>

diff -r 2eddf83bfeae -r 41ee8a3bcd8d suites/libvirt-cim/cimtest/AllocationCapabilities/01_enum.py
--- a/suites/libvirt-cim/cimtest/AllocationCapabilities/01_enum.py	Thu Sep 04 13:14:01 2008 -0700
+++ b/suites/libvirt-cim/cimtest/AllocationCapabilities/01_enum.py	Tue Sep 02 20:15:04 2008 -0700
@@ -31,7 +31,7 @@
 from XenKvmLib.const import do_main, platform_sup
 from CimTest.Globals import logger, CIM_ERROR_ENUMERATE
 from CimTest.ReturnCodes import PASS, FAIL
-from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf
+from XenKvmLib.const import default_pool_name
 
 sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
 
@@ -87,11 +87,6 @@
 
     cn = 'AllocationCapabilities'
 
-    status, diskid = create_diskpool_conf(options.ip, options.virt)
-    if status != PASS:
-        cleanup_restore(options.ip, options.virt)
-        return FAIL
-
     pools, ac = enum_pools_and_ac(options.ip, options.virt, cn)
     if len(pools) < 4:
         logger.error("Only %d pools returned, expected at least 4" % len(pools))
@@ -100,8 +95,6 @@
 
     status = compare_pool_to_ac(ac, pools, cn)
 
-    cleanup_restore(options.ip, options.virt)
-
     return status
 
 if __name__ == "__main__":

diff -r 2eddf83bfeae -r 41ee8a3bcd8d suites/libvirt-cim/cimtest/AllocationCapabilities/02_alloccap_gi_errs.py
--- a/suites/libvirt-cim/cimtest/AllocationCapabilities/02_alloccap_gi_errs.py	Thu Sep 04 13:14:01 2008 -0700
+++ b/suites/libvirt-cim/cimtest/AllocationCapabilities/02_alloccap_gi_errs.py	Tue Sep 02 20:15:04 2008 -0700
@@ -55,17 +55,15 @@
 from XenKvmLib import assoc
 from VirtLib import utils
 from CimTest.Globals import logger, CIM_USER, CIM_PASS, CIM_NS
-from CimTest.ReturnCodes import PASS, SKIP
+from CimTest.ReturnCodes import PASS, SKIP, FAIL
 from XenKvmLib.common_util import try_getinstance
 from VirtLib.live import net_list
 from XenKvmLib.test_xml import netxml
 from XenKvmLib.test_doms import create_vnet
-from XenKvmLib.const import do_main, platform_sup
+from XenKvmLib.const import do_main, platform_sup, default_pool_name
 from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, test_dpath, \
-create_diskpool_file
 
-diskid = "%s/%s" % ("DiskPool", test_dpath)
+diskid = "%s/%s" % ("DiskPool", default_pool_name)
 memid = "%s/%s" % ("MemoryPool", 0)
 procid = "%s/%s" % ("ProcessorPool", 0)
 
@@ -76,10 +74,6 @@
     options = main.options
     server = options.ip
     virt = options.virt
-    # Verify DiskPool on machine
-    status = create_diskpool_file()
-    if status != PASS:
-        return status
 
     #Verify the virtual Network on the machine
     vir_network = net_list(server)
@@ -115,20 +109,19 @@
                                 expr_values=exp['invalid_keyvalue'], bug_no="")
     if ret_value != PASS:
         logger.error("------ FAILED: Invalid InstanceID Key Value.------")
-        status = ret_value
+        return ret_value
 
     field = 'INVALID_Instid_KeyName'
+    status = FAIL
     for i in range(len(instid_list)):
         keys = { field : instid_list[i] }
-        ret_value = try_getinstance(conn, classname, keys, field_name=field,
+        status = try_getinstance(conn, classname, keys, field_name=field,
                                  expr_values=exp['invalid_keyname'], bug_no="")
-        if ret_value != PASS:
+        if status != PASS:
             logger.error("------ FAILED: Invalid InstanceID Key Name.------")
-            status = ret_value
-        if status != PASS:
             break
-    cleanup_restore(server, virt)
+
     return status
 
 if __name__ == "__main__":
     sys.exit(main())

diff -r 2eddf83bfeae -r 41ee8a3bcd8d suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py	Thu Sep 04 13:14:01 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/01_forward.py	Tue Sep 02 20:15:04 2008 -0700
@@ -48,11 +48,10 @@
 from XenKvmLib.enumclass import getInstance
 from CimTest.Globals import CIM_ERROR_ASSOCIATORS, CIM_ERROR_GETINSTANCE
 from XenKvmLib.vxml import get_class
-from XenKvmLib.common_util import create_diskpool_conf, cleanup_restore
 from XenKvmLib.classes import get_typed_class
 from XenKvmLib.logicaldevices import field_err
 from CimTest.Globals import logger
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name
 from CimTest.ReturnCodes import PASS, FAIL
 
 sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
@@ -82,7 +81,7 @@
     if virt != 'LXC':
         virt_network = vsxml.xml_get_net_network()
         keys = {
-                'DiskPool'      : diskid,
+                'DiskPool'      : 'DiskPool/%s' % diskid,
                 'ProcessorPool' : 'ProcessorPool/0' ,
                 'MemoryPool'    : 'MemoryPool/0',
                 'NetworkPool'   : 'NetworkPool/%s' %virt_network
@@ -96,7 +95,6 @@
         key_list = {"InstanceID" : k}
         inst = get_inst(server, virt, cn, key_list)
         if inst is None:
-            cleanup_restore(server, virt)
             vsxml.destroy(server)
             return FAIL, gi_inst_list
         cn = get_typed_class(virt, cn)
@@ -134,7 +132,6 @@
     except Exception, detail:
         logger.error(CIM_ERROR_ASSOCIATORS, an)
         logger.error("Exception: %s", detail)
-        cleanup_restore(server, virt)
         return FAIL
 
     return PASS
@@ -158,10 +155,6 @@
     vsxml = virt_type(test_dom, vcpus = test_vcpus, mac = test_mac,
                       disk = test_disk)
 
-    # Verify DiskPool on machine
-    status, diskid = create_diskpool_conf(server, virt)
-    if status != PASS:
-        return status
     ret = vsxml.create(server)
     if not ret:
         logger.error("Failed to Create the dom: '%s'", test_dom)
@@ -180,12 +173,12 @@
         ldlist[net_cn] = "%s/%s" % (test_dom, test_mac)
         ldlist[proc_cn] = "%s/%s" % (test_dom, "0")
 
-    status, gi_inst_list = get_pool_details(server, virt, vsxml, diskid)
+    status, gi_inst_list = get_pool_details(server, virt, vsxml,
+                                            default_pool_name)
     if status != PASS:
         return status
 
     status = verify_eafp_values(server, virt, ldlist, gi_inst_list)
-    cleanup_restore(server, virt)
     vsxml.destroy(server)
     return status

diff -r 2eddf83bfeae -r 41ee8a3bcd8d suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py	Thu Sep 04 13:14:01 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/02_reverse.py	Tue Sep 02 20:15:04 2008 -0700
@@ -54,8 +54,8 @@
 from CimTest.ReturnCodes import PASS, FAIL
 from XenKvmLib.test_doms import destroy_and_undefine_all
 from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import create_diskpool_conf, cleanup_restore
 from XenKvmLib.logicaldevices import verify_device_values
+from XenKvmLib.const import default_pool_name
 
 sup_types = ['Xen' , 'KVM', 'XenFV', 'LXC']
 
@@ -70,7 +70,7 @@
               }
     if virt != 'LXC':
         virt_network = vsxml.xml_get_net_network()
-        keys['DiskPool'] = diskid
+        keys['DiskPool'] = 'DiskPool/%s' % default_pool_name
         keys['ProcessorPool'] = 'ProcessorPool/0'
         keys['NetworkPool'] = 'NetworkPool/%s' %virt_network
 
@@ -190,24 +190,17 @@
     vsxml = virt_type(test_dom, mem = test_mem, vcpus = test_vcpus,
                       mac = test_mac, disk = test_disk)
 
-    # Verify DiskPool on machine
-    status, diskid = create_diskpool_conf(server, virt)
-    if status != PASS:
-        return status
-
     ret = vsxml.create(server)
     if not ret:
         logger.error("Failed to Create the dom: '%s'", test_dom)
-        cleanup_restore(server, virt)
         return FAIL
 
     # Get pool list against which the EAFP should be queried
-    pllist = init_pllist(virt, vsxml, diskid)
+    pllist = init_pllist(virt, vsxml, default_pool_name)
 
     status = verify_eafp_values(server, virt, pllist, test_disk)
     vsxml.destroy(server)
-    cleanup_restore(server, virt)
     return status
 
 if __name__ == "__main__":

diff -r 2eddf83bfeae -r 41ee8a3bcd8d suites/libvirt-cim/cimtest/ElementAllocatedFromPool/03_reverse_errs.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/03_reverse_errs.py	Thu Sep 04 13:14:01 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/03_reverse_errs.py	Tue Sep 02 20:15:04 2008 -0700
@@ -36,17 +36,15 @@
 from XenKvmLib.common_util import try_assoc
 from CimTest.ReturnCodes import PASS, FAIL
 from CimTest.Globals import logger
-from XenKvmLib.const import do_main, platform_sup
+from XenKvmLib.const import do_main, platform_sup, default_pool_name
 from XenKvmLib.vxml import get_class
 from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, test_dpath, \
-create_diskpool_file
 
 bug_no = "88651"
 test_dom = "hd_domain"
 test_mac = "00:11:22:33:44:aa"
 test_vcpus = 1
-id1 = "DiskPool/%s" %test_dpath
+id1 = "DiskPool/%s" % default_pool_name
 id2 = "MemoryPool/0"
 id3 = "NetworkPool/xenbr0"
 id4 = "ProcessorPool/0"
@@ -167,7 +165,6 @@
 
 def clean_and_exit(server, virt, msg):
     logger.error("------FAILED: Invalid %s.------", msg)
-    cleanup_restore(server, virt)
     vsxml.undefine(server)
 
 @do_main(platform_sup)
@@ -188,10 +185,6 @@
     vsxml = virt_type (test_dom, vcpus = test_vcpus, mac = test_mac,
                        disk = test_disk)
 
-    # Verify DiskPool on machine
-    status = create_diskpool_file()
-    if status != PASS:
-        return status
     ret = vsxml.define(options.ip)
     if not ret:
         logger.error("Failed to define the dom: %s", test_dom)
@@ -215,7 +208,6 @@
         clean_and_exit(options.ip, virt, "CCName")
         return ret
 
-    cleanup_restore(options.ip, virt)
     vsxml.undefine(options.ip)
     return PASS
 
 if __name__ == "__main__":

diff -r 2eddf83bfeae -r 41ee8a3bcd8d suites/libvirt-cim/cimtest/ElementAllocatedFromPool/04_forward_errs.py
--- a/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/04_forward_errs.py	Thu Sep 04 13:14:01 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ElementAllocatedFromPool/04_forward_errs.py	Tue Sep 02 20:15:04 2008 -0700
@@ -36,12 +36,10 @@
 from CimTest import Globals
 from CimTest.Globals import logger
 from CimTest.ReturnCodes import PASS, FAIL, XFAIL_RC
-from XenKvmLib.const import do_main, platform_sup
+from XenKvmLib.const import do_main, platform_sup, default_network_name, \
+                            default_pool_name
 from XenKvmLib.vxml import get_class
 from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, test_dpath, \
-create_diskpool_file
-from XenKvmLib.const import default_network_name
 
 sup_types = ['Xen', 'KVM', 'XenFV']
 bug_no = "88651"
@@ -480,7 +478,6 @@
 
 def clean_and_exit(server, virt, msg):
     logger.error("------FAILED: Invalid %s.------", msg)
-    cleanup_restore(server, virt)
     vsxml.undefine(server)
 
 @do_main(platform_sup)
@@ -503,10 +500,6 @@
     destroy_and_undefine_all(options.ip)
     vsxml = get_class(virt)(test_dom, vcpus = test_vcpus, mac = test_mac, \
                             disk = test_disk)
-    # Verify DiskPool on machine
-    status = create_diskpool_file()
-    if status != PASS:
-        return status
 
     bridge = vsxml.set_vbridge(options.ip, default_network_name)
     ret = vsxml.define(options.ip)
@@ -563,7 +556,6 @@
         clean_and_exit(options.ip, virt, "System creationclassname Keyvalue")
         return ret
 
-    cleanup_restore(options.ip, virt)
     vsxml.undefine(options.ip)
     return PASS
 
 if __name__ == "__main__":
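The shape of the change is the same in each test: drop the per-test
create/restore pair and derive the DiskPool InstanceID from the shared
constant.  Schematically:

    # Before: each test created (and later restored) its own pool.
    status, diskid = create_diskpool_conf(options.ip, options.virt)
    if status != PASS:
        cleanup_restore(options.ip, options.virt)
        return FAIL
    # ... test body uses diskid ...
    cleanup_restore(options.ip, options.virt)

    # After: tests reference the pool that main.py created up front.
    from XenKvmLib.const import default_pool_name
    diskid = "DiskPool/%s" % default_pool_name
    # ... test body uses diskid ...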

+1 for me.

Kaitlin Rupert wrote:
> [TEST] Remove diskpool creation from AC and EAFP tests.
>
> The diskpool is now being created before the tests are run.

# HG changeset patch
# User Kaitlin Rupert <karupert@us.ibm.com>
# Date 1220654590 25200
# Node ID 553bf81e676d9cdb9f752614aa6b7b60652b6802
# Parent 41ee8a3bcd8d950602659a59b6186e71d09c2144
[TEST] Remove diskpool creation from HostSys and HRP tests.

The diskpool is now being created before the tests are run.

Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>

diff -r 41ee8a3bcd8d -r 553bf81e676d suites/libvirt-cim/cimtest/HostSystem/04_hs_to_EAPF.py
--- a/suites/libvirt-cim/cimtest/HostSystem/04_hs_to_EAPF.py	Tue Sep 02 20:15:04 2008 -0700
+++ b/suites/libvirt-cim/cimtest/HostSystem/04_hs_to_EAPF.py	Fri Sep 05 15:43:10 2008 -0700
@@ -51,7 +51,7 @@
 from VirtLib import utils
 from CimTest.Globals import logger, CIM_ERROR_ASSOCIATORNAMES, \
 CIM_ERROR_ASSOCIATORS
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name
 from XenKvmLib.vxml import XenXML, KVMXML, get_class
 from XenKvmLib.assoc import AssociatorNames, Associators
 from XenKvmLib.common_util import get_host_info
@@ -59,7 +59,6 @@
 from CimTest.ReturnCodes import PASS, FAIL, SKIP
 from XenKvmLib.test_doms import destroy_and_undefine_all
 from XenKvmLib.logicaldevices import verify_device_values
-from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf
 
 sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
 
@@ -87,7 +86,7 @@
     npool = get_typed_class(virt, 'NetworkPool')
     dpool = get_typed_class(virt, 'DiskPool')
     ppool = get_typed_class(virt, 'ProcessorPool')
-    exp_pllist[dpool] = dp_InstID
+    exp_pllist[dpool] = 'DiskPool/%s' % dp_InstID
     exp_pllist[npool] = '%s/%s' %('NetworkPool', net_name)
     exp_pllist[ppool] = 'ProcessorPool/0'
     exp_pllist[mpool] = 'MemoryPool/0'
@@ -199,7 +198,6 @@
     except Exception, detail:
         logger.error(CIM_ERROR_ASSOCIATORS, an)
         logger.error("Exception: %s", detail)
-        cleanup_restore(server, virt)
         status = FAIL
     return status
 
@@ -234,12 +232,6 @@
     # Get the network pool info which is used by the VS.
     net_name = vsxml.xml_get_net_network()
 
-    status, dpool_name = create_diskpool_conf(server, virt)
-    if status != PASS:
-        logger.error("Failed to create diskpool")
-        vsxml.undefine(server)
-        return FAIL
-
     # Get the hostedResourcePool info first
     cn = classname
     an = get_typed_class(virt, "HostedResourcePool")
@@ -247,21 +239,18 @@
     status, pool = get_assocname_info(server, cn, an, qcn, host_name, virt)
     if status != PASS:
         vsxml.undefine(server)
-        cleanup_restore(server, virt=virt)
         return status
 
-    in_pllist = pool_init_list(virt, pool, net_name, dpool_name)
+    in_pllist = pool_init_list(virt, pool, net_name, default_pool_name)
     # One pool for each Device type, hence len should be 4
     exp_len = 4
     status = check_len(an, in_pllist, qcn, exp_len)
     if status != PASS:
         vsxml.undefine(server)
-        cleanup_restore(server, virt=virt)
         return FAIL
 
     status = verify_eafp_values(server, in_pllist, virt, test_disk)
     vsxml.undefine(server)
-    cleanup_restore(server, virt=virt)
     return status
 
 if __name__ == "__main__":
     sys.exit(main())

diff -r 41ee8a3bcd8d -r 553bf81e676d suites/libvirt-cim/cimtest/HostedResourcePool/01_forward.py
--- a/suites/libvirt-cim/cimtest/HostedResourcePool/01_forward.py	Tue Sep 02 20:15:04 2008 -0700
+++ b/suites/libvirt-cim/cimtest/HostedResourcePool/01_forward.py	Fri Sep 05 15:43:10 2008 -0700
@@ -30,20 +30,14 @@
 from CimTest import Globals
 from CimTest.Globals import logger
 from CimTest.ReturnCodes import PASS, FAIL
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name
 from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf
 
 sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
 
 @do_main(sup_types)
 def main():
     options = main.options
     status = FAIL
-
-    status, dpool_name = create_diskpool_conf(options.ip, options.virt)
-    if status != PASS:
-        logger.error("Failed to create diskpool")
-        return FAIL
 
     keys = ['Name', 'CreationClassName']
     try:
@@ -81,10 +75,10 @@
             if cname.find("NetworkPool") >=0 and \
                items['InstanceID'] == "NetworkPool/%s" %default_network_name:
                 status = PASS
-            if cname.find("DiskPool") >=0 and items['InstanceID'] == "DiskPool/%s" %dpool_name:
+            if cname.find("DiskPool") >=0 and \
+               items['InstanceID'] == "DiskPool/%s" % default_pool_name:
                 status = PASS
 
-    cleanup_restore(options.ip, options.virt)
     return status
 
 if __name__ == "__main__":

diff -r 41ee8a3bcd8d -r 553bf81e676d suites/libvirt-cim/cimtest/HostedResourcePool/02_reverse.py
--- a/suites/libvirt-cim/cimtest/HostedResourcePool/02_reverse.py	Tue Sep 02 20:15:04 2008 -0700
+++ b/suites/libvirt-cim/cimtest/HostedResourcePool/02_reverse.py	Fri Sep 05 15:43:10 2008 -0700
@@ -29,20 +29,14 @@
 from CimTest import Globals
 from CimTest.Globals import logger
 from CimTest.ReturnCodes import PASS, FAIL
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name
 from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf
 
 sup_types=['Xen', 'KVM', 'XenFV', 'LXC']
 
 @do_main(sup_types)
 def main():
     options = main.options
     status = PASS
-
-    status, dpool_name = create_diskpool_conf(options.ip, options.virt)
-    if status != PASS:
-        logger.error("Failed to create diskpool")
-        return FAIL
 
     keys = ['Name', 'CreationClassName']
     try:
@@ -59,8 +53,8 @@
     poollist = {
                  mem_cn  : "MemoryPool/0",
                  proc_cn : "ProcessorPool/0",
-                 net_cn  : "NetworkPool/%s" %default_network_name,
-                 disk_cn : "DiskPool/%s" %dpool_name
+                 net_cn  : "NetworkPool/%s" % default_network_name,
+                 disk_cn : "DiskPool/%s" % default_pool_name
                }
 
     for k, v in poollist.items():
@@ -79,7 +73,6 @@
             status = FAIL
         if status != PASS:
             break
-    cleanup_restore(options.ip, options.virt)
     return status
 
 if __name__ == "__main__":
     sys.exit(main())

diff -r 41ee8a3bcd8d -r 553bf81e676d suites/libvirt-cim/cimtest/HostedResourcePool/04_reverse_errs.py
--- a/suites/libvirt-cim/cimtest/HostedResourcePool/04_reverse_errs.py	Tue Sep 02 20:15:04 2008 -0700
+++ b/suites/libvirt-cim/cimtest/HostedResourcePool/04_reverse_errs.py	Fri Sep 05 15:43:10 2008 -0700
@@ -29,9 +29,8 @@
 from CimTest import Globals
 from CimTest.Globals import logger
 from CimTest.ReturnCodes import PASS
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name
 from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf
 
 sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
 expr_values = {
@@ -50,11 +49,6 @@
     options = main.options
     status = PASS
 
-    status, dpool_name = create_diskpool_conf(options.ip, options.virt)
-    if status != PASS:
-        logger.error("Failed to create diskpool")
-        return FAIL
-
     assoc_classname = get_typed_class(options.virt, "HostedResourcePool")
     proc_cn = get_typed_class(options.virt, "ProcessorPool")
     mem_cn = get_typed_class(options.virt, "MemoryPool")
@@ -68,7 +62,7 @@
                  mem_cn  : "MemoryPool/0",
                  proc_cn : "ProcessorPool/0",
                  net_cn  : "NetworkPool/%s" %default_network_name,
-                 disk_cn : "DiskPool/%s" %dpool_name
+                 disk_cn : "DiskPool/%s" % default_pool_name
               }
     for k, v in poollist.items():
         keys = { "Wrong" : v}
@@ -87,7 +81,6 @@
             logger.error("------ FAILED: Invalid Name Key Value.------")
             status = ret
 
-    cleanup_restore(options.ip, options.virt)
     return status
 
 if __name__ == "__main__":
     sys.exit(main())

+1 for me. Kaitlin Rupert wrote:
# HG changeset patch # User Kaitlin Rupert <karupert@us.ibm.com> # Date 1220654590 25200 # Node ID 553bf81e676d9cdb9f752614aa6b7b60652b6802 # Parent 41ee8a3bcd8d950602659a59b6186e71d09c2144 [TEST] Remove diskpool creation from HostSys and HRP tests.
The diskpool is now being created before the tests are run.
Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>
diff -r 41ee8a3bcd8d -r 553bf81e676d suites/libvirt-cim/cimtest/HostSystem/04_hs_to_EAPF.py --- a/suites/libvirt-cim/cimtest/HostSystem/04_hs_to_EAPF.py Tue Sep 02 20:15:04 2008 -0700 +++ b/suites/libvirt-cim/cimtest/HostSystem/04_hs_to_EAPF.py Fri Sep 05 15:43:10 2008 -0700 @@ -51,7 +51,7 @@ from VirtLib import utils from CimTest.Globals import logger, CIM_ERROR_ASSOCIATORNAMES, \ CIM_ERROR_ASSOCIATORS -from XenKvmLib.const import do_main +from XenKvmLib.const import do_main, default_pool_name from XenKvmLib.vxml import XenXML, KVMXML, get_class from XenKvmLib.assoc import AssociatorNames, Associators from XenKvmLib.common_util import get_host_info @@ -59,7 +59,6 @@ from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.test_doms import destroy_and_undefine_all from XenKvmLib.logicaldevices import verify_device_values -from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf
sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
@@ -87,7 +86,7 @@
     npool = get_typed_class(virt, 'NetworkPool')
     dpool = get_typed_class(virt, 'DiskPool')
     ppool = get_typed_class(virt, 'ProcessorPool')
-    exp_pllist[dpool] = dp_InstID
+    exp_pllist[dpool] = 'DiskPool/%s' % dp_InstID
     exp_pllist[npool] = '%s/%s' %('NetworkPool', net_name)
     exp_pllist[ppool] = 'ProcessorPool/0'
     exp_pllist[mpool] = 'MemoryPool/0'
@@ -199,7 +198,6 @@
     except Exception, detail:
         logger.error(CIM_ERROR_ASSOCIATORS, an)
         logger.error("Exception: %s", detail)
-        cleanup_restore(server, virt)
         status = FAIL
     return status
@@ -234,12 +232,6 @@
     # Get the network pool info which is used by the VS.
     net_name = vsxml.xml_get_net_network()

-    status, dpool_name = create_diskpool_conf(server, virt)
-    if status != PASS:
-        logger.error("Failed to create diskpool")
-        vsxml.undefine(server)
-        return FAIL
-
     # Get the hostedResourcePool info first
     cn = classname
     an = get_typed_class(virt, "HostedResourcePool")
@@ -247,21 +239,18 @@
     status, pool = get_assocname_info(server, cn, an, qcn, host_name, virt)
     if status != PASS:
         vsxml.undefine(server)
-        cleanup_restore(server, virt=virt)
         return status

-    in_pllist = pool_init_list(virt, pool, net_name, dpool_name)
+    in_pllist = pool_init_list(virt, pool, net_name, default_pool_name)

     # One pool for each Device type, hence len should be 4
     exp_len = 4
     status = check_len(an, in_pllist, qcn, exp_len)
     if status != PASS:
         vsxml.undefine(server)
-        cleanup_restore(server, virt=virt)
         return FAIL

     status = verify_eafp_values(server, in_pllist, virt, test_disk)
     vsxml.undefine(server)
-    cleanup_restore(server, virt=virt)
     return status

 if __name__ == "__main__":
     sys.exit(main())

diff -r 41ee8a3bcd8d -r 553bf81e676d suites/libvirt-cim/cimtest/HostedResourcePool/01_forward.py
--- a/suites/libvirt-cim/cimtest/HostedResourcePool/01_forward.py Tue Sep 02 20:15:04 2008 -0700
+++ b/suites/libvirt-cim/cimtest/HostedResourcePool/01_forward.py Fri Sep 05 15:43:10 2008 -0700
@@ -30,20 +30,14 @@
 from CimTest import Globals
 from CimTest.Globals import logger
 from CimTest.ReturnCodes import PASS, FAIL
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name
 from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf

 sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']

 @do_main(sup_types)
 def main():
     options = main.options
     status = FAIL
-
-    status, dpool_name = create_diskpool_conf(options.ip, options.virt)
-    if status != PASS:
-        logger.error("Failed to create diskpool")
-        return FAIL

     keys = ['Name', 'CreationClassName']
     try:
@@ -81,10 +75,10 @@
             if cname.find("NetworkPool") >=0 and \
                items['InstanceID'] == "NetworkPool/%s" %default_network_name:
                 status = PASS
-            if cname.find("DiskPool") >=0 and items['InstanceID'] == "DiskPool/%s" %dpool_name:
+            if cname.find("DiskPool") >=0 and \
+               items['InstanceID'] == "DiskPool/%s" % default_pool_name:
                 status = PASS

-    cleanup_restore(options.ip, options.virt)
     return status

 if __name__ == "__main__":

diff -r 41ee8a3bcd8d -r 553bf81e676d suites/libvirt-cim/cimtest/HostedResourcePool/02_reverse.py
--- a/suites/libvirt-cim/cimtest/HostedResourcePool/02_reverse.py Tue Sep 02 20:15:04 2008 -0700
+++ b/suites/libvirt-cim/cimtest/HostedResourcePool/02_reverse.py Fri Sep 05 15:43:10 2008 -0700
@@ -29,20 +29,14 @@
 from CimTest import Globals
 from CimTest.Globals import logger
 from CimTest.ReturnCodes import PASS, FAIL
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name
 from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf

 sup_types=['Xen', 'KVM', 'XenFV', 'LXC']

 @do_main(sup_types)
 def main():
     options = main.options
     status = PASS
-
-    status, dpool_name = create_diskpool_conf(options.ip, options.virt)
-    if status != PASS:
-        logger.error("Failed to create diskpool")
-        return FAIL

     keys = ['Name', 'CreationClassName']
     try:
@@ -59,8 +53,8 @@
     poollist = {
                  mem_cn : "MemoryPool/0",
                  proc_cn : "ProcessorPool/0",
-                 net_cn : "NetworkPool/%s" %default_network_name,
-                 disk_cn : "DiskPool/%s" %dpool_name
+                 net_cn : "NetworkPool/%s" % default_network_name,
+                 disk_cn : "DiskPool/%s" % default_pool_name
                }

     for k, v in poollist.items():
@@ -79,7 +73,6 @@
                 status = FAIL
             if status != PASS:
                 break
-    cleanup_restore(options.ip, options.virt)
     return status

 if __name__ == "__main__":
     sys.exit(main())

diff -r 41ee8a3bcd8d -r 553bf81e676d suites/libvirt-cim/cimtest/HostedResourcePool/04_reverse_errs.py
--- a/suites/libvirt-cim/cimtest/HostedResourcePool/04_reverse_errs.py Tue Sep 02 20:15:04 2008 -0700
+++ b/suites/libvirt-cim/cimtest/HostedResourcePool/04_reverse_errs.py Fri Sep 05 15:43:10 2008 -0700
@@ -29,9 +29,8 @@
 from CimTest import Globals
 from CimTest.Globals import logger
 from CimTest.ReturnCodes import PASS
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name
 from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf

 sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']

 expr_values = {
@@ -50,11 +49,6 @@
     options = main.options
     status = PASS

-    status, dpool_name = create_diskpool_conf(options.ip, options.virt)
-    if status != PASS:
-        logger.error("Failed to create diskpool")
-        return FAIL
-
     assoc_classname = get_typed_class(options.virt, "HostedResourcePool")
     proc_cn = get_typed_class(options.virt, "ProcessorPool")
     mem_cn = get_typed_class(options.virt, "MemoryPool")
@@ -68,7 +62,7 @@
                  mem_cn : "MemoryPool/0",
                  proc_cn : "ProcessorPool/0",
                  net_cn : "NetworkPool/%s" %default_network_name,
-                 disk_cn : "DiskPool/%s" %dpool_name
+                 disk_cn : "DiskPool/%s" % default_pool_name
               }
     for k, v in poollist.items():
         keys = { "Wrong" : v}
@@ -87,7 +81,6 @@
             logger.error("------ FAILED: Invalid Name Key Value.------")
             status = ret

-    cleanup_restore(options.ip, options.virt)
     return status

 if __name__ == "__main__":
     sys.exit(main())
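For reference, the pattern these HostedResourcePool tests now share can be collapsed into one sketch. expected_pool_ids() below is a hypothetical helper, not something this patch adds; it simply restates the poollist mappings from the diffs above:

from XenKvmLib.const import default_pool_name, default_network_name
from XenKvmLib.classes import get_typed_class

def expected_pool_ids(virt):
    # Hypothetical helper: map each virt-typed pool class to the
    # InstanceID the association results are checked against.  Disk and
    # network pools use the shared default names; memory and processor
    # pools are always instance 0.
    return {
        get_typed_class(virt, "MemoryPool")    : "MemoryPool/0",
        get_typed_class(virt, "ProcessorPool") : "ProcessorPool/0",
        get_typed_class(virt, "NetworkPool")   : "NetworkPool/%s" % default_network_name,
        get_typed_class(virt, "DiskPool")      : "DiskPool/%s" % default_pool_name,
    }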

# HG changeset patch
# User Kaitlin Rupert <karupert@us.ibm.com>
# Date 1220654590 25200
# Node ID 3e14187adcadb92c530131368972b968ace5bc3b
# Parent  553bf81e676d9cdb9f752614aa6b7b60652b6802
[TEST] Remove diskpool creation from RAFP, RP, and SDC tests.

The diskpool is now being created before the tests are run.

Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>

diff -r 553bf81e676d -r 3e14187adcad suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py
--- a/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py Fri Sep 05 15:43:10 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/01_forward.py Fri Sep 05 15:43:10 2008 -0700
@@ -31,10 +31,8 @@
 from XenKvmLib.vxml import get_class
 from CimTest import Globals
 from CimTest.Globals import logger
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name, default_network_name
 from CimTest.ReturnCodes import PASS, FAIL, XFAIL
-from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf
-from XenKvmLib.const import default_network_name

 sup_types = ['Xen', 'XenFV', 'KVM', 'LXC']

@@ -120,11 +118,7 @@
         vsxml.undefine(options.ip)
         return status

-    status, diskid = create_diskpool_conf(options.ip, options.virt)
-    if status != PASS:
-        cleanup_restore(options.ip, options.virt)
-        vsxml.undefine(options.ip)
-        return status
+    diskp_id = "DiskPool/%s" % default_pool_name

     if options.virt == 'LXC':
         pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"} }
@@ -132,7 +126,7 @@
     else:
         pool = { "MemoryPool" : {'InstanceID' : "MemoryPool/0"},
                  "ProcessorPool" : {'InstanceID' : "ProcessorPool/0"},
-                 "DiskPool" : {'InstanceID' : diskid},
+                 "DiskPool" : {'InstanceID' : diskp_id},
                  "NetworkPool" : {'InstanceID' : "NetworkPool/%s" \
                                   % test_npool }}
         rasd = { "MemoryPool" : "%s/mem" % test_dom,
@@ -150,7 +144,6 @@
             if status != PASS:
                 break

-    cleanup_restore(options.ip, options.virt)
     vsxml.undefine(options.ip)
     return status

diff -r 553bf81e676d -r 3e14187adcad suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py
--- a/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py Fri Sep 05 15:43:10 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ResourceAllocationFromPool/02_reverse.py Fri Sep 05 15:43:10 2008 -0700
@@ -31,11 +31,9 @@
 from XenKvmLib.classes import get_typed_class
 from CimTest import Globals
 from CimTest.Globals import logger
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name, default_network_name
 from CimTest.ReturnCodes import PASS, FAIL
 from XenKvmLib import enumclass
-from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf
-from XenKvmLib.const import default_network_name

 sup_types = ['Xen', 'XenFV', 'KVM', 'LXC']
 test_dom = "RAFP_dom"

@@ -87,7 +85,7 @@
            }

     disk = { 'rasd_id' : '%s/%s' % (test_dom, test_disk),
-             'pool_id' : diskid
+             'pool_id' : 'DiskPool/%s' % default_pool_name
            }

     if virt == 'LXC':
@@ -170,12 +168,7 @@
         vsxml.undefine(server)
         return status

-    status, diskid = create_diskpool_conf(server, virt)
-    if status != PASS:
-        vsxml.undefine(server)
-        return status
-
-    cn_id_list = init_list(test_disk, diskid, options.virt)
+    cn_id_list = init_list(test_disk, default_pool_name, options.virt)

     for rasd_cn, id_info in cn_id_list.iteritems():
         status = get_rasdinst_verify_pool_from_RAFP(server, virt, vsxml,
@@ -183,7 +176,6 @@
         if status != PASS:
             return status

-    cleanup_restore(server, virt)
     vsxml.undefine(server)
     return status

diff -r 553bf81e676d -r 3e14187adcad suites/libvirt-cim/cimtest/ResourcePool/01_enum.py
--- a/suites/libvirt-cim/cimtest/ResourcePool/01_enum.py Fri Sep 05 15:43:10 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ResourcePool/01_enum.py Fri Sep 05 15:43:10 2008 -0700
@@ -32,28 +32,20 @@
 from XenKvmLib import vxml
 from CimTest import Globals
 from CimTest.Globals import logger
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name
 from CimTest.ReturnCodes import PASS, FAIL, SKIP
 from VirtLib.live import net_list
 from XenKvmLib.vsms import RASD_TYPE_PROC, RASD_TYPE_MEM, RASD_TYPE_NET_ETHER, \
 RASD_TYPE_DISK
-from XenKvmLib.common_util import cleanup_restore, test_dpath, \
-create_diskpool_file

 sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']

-diskid = "%s/%s" % ("DiskPool", test_dpath)
 dp_cn = 'DiskPool'
 mp_cn = 'MemoryPool'
 pp_cn = 'ProcessorPool'
 np_cn = 'NetworkPool'

 def init_list(server, virt):
-    # Verify DiskPool on machine
-    status = create_diskpool_file()
-    if status != PASS:
-        return status, None
-
     # Verify the Virtual network on machine
     vir_network = net_list(server, virt)
     if len(vir_network) > 0:
@@ -68,7 +60,7 @@
                          test_network)
             return SKIP, None

-    disk_instid = '%s/%s' % (dp_cn, test_dpath)
+    disk_instid = '%s/%s' % (dp_cn, default_pool_name)
     net_instid = '%s/%s' % (np_cn, test_network)
     mem_instid = '%s/0' % mp_cn
     proc_instid = '%s/0' % pp_cn
@@ -78,7 +70,7 @@
         get_typed_class(virt, dp_cn) : [disk_instid, RASD_TYPE_DISK],
         get_typed_class(virt, np_cn) : [net_instid, RASD_TYPE_NET_ETHER]
     }
-    return status, pool_list
+    return PASS, pool_list

 def print_error(fieldname="", ret_value="", exp_value=""):
     logger.error("%s Mismatch", fieldname)
@@ -113,6 +105,7 @@
         virt = "Xen"
     else:
         virt = main.options.virt
+
     status, pool_list = init_list(ip, virt)
     if status != PASS:
         logger.error("Failed to initialise the list")
@@ -149,7 +142,6 @@
         return FAIL

     status = verify_fields(pool_list, netpool, get_typed_class(virt, np_cn))
-    cleanup_restore(ip, virt)
     return status

 if __name__ == "__main__":

diff -r 553bf81e676d -r 3e14187adcad suites/libvirt-cim/cimtest/ResourcePool/02_rp_gi_errors.py
--- a/suites/libvirt-cim/cimtest/ResourcePool/02_rp_gi_errors.py Fri Sep 05 15:43:10 2008 -0700
+++ b/suites/libvirt-cim/cimtest/ResourcePool/02_rp_gi_errors.py Fri Sep 05 15:43:10 2008 -0700
@@ -36,9 +36,7 @@
 from distutils.file_util import move_file
 from CimTest.ReturnCodes import PASS, SKIP
 from CimTest.Globals import logger, CIM_USER, CIM_PASS, CIM_NS
-from XenKvmLib.const import do_main
-from XenKvmLib.common_util import cleanup_restore, test_dpath, \
-create_diskpool_file
+from XenKvmLib.const import do_main, default_pool_name

 sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']

@@ -95,11 +93,6 @@
     conn = assoc.myWBEMConnection('http://%s' % ip, (CIM_USER, CIM_PASS),
                                   CIM_NS)

-    # Verify DiskPool on machine
-    status = create_diskpool_file()
-    if status != PASS:
-        return status
-
     # Verify the Virtual Network on the machine.
     vir_network = net_list(ip, virt)
     if len(vir_network) > 0:
@@ -112,7 +105,6 @@
         if not ret:
             logger.error("Failed to create the Virtual Network '%s'",
                          test_network)
-            cleanup_restore(ip, virt)
             return SKIP

     netid = "%s/%s" % ("NetworkPool", test_network)

@@ -134,16 +126,13 @@
     ret_value = err_invalid_instid_keyname(conn, cn, instid)
     if ret_value != PASS:
         logger.error("------ FAILED: Invalid InstanceID Key Name.------")
-        cleanup_restore(ip, virt)
         return ret_value

     ret_value = err_invalid_instid_keyvalue(conn, cn)
     if ret_value != PASS:
         logger.error("------ FAILED: Invalid InstanceID Key Value.------")
-        cleanup_restore(ip, virt)
         return ret_value

-    cleanup_restore(ip, virt)
     return PASS

 if __name__ == "__main__":

diff -r 553bf81e676d -r 3e14187adcad suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py
--- a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Fri Sep 05 15:43:10 2008 -0700
+++ b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Fri Sep 05 15:43:10 2008 -0700
@@ -60,17 +60,16 @@
 from VirtLib.live import virsh_version
 from CimTest.ReturnCodes import PASS, FAIL, SKIP
 from CimTest.Globals import logger, CIM_ERROR_GETINSTANCE, CIM_ERROR_ASSOCIATORS
-from XenKvmLib.const import do_main
+from XenKvmLib.const import do_main, default_pool_name, default_network_name
 from XenKvmLib.classes import get_typed_class
-from XenKvmLib.common_util import cleanup_restore, create_diskpool_conf, \
-    print_field_error
-from XenKvmLib.const import default_network_name
+from XenKvmLib.common_util import print_field_error

 platform_sup = ['Xen', 'KVM', 'XenFV', 'LXC']

-memid = "%s/%s" % ("MemoryPool", 0)
-procid = "%s/%s" % ("ProcessorPool", 0)
-test_npool = default_network_name
+memid = "MemoryPool/0"
+procid = "ProcessorPool/0"
+netid = "NetworkPool/%s" % default_network_name
+diskid = "DiskPool/%s" % default_pool_name

 def get_or_bail(virt, ip, id, pool_class):
     """
@@ -83,7 +82,6 @@
     except Exception, detail:
         logger.error(CIM_ERROR_GETINSTANCE, '%s' % pool_class)
         logger.error("Exception: %s", detail)
-        cleanup_restore(ip, virt)
         sys.exit(FAIL)
     return instance

@@ -132,20 +130,14 @@
     dpool = npool = mpool = ppool = None
     pool_set = []
     try :
-        status, diskid = create_diskpool_conf(server, virt)
-        if status != PASS:
-            return status, pool_set, None
-
-        dpool = get_pool_info(virt, server, diskid, poolname="DiskPool")
+        dpool = get_pool_info(virt, server, diskid, poolname="DiskPool")
         mpool = get_pool_info(virt, server, memid, poolname= "MemoryPool")
         ppool = get_pool_info(virt, server, procid, poolname= "ProcessorPool")

-        netid = "%s/%s" % ("NetworkPool", test_npool)
         npool = get_pool_info(virt, server, netid, poolname= "NetworkPool")
         if dpool.InstanceID == None or mpool.InstanceID == None \
            or npool.InstanceID == None or ppool.InstanceID == None:
             logger.error("Get pool None")
-            cleanup_restore(server, virt)
             return FAIL
         else:
             pool_set = [dpool, mpool, ppool, npool]
@@ -204,12 +196,10 @@

     status, pool = get_pool_details(virt, server)
     if status != PASS:
-        cleanup_restore(server, virt)
         return FAIL

     status = verify_sdc_with_ac(virt, server, pool)

-    cleanup_restore(server, virt)
     return status

 if __name__ == "__main__":
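Taken together with the previous patch, the series assumes the harness owns the disk pool lifecycle: create the pool once before any test runs, tear it down when the suite finishes. A minimal sketch of that flow using the common_util helpers this series touches; suite_setup()/suite_teardown() are hypothetical names, and the real harness wiring may differ:

from XenKvmLib.const import default_pool_name
from XenKvmLib.common_util import create_diskpool_conf, destroy_diskpool
from CimTest.ReturnCodes import PASS

def suite_setup(server, virt):
    # Create the shared pool once, up front.  On success the returned id
    # is "DiskPool/<default_pool_name>".
    status, diskid = create_diskpool_conf(server, virt, dpool=default_pool_name)
    if status != PASS:
        raise RuntimeError("failed to create disk pool '%s'" % default_pool_name)
    return diskid

def suite_teardown(server, virt):
    # Destroy the pool (or restore the old diskpool.conf on pre-0.4.1
    # libvirt) once the whole suite has finished.
    if destroy_diskpool(server, virt, default_pool_name) != PASS:
        raise RuntimeError("failed to destroy disk pool '%s'" % default_pool_name)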

+1 for me.

Kaitlin Rupert wrote:
> [TEST] Remove diskpool creation from RAFP, RP, and SDC tests.
>
> The diskpool is now being created before the tests are run.
> [...]