[PATCH] [TEST] Updating RPCS/10_create_storagevolume.py

# HG changeset patch # User Deepti B. Kalakeri <deeptik@linux.vnet.ibm.com> # Date 1253097826 14400 # Node ID 0a64f90aabb5dd63ac2ab677981a939b2fcf5eeb # Parent 9e08670a3c3749738a65fec7f2faa4c2b68a7092 [TEST] Updating RPCS/10_create_storagevolume.py Updating RPCS/10_create_storagevolume.py to create and use its own dir pool for StorageVol. If we try to use the default_pool_name then this will cause regression in the further tests which refer to the /tmp/cimtest-vol.img as all the information regarding this will get cleared only when the pool under which it is created it destoyed. Tested with KVM and current sources on SLES11. Signed-off-by: Deepti B. Kalakeri <deeptik@linux.vnet.ibm.com> diff -r 9e08670a3c37 -r 0a64f90aabb5 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py --- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py Thu Sep 10 09:32:01 2009 -0700 +++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py Wed Sep 16 06:43:46 2009 -0400 @@ -31,8 +31,7 @@ from VirtLib import utils from CimTest.Globals import logger from CimTest.ReturnCodes import FAIL, PASS, SKIP -from XenKvmLib.const import do_main, platform_sup, default_pool_name, \ - get_provider_version +from XenKvmLib.const import do_main, platform_sup, get_provider_version from XenKvmLib.vsms import RASD_TYPE_STOREVOL from XenKvmLib.rasd import libvirt_rasd_storagepool_changes from XenKvmLib import rpcs_service @@ -129,17 +128,15 @@ return PASS -def cleanup_pool_vol(server, virt, pool_name, clean_vol, exp_vol_path): +def cleanup_pool_vol(server, virt, pool_name, exp_vol_path): try: - if clean_vol == True: - status = destroy_diskpool(server, virt, pool_name) + status = destroy_diskpool(server, virt, pool_name) + if status != PASS: + raise Exception("Unable to destroy diskpool '%s'" % pool_name) + else: + status = undefine_diskpool(server, virt, pool_name) if status != PASS: - raise 
Exception("Unable to destroy diskpool '%s'" % pool_name) - else: - status = undefine_diskpool(server, virt, pool_name) - if status != PASS: - raise Exception("Unable to undefine diskpool '%s'" \ - % pool_name) + raise Exception("Unable to undefine diskpool '%s'" % pool_name) except Exception, details: logger.error("Exception details: %s", details) return FAIL @@ -177,18 +174,13 @@ status = FAIL res = [FAIL] found = 0 - clean_pool=True try: - if pool_type == DIR_POOL: - pool_name = default_pool_name - clean_pool=False - else: - status = create_pool(server, virt, pool_name, pool_attr, - mode_type=pool_type, pool_type="DiskPool") + status = create_pool(server, virt, pool_name, pool_attr, + mode_type=pool_type, pool_type="DiskPool") - if status != PASS: - logger.error("Failed to create pool '%s'", pool_name) - return status + if status != PASS: + logger.error("Failed to create pool '%s'", pool_name) + return status dp_inst_id = "%s/%s" % (dp_cn, pool_name) stovol_settings = get_stovol_settings(server, virt, @@ -211,18 +203,18 @@ found = verify_vol(server, virt, pool_name, exp_vol_path, found) stovol_status = verify_sto_vol_rasd(virt, server, dp_inst_id, exp_vol_path) + + ret = cleanup_pool_vol(server, virt, pool_name, exp_vol_path) + if res[0] == PASS and found == 1 and \ + ret == PASS and stovol_status == PASS: + status = PASS + else: + return FAIL except Exception, details: logger.error("Exception details: %s", details) status = FAIL - ret = cleanup_pool_vol(server, virt, pool_name, - clean_pool, exp_vol_path) - if res[0] == PASS and found == 1 and \ - ret == PASS and stovol_status == PASS: - status = PASS - else: - return FAIL return status if __name__ == "__main__":

Deepti B. Kalakeri wrote:
# HG changeset patch # User Deepti B. Kalakeri <deeptik@linux.vnet.ibm.com> # Date 1253097826 14400 # Node ID 0a64f90aabb5dd63ac2ab677981a939b2fcf5eeb # Parent 9e08670a3c3749738a65fec7f2faa4c2b68a7092 [TEST] Updating RPCS/10_create_storagevolume.py
Updating RPCS/10_create_storagevolume.py to create and use its own dir pool for StorageVol. If we try to use the default_pool_name then this will cause regressions in the further tests which refer to the /tmp/cimtest-vol.img, as all the information regarding this will get cleared only when the pool under which it is created is destroyed.
Not sure I understand what you mean here. Why not make sure /tmp/cimtest-vol.img is removed before the test exits? Then you can create the image in the default pool. Just remove the volume when you're done. I think you can still use the default pool if you'd like, but either approach is fine. However, it looks like you create a new diskpool, but the storage volume is still being created in /tmp. When I print the disk RASD: root/virt:KVM_DiskPool.InstanceID="DiskPool/DISK_POOL_DIR" When I print the exp_vol_path: /tmp/cimtest-vol.img The test still passed because vol_list() returns the list of volumes in /tmp, even though the pool_name is DISK_POOL_DIR - so something is off here. Can you take a look to see what might be causing this? -- Kaitlin Rupert IBM Linux Technology Center kaitlin@linux.vnet.ibm.com
participants (2)
-
Deepti B. Kalakeri
-
Kaitlin Rupert