yunguol(a)cn.ibm.com wrote:
# HG changeset patch
# User Yun Guo Lian <yunguol(a)cn.ibm.com>
# Date 1242720742 25200
# Node ID 0666f518db98e4406521f1a618318a98aead6974
# Parent 6dc2d815e480237c91115cd0d86f6325503e33f7
[TEST] Add new tc to validate that the Disk child pool can be deleted through the
providers
Tested for KVM with current sources
Signed-off-by: Guolian Yun<yunguol(a)cn.ibm.com>
diff -r 6dc2d815e480 -r 0666f518db98 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/09_DeleteDiskPool.py Tue May 19 01:12:22 2009 -0700
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+#
+# Copyright 2009 IBM Corp.
+#
+# Authors:
+# Guolian Yun <yunguol(a)cn.ibm.com>
+#
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# This test case should test the DeleteResourcePool service
+# supplied by the RPCS provider.
+# The DeleteResourcePool is used to delete a resource pool.
+# DeleteResourcePool() details:
+# Input
+# -----
+# IN -- Pool -- CIM_ResourcePool REF -- The resource pool to delete
+#
+# Output
+# ------
+# OUT -- Job -- CIM_ConcreteJob REF -- Returned job if started
+# OUT -- Error-- String -- Encoded error instance if the operation
+# failed and did not return a job.
+#
+# Exception details before Revision 841
+# -----
+# Error code: CIM_ERR_NOT_SUPPORTED
+#
+# After revision 841, the service is implemented
+#
+# -Date: 19.05.2009
+
+import sys
+import pywbem
+from XenKvmLib import rpcs_service
+from CimTest.Globals import logger
+from CimTest.ReturnCodes import FAIL, PASS
+from XenKvmLib.const import do_main, platform_sup, get_provider_version
+from XenKvmLib.enumclass import EnumInstances, EnumNames
+from XenKvmLib.classes import get_typed_class
+from XenKvmLib.pool import create_netpool, verify_pool, destroy_netpool
+
+cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED
+cim_mname = "DeleteResourcePool"
+libvirt_cim_child_pool_rev = 841
+test_pool = "pool"
+
+@do_main(platform_sup)
+def main():
+ status = FAIL
+ options = main.options
+ rpcs_conn = eval("rpcs_service." + get_typed_class(options.virt, \
+ "ResourcePoolConfigurationService"))(options.ip)
+ curr_cim_rev, changeset = get_provider_version(options.virt, options.ip)
+ if curr_cim_rev < libvirt_cim_child_pool_rev:
+ try:
+ rpcs_conn.DeleteResourcePool()
+ except pywbem.CIMError, (err_no, desc):
+ if err_no == cim_errno :
+                logger.info("Got expected exception for '%s' service", cim_mname)
+ logger.info("Errno is '%s' ", err_no)
+ logger.info("Error string is '%s'", desc)
+ return PASS
+ else:
+ logger.error("Unexpected rc code %s and description %s\n",
+ err_no, desc)
+ return FAIL
+ elif curr_cim_rev >= libvirt_cim_child_pool_rev:
+ pool_attr = {
+ "Path" : "/tmp"
+ }
+ dp = get_typed_class(options.virt, 'DiskPool')
+ dp_id = "DiskPool/%s" % test_pool
+
+ status = create_netpool(options.ip, options.virt, test_pool, pool_attr,
+ pool_type="DiskPool")
You could rename create_netpool() to something more generic if you are planning
to use it for both network and disk pools.
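A thin generic front end would be one low-churn way to do that; this is just a
rough sketch (create_pool() is a hypothetical name, and it relies on the
pool_type parameter this patch adds to create_netpool()):

from XenKvmLib.pool import create_netpool

def create_pool(server, virt, test_pool, pool_attr_list,
                pool_type="NetworkPool"):
    # Hypothetical generic wrapper: forwards to the existing create_netpool(),
    # so current NetworkPool callers keep working and DiskPool callers just
    # pass pool_type="DiskPool".
    return create_netpool(server, virt, test_pool, pool_attr_list,
                          pool_type=pool_type)

Existing network tests could then move over to create_pool() at their own pace.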
+ if status != PASS:
+ logger.error("Error in diskpool creation")
+ return FAIL
+
+ status = verify_pool(options.ip, options.virt, dp,
+ test_pool, pool_attr,
+ pool_type="DiskPool")
+ if status != PASS:
+ logger.error("Error in diskpool verification")
+ destroy_netpool(options.ip, options.virt, test_pool)
+ return FAIL
+
+ pool = EnumNames(options.ip, dp)
+ for i in range(0, len(pool)):
+ ret_pool = pool[i].keybindings['InstanceID']
+ if ret_pool == dp_id:
+ pool_settings = pool[i]
+ break
Please initialize pool_settings before the loop; otherwise we will get an
exception when ret_pool never matches dp_id, because pool_settings will never
be set.
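Something along these lines would avoid that (untested sketch, reusing the names
already defined in this test and the destroy_netpool() helper from
XenKvmLib.pool for cleanup):

pool = EnumNames(options.ip, dp)
pool_settings = None
for inst in pool:
    if inst.keybindings['InstanceID'] == dp_id:
        pool_settings = inst
        break

if pool_settings is None:
    logger.error("Unable to find the '%s' pool instance", dp_id)
    destroy_netpool(options.ip, options.virt, test_pool)
    return FAIL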
+ try:
+ rpcs_conn.DeleteResourcePool(Pool = pool_settings)
+ pool = EnumInstances(options.ip, dp)
+ for i in range(0, len(pool)):
+ ret_pool = pool[i].InstanceID
+ if ret_pool == dp_id:
+ raise Exception("Failed to delete %s" % test_pool)
We need to destroy the pool if it is still found here, before returning failure.
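For example (rough sketch; it assumes destroy_netpool(server, virt, pool_name)
from XenKvmLib.pool can also tear down the disk pool, the same assumption the
verification-failure path above already makes):

pool = EnumInstances(options.ip, dp)
for inst in pool:
    if inst.InstanceID == dp_id:
        # the pool survived DeleteResourcePool(), clean it up before failing
        destroy_netpool(options.ip, options.virt, test_pool)
        raise Exception("Failed to delete %s" % test_pool)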
+ status = PASS
+ except Exception, details:
+ logger.error(details)
+ return FAIL
+
+ return status
+
+if __name__ == "__main__":
+ sys.exit(main())
diff -r 6dc2d815e480 -r 0666f518db98 suites/libvirt-cim/lib/XenKvmLib/pool.py
--- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Sun May 17 23:34:58 2009 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Tue May 19 01:12:22 2009 -0700
@@ -32,7 +32,7 @@
from XenKvmLib import rpcs_service
import pywbem
from CimTest.CimExt import CIMClassMOF
-from XenKvmLib.vxml import NetXML
+from XenKvmLib.vxml import NetXML, PoolXML
cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED
cim_mname = "CreateChildResourcePool"
@@ -105,12 +105,12 @@
return volume
-def get_pool_rasds(server, virt):
+def get_pool_rasds(server, virt, pool_type="NetworkPool"):
net_pool_rasds = []
ac_cn = get_typed_class(virt, "AllocationCapabilities")
an_cn = get_typed_class(virt, "SettingsDefineCapabilities")
- key_list = {"InstanceID" : "NetworkPool/0" }
+ key_list = {"InstanceID" : "%s/0" % pool_type}
try:
inst = GetInstance(server, ac_cn, key_list)
@@ -144,7 +144,8 @@
return PASS
-def create_netpool(server, virt, test_pool, pool_attr_list):
+def create_netpool(server, virt, test_pool, pool_attr_list,
+ pool_type="NetworkPool"):
status = PASS
rpcs = get_typed_class(virt, "ResourcePoolConfigurationService")
rpcs_conn = eval("rpcs_service." + rpcs)(server)
@@ -162,22 +163,23 @@
logger.error("Unexpected rc code %s and description %s\n",
err_no, desc)
return FAIL
- elif curr_cim_rev >= libvirt_cim_child_pool_rev:
- n_list = net_list(server, virt)
- for _net_name in n_list:
- net_xml = NetXML(server=server, networkname=_net_name,
- virt=virt, is_new_net=False)
- pool_use_attr = net_xml.xml_get_netpool_attr_list()
- if pool_attr_list['Address'] in pool_use_attr:
- logger.error("IP address is in use by a different network")
- return FAIL
+ elif curr_cim_rev >= libvirt_cim_child_pool_rev:
+ if pool_type == "NetworkPool":
+ n_list = net_list(server, virt)
+ for _net_name in n_list:
+ net_xml = NetXML(server=server, networkname=_net_name,
+ virt=virt, is_new_net=False)
+ pool_use_attr = net_xml.xml_get_netpool_attr_list()
+ if pool_attr_list['Address'] in pool_use_attr:
+                    logger.error("IP address is in use by a different network")
+ return FAIL
- net_pool_rasds = get_pool_rasds(server, virt)
+ net_pool_rasds = get_pool_rasds(server, virt, pool_type)
if len(net_pool_rasds) == 0:
- logger.error("We can not get NetPoolRASDs")
+ logger.error("We can not get PoolRASDs")
return FAIL
else:
-            net_pool_rasds[0]['PoolID'] = "NetworkPool/%s" % test_pool
+            net_pool_rasds[0]['PoolID'] = "%s/%s" % (pool_type, test_pool)
for attr, val in pool_attr_list.iteritems():
net_pool_rasds[0][attr] = val
These changes conflict with the latest changes submitted for "#3 Update RPCS/04
with the latest updates of pool verification".
Are you planning to modify "#3 Update RPCS/04 with the latest updates of pool
verification" to use the above changes as well?
@@ -194,7 +196,8 @@
return status
-def verify_pool(server, virt, pooltype, poolname, pool_attr_list):
+def verify_pool(server, virt, pooltype, poolname, pool_attr_list,
+ pool_type="NetworkPool"):
status = FAIL
pool_list = EnumInstances(server, pooltype)
if len(pool_list) < 1:
@@ -202,16 +205,20 @@
len(pool_list))
return FAIL
- poolid = "NetworkPool/%s" % poolname
+ poolid = "%s/%s" % (pool_type, poolname)
for i in range(0, len(pool_list)):
ret_pool = pool_list[i].InstanceID
if ret_pool != poolid:
continue
- net_xml = NetXML(server, virt=virt, networkname=poolname,
- is_new_net=False)
- ret_pool_attr_list = net_xml.xml_get_netpool_attr_list()
-
+ if pool_type == "NetworkPool":
+ net_xml = NetXML(server, virt=virt, networkname=poolname,
+ is_new_net=False)
+ ret_pool_attr_list = net_xml.xml_get_netpool_attr_list()
+ elif pool_type == "DiskPool":
+ disk_xml = PoolXML(server ,virt=virt, poolname=poolname,
+ is_new_pool=False)
+ ret_pool_attr_list = disk_xml.xml_get_pool_attr_list()
for i in range(0, len(ret_pool_attr_list)):
if ret_pool_attr_list[i] not in pool_attr_list.itervalues():
logger.error("Got error when parsing %s",
ret_pool_attr_list[i])
diff -r 6dc2d815e480 -r 0666f518db98 suites/libvirt-cim/lib/XenKvmLib/vxml.py
--- a/suites/libvirt-cim/lib/XenKvmLib/vxml.py Sun May 17 23:34:58 2009 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/vxml.py Tue May 19 01:12:22 2009 -0700
@@ -291,7 +291,7 @@
class PoolXML(Virsh, XMLClass):
def __init__(self, server, poolname=const.default_pool_name,
- virt='xen'):
+ virt='xen', is_new_pool=True):
XMLClass.__init__(self)
if virt == 'XenFV':
@@ -300,6 +300,17 @@
self.pool_name = poolname
self.server = server
+ if is_new_pool is False:
+ cmd = "virsh pool-dumpxml %s" % self.pool_name
+ s, disk_xml = utils.run_remote(server, cmd)
+ if s != 0:
+                logger.error("Error dumping the pool xml")
+ return None
+ else:
+ self.xml_string = disk_xml
+ self.xdoc = minidom.parseString(self.xml_string)
+ return
+
pool = self.add_sub_node(self.xdoc, 'pool', type='dir')
self.add_sub_node(pool, 'name', self.pool_name)
target = self.add_sub_node(pool, 'target')
@@ -315,6 +326,12 @@
dpoolname = self.get_value_xpath('/pool/name')
return dpoolname
+ def xml_get_pool_attr_list(self):
+ pool_attr_list = []
+ poolpath = self.get_value_xpath('/pool/target/path')
+ pool_attr_list.append(poolpath)
+
+ return pool_attr_list
class VirtXML(Virsh, XMLClass):
"""Base class for all XML generation &
operation"""
_______________________________________________
Libvirt-cim mailing list
Libvirt-cim(a)redhat.com
https://www.redhat.com/mailman/listinfo/libvirt-cim