[PATCH] [TEST] Add timestamps to main.py to calculate run time of tests
by Kaitlin Rupert
# HG changeset patch
# User Kaitlin Rupert <karupert(a)us.ibm.com>
# Date 1252022738 25200
# Node ID 2d852ba88fd24102ec988145e464a13f5faae5c0
# Parent db3af9cb2c9affb0a32a8ea3a2c23648c5efe91e
[TEST] Add timestamps to main.py to calculate run time of tests
These changes allow the user to specify the --print-exec-time flag, which will
print the execution time of each test. If this flag isn't specified, the
total run time of the test run is still printed.
Signed-off-by: Kaitlin Rupert <karupert(a)us.ibm.com>
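For reference, a minimal standalone sketch of the timing approach (Python 2, matching
main.py; the command string and helper name below are illustrative, not part of the
patch): wrap the command in time() calls and decompose the elapsed seconds, here with
divmod() rather than the repeated multiplication used in print_exec_time():

    from time import time
    import commands   # Python 2 module already used by main.py

    def format_exec_time(exec_time):
        # Break elapsed seconds into h/min/sec/msec -- the same
        # decomposition as print_exec_time() in the patch below.
        m, s = divmod(exec_time, 60)
        h, m = divmod(m, 60)
        msec = (s - int(s)) * 1000
        return "%dh %dmin %dsec %dmsec" % (h, m, int(s), msec)

    start_time = time()
    status, output = commands.getstatusoutput("sleep 1")
    print "Execution time: " + format_exec_time(time() - start_time)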
diff -r db3af9cb2c9a -r 2d852ba88fd2 suites/libvirt-cim/main.py
--- a/suites/libvirt-cim/main.py Thu Sep 03 13:03:52 2009 -0700
+++ b/suites/libvirt-cim/main.py Thu Sep 03 17:05:38 2009 -0700
@@ -22,6 +22,7 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
+from time import time
from optparse import OptionParser
import os
import sys
@@ -64,6 +65,9 @@
help="Duplicate the output to stderr")
parser.add_option("--report", dest="report",
help="Send report using mail info: --report=<recipient addr>")
+parser.add_option("--print-exec-time", action="store_true",
+ dest="print_exec_time",
+ help="Print execution time of each test")
TEST_SUITE = 'cimtest'
CIMTEST_RCFILE = '%s/.cimtestrc' % os.environ['HOME']
@@ -146,6 +150,27 @@
return PASS
+def print_exec_time(testsuite, exec_time):
+
+ #Convert run time from seconds to hours
+ tmp = exec_time / (60 * 60)
+ h = int(tmp)
+
+ #Subtract out hours and convert remainder to minutes
+ tmp = (tmp - h) * 60
+ m = int(tmp)
+
+ #Subtract out minutes and convert remainder to seconds
+ tmp = (tmp - m) * 60
+ s = int(tmp)
+
+ #Subtract out seconds and convert remainder to milliseconds
+ tmp = (tmp - s) * 1000
+ msec = int(tmp)
+
+ testsuite.debug(" Execution time: %sh %smin %ssec %smsec" %
+ (h, m, s, msec))
+
def main():
(options, args) = parser.parse_args()
to_addr = None
@@ -213,6 +238,8 @@
print "\nTesting " + options.virt + " hypervisor"
+ test_run_time_total = 0
+
for test in test_list:
testsuite.debug(div)
t_path = os.path.join(TEST_SUITE, test['group'])
@@ -222,13 +249,25 @@
options.virt, dbg,
options.t_url)
cmd = cdto + ' && ' + ' ' + run
+ start_time = time()
status, output = commands.getstatusoutput(cmd)
+ end_time = time()
os_status = os.WEXITSTATUS(status)
testsuite.print_results(test['group'], test['test'], os_status, output)
+ exec_time = end_time - start_time
+ test_run_time_total = test_run_time_total + exec_time
+
+ if options.print_exec_time:
+ print_exec_time(testsuite, exec_time)
+
testsuite.debug("%s\n" % div)
+ testsuite.debug("Total test execution: ")
+ print_exec_time(testsuite, test_run_time_total)
+ testsuite.debug("\n")
+
testsuite.finish()
status = cleanup_env(options.ip, options.virt)
[PATCH] Cleanup _get_rasds() in Virt_RASD.c
by Jim Fehlig
# HG changeset patch
# User Jim Fehlig <jfehlig(a)novell.com>
# Date 1253641563 21600
# Node ID 81b6cd4ae355024303a8459817b4f15339d17111
# Parent 7c5106b0b092147c521ef1f462b9a41a44a313f8
Cleanup _get_rasds() in Virt_RASD.c
I received a bug report about a memory leak in _get_rasds(): tmp_dev was
calloc()'d and then immediately overwritten with the pointer returned by
virt_device_dup(), leaking the first allocation. While fixing the leak, I
took the opportunity to do some other tidying in this function.
Signed-off-by: Jim Fehlig <jfehlig(a)novell.com>
diff -r 7c5106b0b092 -r 81b6cd4ae355 src/Virt_RASD.c
--- a/src/Virt_RASD.c Wed Sep 16 11:49:21 2009 -0700
+++ b/src/Virt_RASD.c Tue Sep 22 11:46:03 2009 -0600
@@ -664,6 +664,7 @@
int count;
int i;
struct virt_device *devs = NULL;
+ const char *host = NULL;
count = get_devices(dom, &devs, type);
if (count <= 0)
@@ -672,8 +673,13 @@
/* Bit hackish, but for proc we need to cut list down to one. */
if (type == CIM_RES_TYPE_PROC) {
struct virt_device *tmp_dev = NULL;
- tmp_dev = calloc(1, sizeof(*tmp_dev));
tmp_dev = virt_device_dup(&devs[count - 1]);
+ if (tmp_dev == NULL) {
+ cu_statusf(broker, &s,
+ CMPI_RC_ERR_FAILED,
+ "Failed to allocate memory for proc RASD");
+ goto out;
+ }
tmp_dev->id = strdup("proc");
@@ -685,15 +691,16 @@
count = 1;
}
+ host = virDomainGetName(dom);
+ if (host == NULL) {
+ cu_statusf(broker, &s,
+ CMPI_RC_ERR_FAILED,
+ "Failed to get domain name");
+ goto out;
+ }
+
for (i = 0; i < count; i++) {
CMPIInstance *dev = NULL;
- const char *host = NULL;
-
- host = virDomainGetName(dom);
- if (host == NULL) {
- cleanup_virt_device(&devs[i]);
- continue;
- }
dev = rasd_from_vdev(broker,
&devs[i],
[PATCH] [TEST] #2 Add new tc RASDIndications/01_guest_states_rasd_ind.py
by Deepti B. Kalakeri
# HG changeset patch
# User Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1253654207 25200
# Node ID faf86189f60a2b7e5321996540c390c0598929c9
# Parent f5c62f54d1204d38ce15e48d269d3e887da69937
[TEST] #2 Add new tc RASDIndications/01_guest_states_rasd_ind.py
Patch 2:
--------
1) Checked for RASDIndication support in libvirt-cim
2) Included support for XenFV
To verify the Created|Deleted RASD indications for the guest.
Tested with Xen and current sources on RHEL5.3, and with KVM on F10.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
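The core of the test is a fork-based pattern: the child blocks waiting for the
expected indication while the parent triggers it and then reaps the child. A minimal,
self-contained sketch of that pattern (the two helpers below are placeholders, not
cimtest APIs -- in the test the child runs handle_request(), and the parent runs
gen_indication() followed by poll_for_ind()):

    import os, sys

    def wait_for_indication():
        # Placeholder: handle_request() in the test blocks here until the
        # CIM listener receives the expected RASD indication.
        return True

    def trigger_event():
        # Placeholder: gen_indication() in the test defines/starts or
        # destroys the guest to raise the indication.
        pass

    pid = os.fork()
    if pid == 0:
        # Child: wait for the indication, report success via exit status.
        if wait_for_indication():
            os._exit(0)
        os._exit(1)
    else:
        # Parent: cause the indication, then reap the child
        # (poll_for_ind() does this in the test).
        trigger_event()
        pid_done, status = os.waitpid(pid, 0)
        sys.exit(os.WEXITSTATUS(status))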
diff -r f5c62f54d120 -r faf86189f60a suites/libvirt-cim/cimtest/RASDIndications/01_guest_states_rasd_ind.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/suites/libvirt-cim/cimtest/RASDIndications/01_guest_states_rasd_ind.py Tue Sep 22 14:16:47 2009 -0700
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+#
+# Copyright 2009 IBM Corp.
+#
+# Authors:
+# Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
+#
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# This testcase is used to verify the Created|Deleted
+# RASD Indications for a guest.
+#
+# Date : 21-09-2009
+#
+
+import sys
+from signal import SIGKILL
+from socket import gethostname
+from os import kill, fork, _exit
+from XenKvmLib.vxml import get_class
+from XenKvmLib.xm_virt_util import active_domain_list
+from CimTest.Globals import logger
+from XenKvmLib.const import do_main, CIM_ENABLE, CIM_DISABLE, \
+ get_provider_version
+from CimTest.ReturnCodes import PASS, FAIL, SKIP
+from XenKvmLib.common_util import poll_for_state_change
+from XenKvmLib.indications import sub_ind, handle_request, poll_for_ind
+
+sup_types = ['KVM', 'Xen', 'XenFV']
+libvirt_guest_rasd_indication_rev = 980
+
+def create_guest(test_dom, ip, virt, cxml, ind_name):
+ try:
+ ret = cxml.cim_define(ip)
+ if not ret:
+ raise Exception("Failed to define domain %s" % test_dom)
+
+ status, dom_cs = poll_for_state_change(ip, virt, test_dom,
+ CIM_DISABLE)
+ if status != PASS:
+ raise Exception("Dom '%s' not in expected state '%s'" \
+ % (test_dom, CIM_DISABLE))
+
+ ret = cxml.cim_start(ip)
+ if ret:
+ raise Exception("Failed to start the domain '%s'" % test_dom)
+ cxml.undefine(ip)
+
+ status, dom_cs = poll_for_state_change(ip, virt, test_dom,
+ CIM_ENABLE)
+ if status != PASS:
+ raise Exception("Dom '%s' not in expected state '%s'" \
+ % (test_dom, CIM_ENABLE))
+
+ except Exception, details:
+ logger.error("Exception details: %s", details)
+ return FAIL, cxml
+
+ return PASS, cxml
+
+def gen_indication(test_dom, s_sysname, virt, cxml, ind_name):
+ status = FAIL
+ try:
+ active_doms = active_domain_list(s_sysname, virt)
+ if test_dom not in active_doms:
+ status, cxml = create_guest(test_dom, s_sysname, virt, cxml, ind_name)
+ if status != PASS:
+ raise Exception("Error setting up the guest '%s'" % test_dom)
+
+ if ind_name == "delete":
+ ret = cxml.cim_destroy(s_sysname)
+ if not ret:
+ raise Exception("Failed to destroy domain '%s'" % test_dom)
+
+ except Exception, details:
+ logger.error("Exception details :%s", details)
+ return FAIL, cxml
+
+ return PASS, cxml
+
+@do_main(sup_types)
+def main():
+ options = main.options
+ virt = options.virt
+ s_sysname = options.ip
+
+ cim_rev, changeset = get_provider_version(virt, s_sysname)
+ if cim_rev < libvirt_guest_rasd_indication_rev:
+ logger.info("Support for Guest Resource Indications is available in "
+ "Libvirt-CIM rev '%s'", libvirt_guest_rasd_indication_rev)
+ return SKIP
+
+ status = FAIL
+ test_dom = 'VM_' + gethostname()
+ ind_names = {
+ 'create' : 'ResourceAllocationSettingDataCreatedIndication',
+ 'delete' : 'ResourceAllocationSettingDataDeletedIndication'
+ }
+
+ virt_xml = get_class(virt)
+ cxml = virt_xml(test_dom)
+ sub_list, ind_names, dict = sub_ind(s_sysname, virt, ind_names)
+ for ind in ind_names.keys():
+ sub = sub_list[ind]
+ ind_name = ind_names[ind]
+ logger.info("\n Verifying '%s' indications ....", ind_name)
+
+ try:
+ pid = fork()
+ if pid == 0:
+ status = handle_request(sub, ind_name, dict,
+ len(ind_names.keys()))
+ if status != PASS:
+ _exit(1)
+ _exit(0)
+ else:
+ try:
+ status, cxml = gen_indication(test_dom, s_sysname,
+ virt, cxml, ind)
+ if status != PASS:
+ raise Exception("Unable to generate indication")
+
+ status = poll_for_ind(pid, ind_name)
+ except Exception, details:
+ kill(pid, SIGKILL)
+ raise Exception(details)
+
+ except Exception, details:
+ logger.error("Exception: %s", details)
+ status = FAIL
+
+ if status != PASS:
+ break
+
+ #Make sure all subscriptions are really unsubscribed
+ for ind, sub in sub_list.iteritems():
+ sub.unsubscribe(dict['default_auth'])
+ logger.info("Cancelling subscription for %s", ind_names[ind])
+
+ active_doms = active_domain_list(s_sysname, virt)
+ if test_dom in active_doms:
+ ret = cxml.cim_destroy(s_sysname)
+ if not ret:
+ logger.error("Failed to Destroy the domain")
+ return FAIL
+
+ return status
+if __name__ == "__main__":
+ sys.exit(main())
+
[PATCH] [TEST] #2 Add new tc RASDIndications/02_guest_add_mod_rem_rasd_ind.py
by Deepti B. Kalakeri
# HG changeset patch
# User Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1253655145 25200
# Node ID 215cbc24f8f95f95543a24ecc7e3b1d80594ecdd
# Parent faf86189f60a2b7e5321996540c390c0598929c9
[TEST] #2 Add new tc RASDIndications/02_guest_add_mod_rem_rasd_ind.py
Patch 2:
--------
1) Checked for RASDIndication support in libvirt-cim
2) Included support for XenFV
3) Removed cim_start() from the testcase
4) Undefined the guest at the end of the test.
To verify the Created|Modified|Deleted RASD indications for the guest.
Tested with Xen and current sources on RHEL5.3 and with KVM on F10.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
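Beyond the fork pattern used by the first test, this test drives the indications
through the VirtualSystemManagementService: a disk RASD is added, the memory RASD is
modified, and the graphics RASD is removed. A small sketch of the InstanceID formats
the test uses when looking up the references for those calls (the domain name below is
illustrative; the formats themselves come straight from the test):

    virt = 'Xen'
    test_dom = 'VM_myhost'          # in the test: 'VM_' + gethostname()

    # VSSD looked up for the 'add' case and passed to add_disk_res()
    vssd_id = '%s:%s' % (virt, test_dom)           # 'Xen:VM_myhost'

    # GraphicsRASD looked up for the 'delete' case and passed to
    # RemoveResourceSettings(), which raises the Deleted indication
    grasd_id = '%s/%s' % (test_dom, 'graphics')    # 'VM_myhost/graphics'

    print vssd_id, grasd_id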
diff -r faf86189f60a -r 215cbc24f8f9 suites/libvirt-cim/cimtest/RASDIndications/02_guest_add_mod_rem_rasd_ind.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/suites/libvirt-cim/cimtest/RASDIndications/02_guest_add_mod_rem_rasd_ind.py Tue Sep 22 14:32:25 2009 -0700
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+#
+# Copyright 2009 IBM Corp.
+#
+# Authors:
+# Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
+#
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# This testcase is used to verify the Created|Modified|Deleted
+# RASD Indications for a guest.
+#
+# Date : 21-09-2009
+#
+
+import sys
+from signal import SIGKILL
+from XenKvmLib import vsms
+from XenKvmLib import vsms_util
+from XenKvmLib.classes import get_typed_class
+from XenKvmLib.enumclass import EnumNames
+from socket import gethostname
+from os import kill, fork, _exit
+from XenKvmLib.vxml import get_class
+from CimTest.Globals import logger
+from XenKvmLib.const import do_main, CIM_DISABLE, get_provider_version
+from CimTest.ReturnCodes import PASS, FAIL, SKIP
+from XenKvmLib.common_util import poll_for_state_change
+from XenKvmLib.indications import sub_ind, handle_request, poll_for_ind
+
+sup_types = ['KVM', 'Xen', 'XenFV']
+libvirt_guest_rasd_indication_rev = 980
+
+nmem = 256
+nmac = '00:11:22:33:44:55'
+
+def create_guest(test_dom, ip, virt, cxml):
+ try:
+ ret = cxml.cim_define(ip)
+ if not ret:
+ raise Exception("Failed to define domain %s" % test_dom)
+
+ status, dom_cs = poll_for_state_change(ip, virt, test_dom,
+ CIM_DISABLE)
+ if status != PASS:
+ raise Exception("Dom '%s' not in expected state '%s'" \
+ % (test_dom, CIM_DISABLE))
+
+ except Exception, details:
+ logger.error("Exception details: %s", details)
+ return FAIL, cxml
+
+ return PASS, cxml
+
+
+def get_rasd_rec(virt, cn, s_sysname, inst_id):
+ classname = get_typed_class(virt, cn)
+ recs = EnumNames(s_sysname, classname)
+ rasd = None
+ for rasd_rec in recs:
+ ret_pool = rasd_rec['InstanceID']
+ if ret_pool == inst_id:
+ rasd = rasd_rec
+ break
+
+ return rasd
+
+def gen_indication(test_dom, s_sysname, virt, cxml, service, ind_name,
+ rasd=None, nmem_disk=None):
+ status = FAIL
+ try:
+
+ if ind_name == "add":
+ cn = 'VirtualSystemSettingData'
+ inst_id = '%s:%s' % (virt, test_dom)
+ classname = get_typed_class(virt, cn)
+ vssd_ref = get_rasd_rec(virt, cn, s_sysname, inst_id)
+
+ if vssd_ref == None:
+ raise Exception("Failed to get vssd_ref for '%s'" % test_dom)
+
+ status = vsms_util.add_disk_res(s_sysname, service, cxml,
+ vssd_ref, rasd, nmem_disk)
+
+ elif ind_name == "modify":
+ status = vsms_util.mod_mem_res(s_sysname, service, cxml,
+ rasd, nmem_disk)
+
+ elif ind_name == 'delete':
+ cn = 'GraphicsResourceAllocationSettingData'
+ inst_id = '%s/%s' % (test_dom, "graphics")
+ classname = get_typed_class(virt, cn)
+ nrasd = get_rasd_rec(virt, cn, s_sysname, inst_id)
+
+ if nrasd == None:
+ raise Exception("Failed to get nrasd for '%s'" % test_dom)
+
+ res = service.RemoveResourceSettings(ResourceSettings=[nrasd])
+ status = res[0]
+
+ except Exception, details:
+ logger.error("Exception details :%s", details)
+ return FAIL
+
+ return status
+
+@do_main(sup_types)
+def main():
+ options = main.options
+ virt = options.virt
+ s_sysname = options.ip
+
+ cim_rev, changeset = get_provider_version(virt, s_sysname)
+ if cim_rev < libvirt_guest_rasd_indication_rev:
+ logger.info("Support for Guest Resource Indications is available in "
+ "Libvirt-CIM rev '%s'", libvirt_guest_rasd_indication_rev)
+ return SKIP
+
+ status = FAIL
+ test_dom = 'VM_' + gethostname()
+ ind_names = {
+ 'add' : 'ResourceAllocationSettingDataCreatedIndication',
+ 'modify' : 'ResourceAllocationSettingDataModifiedIndication',
+ 'delete' : 'ResourceAllocationSettingDataDeletedIndication'
+ }
+
+ sub_list, ind_names, dict = sub_ind(s_sysname, virt, ind_names)
+ virt_xml = get_class(virt)
+ cxml = virt_xml(test_dom, mac=nmac)
+ service = vsms.get_vsms_class(options.virt)(options.ip)
+ ndpath = cxml.secondary_disk_path
+
+ if virt == 'KVM':
+ nddev = 'hdb'
+ else:
+ nddev = 'xvdb'
+
+ disk_attr = { 'nddev' : nddev,
+ 'src_path' : ndpath
+ }
+ dasd = vsms.get_dasd_class(options.virt)(dev=nddev,
+ source=cxml.secondary_disk_path,
+ name=test_dom)
+ masd = vsms.get_masd_class(options.virt)(megabytes=nmem, name=test_dom)
+ rasd_info = { 'add' : [dasd, disk_attr],
+ 'modify' : [masd, nmem]
+ }
+
+ status, cxml = create_guest(test_dom, s_sysname, virt, cxml)
+ if status != PASS:
+ logger.error("Error setting up the guest '%s'" % test_dom)
+ return FAIL
+
+ for ind in ind_names.keys():
+ sub = sub_list[ind]
+ ind_name = ind_names[ind]
+ logger.info("\n Verifying '%s' indications ....", ind_name)
+
+ try:
+ pid = fork()
+ if pid == 0:
+ status = handle_request(sub, ind_name, dict,
+ len(ind_names.keys()))
+ if status != PASS:
+ _exit(1)
+
+ _exit(0)
+ else:
+ try:
+ if ind != 'delete':
+ rasd = rasd_info[ind][0]
+ val = rasd_info[ind][1]
+ status = gen_indication(test_dom, s_sysname,
+ virt, cxml, service,
+ ind, rasd, val)
+ else:
+ status = gen_indication(test_dom, s_sysname,
+ virt, cxml, service,
+ ind)
+ if status != PASS:
+ raise Exception("Unable to generate indication")
+
+ status = poll_for_ind(pid, ind_name)
+ if status != PASS:
+ raise Exception("Poll for indication Failed")
+
+ except Exception, details:
+ kill(pid, SIGKILL)
+ raise Exception(details)
+
+ except Exception, details:
+ logger.error("Exception: %s", details)
+ status = FAIL
+
+ if status != PASS:
+ break
+
+ #Make sure all subscriptions are really unsubscribed
+ for ind, sub in sub_list.iteritems():
+ sub.unsubscribe(dict['default_auth'])
+ logger.info("Cancelling subscription for %s", ind_names[ind])
+
+ ret = cxml.undefine(s_sysname)
+ if not ret:
+ logger.error("Failed to undefine the domain '%s'", test_dom)
+ return FAIL
+
+ return status
+if __name__ == "__main__":
+ sys.exit(main())
+
[PATCH] [TEST] Add new tc RASDIndications/02_guest_add_mod_rem_rasd_ind.py
by Deepti B. Kalakeri
# HG changeset patch
# User Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1253558705 25200
# Node ID 20f8f3d7e3ef6d943e3bbab928f8c9e5108262c6
# Parent 7b7fa4294f3602db5aca1d5958ebfd6dc849ef46
[TEST] Add new tc RASDIndications/02_guest_add_mod_rem_rasd_ind.py
To verify the Created|Modified|Deleted RASD indications for the guest.
Tested with Xen and current sources on RHEL5.3.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
diff -r 7b7fa4294f36 -r 20f8f3d7e3ef suites/libvirt-cim/cimtest/RASDIndications/02_guest_add_mod_rem_rasd_ind.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/suites/libvirt-cim/cimtest/RASDIndications/02_guest_add_mod_rem_rasd_ind.py Mon Sep 21 11:45:05 2009 -0700
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+#
+# Copyright 2009 IBM Corp.
+#
+# Authors:
+# Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
+#
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# This testcase is used to verify the Created|Modified|Deleted
+# RASD Indications for a guest.
+#
+# Date : 21-09-2009
+#
+
+import sys
+from signal import SIGKILL
+from XenKvmLib import vsms
+from XenKvmLib import vsms_util
+from XenKvmLib.classes import get_typed_class
+from XenKvmLib.enumclass import EnumNames
+from socket import gethostname
+from os import kill, fork, _exit
+from XenKvmLib.vxml import get_class
+from CimTest.Globals import logger
+from XenKvmLib.const import do_main, CIM_DISABLE, CIM_ENABLE
+from CimTest.ReturnCodes import PASS, FAIL
+from XenKvmLib.common_util import poll_for_state_change
+from XenKvmLib.indications import sub_ind, handle_request, poll_for_ind
+
+sup_types = ['KVM', 'Xen']
+
+nmem = 256
+nmac = '00:11:22:33:44:55'
+
+def create_guest(test_dom, ip, virt, cxml):
+ try:
+ ret = cxml.cim_define(ip)
+ if not ret:
+ raise Exception("Failed to define domain %s" % test_dom)
+
+ status, dom_cs = poll_for_state_change(ip, virt, test_dom,
+ CIM_DISABLE)
+ if status != PASS:
+ raise Exception("Dom '%s' not in expected state '%s'" \
+ % (test_dom, CIM_DISABLE))
+
+ ret = cxml.cim_start(ip)
+ if ret:
+ raise Exception("Failed to start the domain '%s'" % test_dom)
+ cxml.undefine(ip)
+
+ status, dom_cs = poll_for_state_change(ip, virt, test_dom,
+ CIM_ENABLE)
+ if status != PASS:
+ raise Exception("Dom '%s' not in expected state '%s'" \
+ % (test_dom, CIM_ENABLE))
+
+ except Exception, details:
+ logger.error("Exception details: %s", details)
+ return FAIL, cxml
+
+ return PASS, cxml
+
+
+def get_rasd_rec(virt, cn, s_sysname, inst_id):
+ classname = get_typed_class(virt, cn)
+ recs = EnumNames(s_sysname, classname)
+ rasd = None
+ for rasd_rec in recs:
+ ret_pool = rasd_rec['InstanceID']
+ if ret_pool == inst_id:
+ rasd = rasd_rec
+ break
+
+ return rasd
+
+def gen_indication(test_dom, s_sysname, virt, cxml, service, ind_name,
+ rasd=None, nmem_disk=None):
+ status = FAIL
+ try:
+
+ if ind_name == "add":
+ cn = 'VirtualSystemSettingData'
+ inst_id = '%s:%s' % (virt, test_dom)
+ classname = get_typed_class(virt, cn)
+ vssd_ref = get_rasd_rec(virt, cn, s_sysname, inst_id)
+
+ if vssd_ref == None:
+ raise Exception("Failed to get vssd_ref for '%s'" % test_dom)
+
+ status = vsms_util.add_disk_res(s_sysname, service, cxml,
+ vssd_ref, rasd, nmem_disk)
+
+ elif ind_name == "modify":
+ status = vsms_util.mod_mem_res(s_sysname, service, cxml,
+ rasd, nmem_disk)
+
+ elif ind_name == 'delete':
+ cn = 'GraphicsResourceAllocationSettingData'
+ inst_id = '%s/%s' % (test_dom, "graphics")
+ classname = get_typed_class(virt, cn)
+ nrasd = get_rasd_rec(virt, cn, s_sysname, inst_id)
+
+ if nrasd == None:
+ raise Exception("Failed to get nrasd for '%s'" % test_dom)
+
+ res = service.RemoveResourceSettings(ResourceSettings=[nrasd])
+ status = res[0]
+
+ except Exception, details:
+ logger.error("Exception details :%s", details)
+ return FAIL
+
+ return status
+
+@do_main(sup_types)
+def main():
+ options = main.options
+ virt = options.virt
+ s_sysname = options.ip
+
+ status = FAIL
+ test_dom = 'VM_' + gethostname()
+ ind_names = {
+ 'add' : 'ResourceAllocationSettingDataCreatedIndication',
+ 'modify' : 'ResourceAllocationSettingDataModifiedIndication',
+ 'delete' : 'ResourceAllocationSettingDataDeletedIndication'
+ }
+
+ sub_list, ind_names, dict = sub_ind(s_sysname, virt, ind_names)
+ virt_xml = get_class(virt)
+ cxml = virt_xml(test_dom, mac=nmac)
+ service = vsms.get_vsms_class(options.virt)(options.ip)
+ ndpath = cxml.secondary_disk_path
+
+ if virt == 'KVM':
+ nddev = 'hdb'
+ else:
+ nddev = 'xvdb'
+
+ disk_attr = { 'nddev' : nddev,
+ 'src_path' : ndpath
+ }
+ dasd = vsms.get_dasd_class(options.virt)(dev=nddev,
+ source=cxml.secondary_disk_path,
+ name=test_dom)
+ masd = vsms.get_masd_class(options.virt)(megabytes=nmem, name=test_dom)
+ rasd_info = { 'add' : [dasd, disk_attr],
+ 'modify' : [masd, nmem]
+ }
+
+ status, cxml = create_guest(test_dom, s_sysname, virt, cxml)
+ if status != PASS:
+ logger.error("Error setting up the guest '%s'" % test_dom)
+ return FAIL
+
+ for ind in ind_names.keys():
+ sub = sub_list[ind]
+ ind_name = ind_names[ind]
+ logger.info("\n Verifying '%s' indications ....", ind_name)
+
+ try:
+ pid = fork()
+ if pid == 0:
+ status = handle_request(sub, ind_name, dict,
+ len(ind_names.keys()))
+ if status != PASS:
+ _exit(1)
+
+ _exit(0)
+ else:
+ try:
+ if ind != 'delete':
+ rasd = rasd_info[ind][0]
+ val = rasd_info[ind][1]
+ status = gen_indication(test_dom, s_sysname,
+ virt, cxml, service,
+ ind, rasd, val)
+ else:
+ status = gen_indication(test_dom, s_sysname,
+ virt, cxml, service,
+ ind)
+ if status != PASS:
+ raise Exception("Unable to generate indication")
+
+ status = poll_for_ind(pid, ind_name)
+ if status != PASS:
+ raise Exception("Poll for indication Failed")
+
+ except Exception, details:
+ kill(pid, SIGKILL)
+ raise Exception(details)
+
+ except Exception, details:
+ logger.error("Exception: %s", details)
+ status = FAIL
+
+ if status != PASS:
+ break
+
+ #Make sure all subscriptions are really unsubscribed
+ for ind, sub in sub_list.iteritems():
+ sub.unsubscribe(dict['default_auth'])
+ logger.info("Cancelling subscription for %s", ind_names[ind])
+
+ ret = cxml.cim_destroy(s_sysname)
+ if not ret:
+ logger.error("Failed to destroy the domain '%s'", test_dom)
+ return FAIL
+
+ return status
+if __name__ == "__main__":
+ sys.exit(main())
+
[PATCH] [TEST] Add new tc RASDIndications/01_guest_states_rasd_ind.py
by Deepti B. Kalakeri
# HG changeset patch
# User Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1253558547 25200
# Node ID 7b7fa4294f3602db5aca1d5958ebfd6dc849ef46
# Parent f5c62f54d1204d38ce15e48d269d3e887da69937
[TEST] Add new tc RASDIndications/01_guest_states_rasd_ind.py
To verify the Created|Deleted RASD indications for the guest.
Tested with Xen and current sources on RHEL5.3.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
diff -r f5c62f54d120 -r 7b7fa4294f36 suites/libvirt-cim/cimtest/RASDIndications/01_guest_states_rasd_ind.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/suites/libvirt-cim/cimtest/RASDIndications/01_guest_states_rasd_ind.py Mon Sep 21 11:42:27 2009 -0700
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+#
+# Copyright 2009 IBM Corp.
+#
+# Authors:
+# Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
+#
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#
+# This testcase is used to verify the Created|Deleted
+# RASD Indications for a guest.
+#
+# Date : 21-09-2009
+#
+
+import sys
+from signal import SIGKILL
+from socket import gethostname
+from os import kill, fork, _exit
+from XenKvmLib.vxml import get_class
+from XenKvmLib.xm_virt_util import active_domain_list
+from CimTest.Globals import logger
+from XenKvmLib.const import do_main, CIM_ENABLE, CIM_DISABLE
+from CimTest.ReturnCodes import PASS, FAIL
+from XenKvmLib.common_util import poll_for_state_change
+from XenKvmLib.indications import sub_ind, handle_request, poll_for_ind
+
+sup_types = ['KVM', 'Xen']
+
+def create_guest(test_dom, ip, virt, cxml, ind_name):
+ try:
+ ret = cxml.cim_define(ip)
+ if not ret:
+ raise Exception("Failed to define domain %s" % test_dom)
+
+ status, dom_cs = poll_for_state_change(ip, virt, test_dom,
+ CIM_DISABLE)
+ if status != PASS:
+ raise Exception("Dom '%s' not in expected state '%s'" \
+ % (test_dom, CIM_DISABLE))
+
+ ret = cxml.cim_start(ip)
+ if ret:
+ raise Exception("Failed to start the domain '%s'" % test_dom)
+ cxml.undefine(ip)
+
+ status, dom_cs = poll_for_state_change(ip, virt, test_dom,
+ CIM_ENABLE)
+ if status != PASS:
+ raise Exception("Dom '%s' not in expected state '%s'" \
+ % (test_dom, CIM_ENABLE))
+
+ except Exception, details:
+ logger.error("Exception details: %s", details)
+ return FAIL, cxml
+
+ return PASS, cxml
+
+def gen_indication(test_dom, s_sysname, virt, cxml, ind_name):
+ status = FAIL
+ try:
+ active_doms = active_domain_list(s_sysname, virt)
+ if test_dom not in active_doms:
+ status, cxml = create_guest(test_dom, s_sysname, virt, cxml, ind_name)
+ if status != PASS:
+ raise Exception("Error setting up the guest '%s'" % test_dom)
+
+ if ind_name == "delete":
+ ret = cxml.cim_destroy(s_sysname)
+ if not ret:
+ raise Exception("Failed to destroy domain '%s'" % test_dom)
+
+ except Exception, details:
+ logger.error("Exception details :%s", details)
+ return FAIL, cxml
+
+ return PASS, cxml
+
+@do_main(sup_types)
+def main():
+ options = main.options
+ virt = options.virt
+ s_sysname = options.ip
+
+ status = FAIL
+ test_dom = 'VM_' + gethostname()
+ ind_names = {
+ 'create' : 'ResourceAllocationSettingDataCreatedIndication',
+ 'delete' : 'ResourceAllocationSettingDataDeletedIndication'
+ }
+
+ virt_xml = get_class(virt)
+ cxml = virt_xml(test_dom)
+ sub_list, ind_names, dict = sub_ind(s_sysname, virt, ind_names)
+ for ind in ind_names.keys():
+ sub = sub_list[ind]
+ ind_name = ind_names[ind]
+ logger.info("\n Verifying '%s' indications ....", ind_name)
+
+ try:
+ pid = fork()
+ if pid == 0:
+ status = handle_request(sub, ind_name, dict,
+ len(ind_names.keys()))
+ if status != PASS:
+ _exit(1)
+ _exit(0)
+ else:
+ try:
+ status, cxml = gen_indication(test_dom, s_sysname,
+ virt, cxml, ind)
+ if status != PASS:
+ kill(pid, SIGKILL)
+ raise Exception("Unable to generate indication")
+
+ status = poll_for_ind(pid, ind_name)
+ except Exception, details:
+ kill(pid, SIGKILL)
+ raise Exception(details)
+
+ except Exception, details:
+ logger.error("Exception: %s", details)
+ status = FAIL
+
+ if status != PASS:
+ break
+
+ #Make sure all subscriptions are really unsubscribed
+ for ind, sub in sub_list.iteritems():
+ sub.unsubscribe(dict['default_auth'])
+ logger.info("Cancelling subscription for %s", ind_names[ind])
+
+ active_doms = active_domain_list(s_sysname, virt)
+ if test_dom in active_doms:
+ ret = cxml.cim_destroy(s_sysname)
+ if not ret:
+ logger.error("Failed to Destroy the domain")
+ return FAIL
+
+ return status
+if __name__ == "__main__":
+ sys.exit(main())
+
[PATCH] This patch replaces the get_previous_instance() function with get_rasd_by_name()
by Sharad Mishra
# HG changeset patch
# User Sharad Mishra <snmishra(a)us.ibm.com>
# Date 1253292817 25200
# Node ID f916b221ea7e21b091d36ef841eb3bde1813798d
# Parent fc50acd35fe7f344e296441a88a00f42a7636ad6
This patch replaces the get_previous_instance() function with a call to
get_rasd_by_name(), which looks up the previous RASD instance directly by its
fully-qualified device name ("<domain>/<device id>").
Signed-off-by: Sharad Mishra <snmishra(a)us.ibm.com>
diff -r fc50acd35fe7 -r f916b221ea7e src/Virt_VirtualSystemManagementService.c
--- a/src/Virt_VirtualSystemManagementService.c Wed Sep 16 11:49:21 2009 -0700
+++ b/src/Virt_VirtualSystemManagementService.c Fri Sep 18 09:53:37 2009 -0700
@@ -2185,49 +2185,6 @@
return s;
}
-static CMPIInstance *get_previous_instance(struct domain *dominfo,
- const CMPIObjectPath *ref,
- uint16_t type,
- const char *devid)
-{
- CMPIStatus s;
- const char *props[] = {NULL};
- const char *inst_id;
- struct inst_list list;
- CMPIInstance *prev_inst = NULL;
- int i, ret;
-
- inst_list_init(&list);
- s = enum_rasds(_BROKER, ref, dominfo->name, type, props, &list);
- if (s.rc != CMPI_RC_OK) {
- CU_DEBUG("Failed to enumerate rasd");
- goto out;
- }
-
- for(i = 0; i < list.cur; i++) {
- prev_inst = list.list[i];
- ret = cu_get_str_prop(prev_inst,
- "InstanceID",
- &inst_id);
-
- if (ret != CMPI_RC_OK) {
- CU_DEBUG("Cannot get InstanceID ... ignoring");
- continue;
- }
-
- if (STREQ(inst_id, get_fq_devid(dominfo->name, (char *)devid)))
- break;
- }
-
- if (prev_inst == NULL)
- CU_DEBUG("PreviousInstance is NULL");
-
- out:
- inst_list_free(&list);
-
- return prev_inst;
-}
-
static CMPIStatus _update_resources_for(const CMPIContext *context,
const CMPIObjectPath *ref,
virDomainPtr dom,
@@ -2276,7 +2233,24 @@
}
else {
indication = strdup(RASD_IND_MODIFIED);
- prev_inst = get_previous_instance(dominfo, ref, type, devid);
+ char *dummy_name = NULL;
+
+ if (asprintf(&dummy_name, "%s/%s",dominfo->name, devid) == -1) {
+ CU_DEBUG("Unable to set name");
+ goto out;
+ }
+ s = get_rasd_by_name(_BROKER,
+ ref,
+ dummy_name,
+ type,
+ NULL,
+ &prev_inst);
+ free(dummy_name);
+
+ if (s.rc != CMPI_RC_OK) {
+ CU_DEBUG("Failed to get Previous Instance");
+ goto out;
+ }
}
s = func(dominfo, rasd, type, devid, NAMESPACE(ref));
[PATCH] [TEST] Adding vol_delete and modifying RPCS/10_create_storagevolume.py
by Deepti B. Kalakeri
# HG changeset patch
# User Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1253185946 14400
# Node ID 72616c6b52fe29ec35acd0f3c262b6c4247135ef
# Parent 26357e57d207c3437a06a0730e99c942111901f3
[TEST] Adding vol_delete and modifying RPCS/10_create_storagevolume.py
1) Adding vol_delete() to xm_virt_util.py to delete a volume of a pool.
2) Updating RPCS/10_create_storagevolume.py to include vol_delete.
Tested with KVM and current sources on SLES11.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
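For reference, a minimal usage sketch of the new helper (the host, volume, and pool
names below are illustrative only; the signature is the one added to xm_virt_util.py):

    from XenKvmLib.xm_virt_util import vol_delete

    server = 'localhost'
    ret = vol_delete(server, virt='KVM',
                     vol_name='cimtest-vol.img',
                     pool_name='cimtest-diskpool')
    if ret is None:
        # vol_delete() returns None when the underlying
        # "virsh vol-delete <vol> --pool <pool>" exits non-zero.
        print "Failed to delete the volume"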
diff -r 26357e57d207 -r 72616c6b52fe suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py
--- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py Wed Sep 16 09:03:39 2009 -0400
+++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py Thu Sep 17 07:12:26 2009 -0400
@@ -38,7 +38,7 @@
from XenKvmLib import rpcs_service
from XenKvmLib.assoc import Associators
from XenKvmLib.enumclass import GetInstance, EnumNames
-from XenKvmLib.xm_virt_util import virsh_version, vol_list
+from XenKvmLib.xm_virt_util import virsh_version, vol_list, vol_delete
from XenKvmLib.classes import get_typed_class, inst_to_mof
from XenKvmLib.common_util import destroy_diskpool
from XenKvmLib.pool import create_pool, undefine_diskpool, DIR_POOL
@@ -129,9 +129,22 @@
return PASS
-def cleanup_pool_vol(server, virt, pool_name, clean_vol, exp_vol_path):
+def cleanup_pool_vol(server, virt, pool_name, clean_pool, vol_path):
+ status = res = FAIL
+ ret = None
try:
- if clean_vol == True:
+ ret = vol_delete(server, virt, vol_name, pool_name)
+ if ret == None:
+ logger.error("Failed to delete the volume '%s'", vol_name)
+
+ if os.path.exists(vol_path):
+ cmd = "rm -rf %s" % vol_path
+ res, out = utils.run_remote(server, cmd)
+ if res != 0:
+ logger.error("'%s' was not removed, please remove it "
+ "manually", vol_path)
+
+ if clean_pool == True:
status = destroy_diskpool(server, virt, pool_name)
if status != PASS:
raise Exception("Unable to destroy diskpool '%s'" % pool_name)
@@ -140,16 +153,18 @@
if status != PASS:
raise Exception("Unable to undefine diskpool '%s'" \
% pool_name)
+
+
except Exception, details:
logger.error("Exception details: %s", details)
+ status = FAIL
+
+ if (ret == None and res != PASS) or (clean_pool == True and status != PASS):
+ logger.error("Failed to clean the env.....")
return FAIL
-
- if os.path.exists(exp_vol_path):
- cmd = "rm -rf %s" % exp_vol_path
- ret, out = utils.run_remote(server, cmd)
- if ret != 0:
- logger.info("'%s' was not removed, please remove it manually",
- exp_vol_path)
+ else:
+ logger.info("DEBUG PAssed ")
+
return PASS
@do_main(platform_sup)
@@ -211,18 +226,19 @@
found = verify_vol(server, virt, pool_name, exp_vol_path, found)
stovol_status = verify_sto_vol_rasd(virt, server, dp_inst_id,
exp_vol_path)
+
+ ret = cleanup_pool_vol(server, virt, pool_name,
+ clean_pool, exp_vol_path)
+ if res[0] == PASS and found == 1 and \
+ ret == PASS and stovol_status == PASS:
+ status = PASS
+ else:
+ return FAIL
except Exception, details:
logger.error("Exception details: %s", details)
status = FAIL
- ret = cleanup_pool_vol(server, virt, pool_name,
- clean_pool, exp_vol_path)
- if res[0] == PASS and found == 1 and \
- ret == PASS and stovol_status == PASS:
- status = PASS
- else:
- return FAIL
return status
if __name__ == "__main__":
diff -r 26357e57d207 -r 72616c6b52fe suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py
--- a/suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py Wed Sep 16 09:03:39 2009 -0400
+++ b/suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py Thu Sep 17 07:12:26 2009 -0400
@@ -238,9 +238,9 @@
return names
def vol_list(server, virt="KVM", pool_name=None):
- """ Function to list the volumes part of a pool"""
+ """ Function to list the volumes of a pool"""
- cmd = " virsh -c %s vol-list %s | sed -e '1,2 d' -e '$ d'" \
+ cmd = "virsh -c %s vol-list %s | sed -e '1,2 d' -e '$ d'" \
% (virt2uri(virt), pool_name)
ret, out = utils.run_remote(server, cmd)
if ret != 0:
@@ -248,6 +248,18 @@
return out
+def vol_delete(server, virt="KVM", vol_name=None, pool_name=None):
+ """ Function to delete the volume of a pool"""
+
+ cmd = "virsh -c %s vol-delete %s --pool %s"\
+ % (virt2uri(virt), vol_name, pool_name)
+ ret, out = utils.run_remote(server, cmd)
+ if ret != 0:
+ return None
+
+ return out
+
+
def virsh_vcpuinfo(server, dom, virt="Xen"):
cmd = "virsh -c %s vcpuinfo %s | grep VCPU | wc -l" % (virt2uri(virt),
dom)
[PATCH] [TEST] Adding vol_delete and modifying RPCS/10_create_storagevolume.py
by Deepti B. Kalakeri
# HG changeset patch
# User Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
# Date 1253186627 14400
# Node ID 0387cadda7d381253e2645a0bd9ff8bfd9990fa6
# Parent 26357e57d207c3437a06a0730e99c942111901f3
[TEST] Adding vol_delete and modifying RPCS/10_create_storagevolume.py
1) Adding vol_delete() to xm_virt_util.py to delete a volume of a pool.
2) Updating RPCS/10_create_storagevolume.py to include vol_delete.
Tested with KVM and current sources on SLES11.
Signed-off-by: Deepti B. Kalakeri <deeptik(a)linux.vnet.ibm.com>
diff -r 26357e57d207 -r 0387cadda7d3 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py
--- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py Wed Sep 16 09:03:39 2009 -0400
+++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/10_create_storagevolume.py Thu Sep 17 07:23:47 2009 -0400
@@ -38,7 +38,7 @@
from XenKvmLib import rpcs_service
from XenKvmLib.assoc import Associators
from XenKvmLib.enumclass import GetInstance, EnumNames
-from XenKvmLib.xm_virt_util import virsh_version, vol_list
+from XenKvmLib.xm_virt_util import virsh_version, vol_list, vol_delete
from XenKvmLib.classes import get_typed_class, inst_to_mof
from XenKvmLib.common_util import destroy_diskpool
from XenKvmLib.pool import create_pool, undefine_diskpool, DIR_POOL
@@ -129,9 +129,22 @@
return PASS
-def cleanup_pool_vol(server, virt, pool_name, clean_vol, exp_vol_path):
+def cleanup_pool_vol(server, virt, pool_name, clean_pool, vol_path):
+ status = res = FAIL
+ ret = None
try:
- if clean_vol == True:
+ ret = vol_delete(server, virt, vol_name, pool_name)
+ if ret == None:
+ logger.error("Failed to delete the volume '%s'", vol_name)
+
+ if os.path.exists(vol_path):
+ cmd = "rm -rf %s" % vol_path
+ res, out = utils.run_remote(server, cmd)
+ if res != 0:
+ logger.error("'%s' was not removed, please remove it "
+ "manually", vol_path)
+
+ if clean_pool == True:
status = destroy_diskpool(server, virt, pool_name)
if status != PASS:
raise Exception("Unable to destroy diskpool '%s'" % pool_name)
@@ -140,16 +153,16 @@
if status != PASS:
raise Exception("Unable to undefine diskpool '%s'" \
% pool_name)
+
+
except Exception, details:
logger.error("Exception details: %s", details)
+ status = FAIL
+
+ if (ret == None and res != PASS) or (clean_pool == True and status != PASS):
+ logger.error("Failed to clean the env.....")
return FAIL
-
- if os.path.exists(exp_vol_path):
- cmd = "rm -rf %s" % exp_vol_path
- ret, out = utils.run_remote(server, cmd)
- if ret != 0:
- logger.info("'%s' was not removed, please remove it manually",
- exp_vol_path)
+
return PASS
@do_main(platform_sup)
@@ -211,18 +224,19 @@
found = verify_vol(server, virt, pool_name, exp_vol_path, found)
stovol_status = verify_sto_vol_rasd(virt, server, dp_inst_id,
exp_vol_path)
+
+ ret = cleanup_pool_vol(server, virt, pool_name,
+ clean_pool, exp_vol_path)
+ if res[0] == PASS and found == 1 and \
+ ret == PASS and stovol_status == PASS:
+ status = PASS
+ else:
+ return FAIL
except Exception, details:
logger.error("Exception details: %s", details)
status = FAIL
- ret = cleanup_pool_vol(server, virt, pool_name,
- clean_pool, exp_vol_path)
- if res[0] == PASS and found == 1 and \
- ret == PASS and stovol_status == PASS:
- status = PASS
- else:
- return FAIL
return status
if __name__ == "__main__":
diff -r 26357e57d207 -r 0387cadda7d3 suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py
--- a/suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py Wed Sep 16 09:03:39 2009 -0400
+++ b/suites/libvirt-cim/lib/XenKvmLib/xm_virt_util.py Thu Sep 17 07:23:47 2009 -0400
@@ -238,9 +238,9 @@
return names
def vol_list(server, virt="KVM", pool_name=None):
- """ Function to list the volumes part of a pool"""
+ """ Function to list the volumes of a pool"""
- cmd = " virsh -c %s vol-list %s | sed -e '1,2 d' -e '$ d'" \
+ cmd = "virsh -c %s vol-list %s | sed -e '1,2 d' -e '$ d'" \
% (virt2uri(virt), pool_name)
ret, out = utils.run_remote(server, cmd)
if ret != 0:
@@ -248,6 +248,18 @@
return out
+def vol_delete(server, virt="KVM", vol_name=None, pool_name=None):
+ """ Function to delete the volume of a pool"""
+
+ cmd = "virsh -c %s vol-delete %s --pool %s"\
+ % (virt2uri(virt), vol_name, pool_name)
+ ret, out = utils.run_remote(server, cmd)
+ if ret != 0:
+ return None
+
+ return out
+
+
def virsh_vcpuinfo(server, dom, virt="Xen"):
cmd = "virsh -c %s vcpuinfo %s | grep VCPU | wc -l" % (virt2uri(virt),
dom)