[PATCH] Consolidate redundant code for generating XML for net devices
by Kaitlin Rupert
# HG changeset patch
# User Kaitlin Rupert <karupert(a)us.ibm.com>
# Date 1239906021 25200
# Node ID abf66df18db6275e312f8024ddadfd6a2b252bb1
# Parent 261a49710d0cb13c83e28f460583079db473d04f
Consolidate redundant code for generating XML for net devices
Most of the interface-specific functions contain the same code, so this can all
be moved into a common function.
Signed-off-by: Kaitlin Rupert <karupert(a)us.ibm.com>
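For readers less familiar with the libxml2 calls involved, here is a minimal standalone sketch of the xmlNewChild()/xmlNewProp() pattern the patch consolidates: a shared helper adds the <source> child, and only the attribute name ("bridge", "network", ...) varies by interface type. The element names mirror libvirt's domain XML, but this is only an illustration, not provider code:

/* sketch only: build with
 *   gcc ifxml.c -o ifxml $(xml2-config --cflags --libs) */
#include <stdio.h>
#include <libxml/tree.h>

/* common helper: add <source ATTR="VALUE"/> under the interface node */
static int add_source(xmlNodePtr nic, const char *attr, const char *value)
{
        xmlNodePtr src = xmlNewChild(nic, NULL, BAD_CAST "source", NULL);
        if (src == NULL)
                return -1;
        xmlNewProp(src, BAD_CAST attr, BAD_CAST value);
        return 0;
}

int main(void)
{
        xmlDocPtr doc = xmlNewDoc(BAD_CAST "1.0");
        xmlNodePtr nic = xmlNewNode(NULL, BAD_CAST "interface");
        xmlChar *buf = NULL;
        int size;

        xmlDocSetRootElement(doc, nic);
        xmlNewProp(nic, BAD_CAST "type", BAD_CAST "bridge");

        /* only the attribute name differs between interface types */
        add_source(nic, "bridge", "xenbr0");

        xmlDocDumpFormatMemory(doc, &buf, &size, 1);
        printf("%s", (char *)buf);

        xmlFree(buf);
        xmlFreeDoc(doc);
        return 0;
}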
diff -r 261a49710d0c -r abf66df18db6 libxkutil/xmlgen.c
--- a/libxkutil/xmlgen.c Thu Apr 16 11:14:10 2009 -0700
+++ b/libxkutil/xmlgen.c Thu Apr 16 11:20:21 2009 -0700
@@ -153,84 +153,45 @@
return msg;
}
-static const char *bridge_net_to_xml(xmlNodePtr root, struct net_device *dev)
+static const char *set_net_source(xmlNodePtr nic,
+ struct net_device *dev,
+ const char *src_type)
+{
+ xmlNodePtr tmp;
+
+ if (dev->source != NULL) {
+ tmp = xmlNewChild(nic, NULL, BAD_CAST "source", NULL);
+ if (tmp == NULL)
+ return XML_ERROR;
+ xmlNewProp(tmp, BAD_CAST src_type, BAD_CAST dev->source);
+ }
+
+ return NULL;
+}
+
+
+static const char *bridge_net_to_xml(xmlNodePtr nic, struct net_device *dev)
{
const char *script = "vif-bridge";
- xmlNodePtr nic;
xmlNodePtr tmp;
-
- nic = xmlNewChild(root, NULL, BAD_CAST "interface", NULL);
- if (nic == NULL)
- return XML_ERROR;
- xmlNewProp(nic, BAD_CAST "type", BAD_CAST dev->type);
-
- tmp = xmlNewChild(nic, NULL, BAD_CAST "mac", NULL);
- if (tmp == NULL)
- return XML_ERROR;
- xmlNewProp(tmp, BAD_CAST "address", BAD_CAST dev->mac);
+ const char *msg = NULL;
tmp = xmlNewChild(nic, NULL, BAD_CAST "script", NULL);
if (tmp == NULL)
return XML_ERROR;
xmlNewProp(tmp, BAD_CAST "path", BAD_CAST script);
- if (dev->source != NULL) {
- tmp = xmlNewChild(nic, NULL, BAD_CAST "source", NULL);
- if (tmp == NULL)
- return XML_ERROR;
- xmlNewProp(tmp, BAD_CAST "bridge", BAD_CAST dev->source);
- }
+ msg = set_net_source(nic, dev, "bridge");
- return NULL;
-}
-
-static const char *network_net_to_xml(xmlNodePtr root, struct net_device *dev)
-{
- xmlNodePtr nic;
- xmlNodePtr tmp;
-
- nic = xmlNewChild(root, NULL, BAD_CAST "interface", NULL);
- if (nic == NULL)
- return XML_ERROR;
- xmlNewProp(nic, BAD_CAST "type", BAD_CAST dev->type);
-
- tmp = xmlNewChild(nic, NULL, BAD_CAST "mac", NULL);
- if (tmp == NULL)
- return XML_ERROR;
- xmlNewProp(tmp, BAD_CAST "address", BAD_CAST dev->mac);
-
- if (dev->source != NULL) {
- tmp = xmlNewChild(nic, NULL, BAD_CAST "source", NULL);
- if (tmp == NULL)
- return XML_ERROR;
- xmlNewProp(tmp, BAD_CAST "network", BAD_CAST dev->source);
- }
-
- return NULL;
-}
-
-static const char *user_net_to_xml(xmlNodePtr root, struct net_device *dev)
-{
- xmlNodePtr nic;
- xmlNodePtr tmp;
-
- nic = xmlNewChild(root, NULL, BAD_CAST "interface", NULL);
- if (nic == NULL)
- return XML_ERROR;
- xmlNewProp(nic, BAD_CAST "type", BAD_CAST dev->type);
-
- tmp = xmlNewChild(nic, NULL, BAD_CAST "mac", NULL);
- if (tmp == NULL)
- return XML_ERROR;
- xmlNewProp(tmp, BAD_CAST "address", BAD_CAST dev->mac);
-
- return NULL;
+ return msg;
}
static const char *net_xml(xmlNodePtr root, struct domain *dominfo)
{
int i;
const char *msg = NULL;
+ xmlNodePtr nic;
+ xmlNodePtr tmp;
for (i = 0; (i < dominfo->dev_net_ct) && (msg == NULL); i++) {
struct virt_device *dev = &dominfo->dev_net[i];
@@ -239,12 +200,22 @@
struct net_device *net = &dev->dev.net;
+ nic = xmlNewChild(root, NULL, BAD_CAST "interface", NULL);
+ if (nic == NULL)
+ return XML_ERROR;
+ xmlNewProp(nic, BAD_CAST "type", BAD_CAST net->type);
+
+ tmp = xmlNewChild(nic, NULL, BAD_CAST "mac", NULL);
+ if (tmp == NULL)
+ return XML_ERROR;
+ xmlNewProp(tmp, BAD_CAST "address", BAD_CAST net->mac);
+
if (STREQ(dev->dev.net.type, "network"))
- msg = network_net_to_xml(root, net);
+ msg = set_net_source(nic, net, "network");
else if (STREQ(dev->dev.net.type, "bridge"))
msg = bridge_net_to_xml(root, net);
else if (STREQ(dev->dev.net.type, "user"))
- msg = user_net_to_xml(root, net);
+ continue;
else
msg = "Unknown interface type";
}
[PATCH] Remove ns param from rasd_to_vpool() in RPCS
by Kaitlin Rupert
# HG changeset patch
# User Kaitlin Rupert <karupert(a)us.ibm.com>
# Date 1239816487 25200
# Node ID 4574c6f289f29c9afd2bd3a3434360c17891e9fe
# Parent 0c6f5aa107a82840acffa2e3931ddf05726c35c5
Remove ns param from rasd_to_vpool() in RPCS
This parameter isn't being used by any of the functions.
Signed-off-by: Kaitlin Rupert <karupert(a)us.ibm.com>
diff -r 0c6f5aa107a8 -r 4574c6f289f2 src/Virt_ResourcePoolConfigurationService.c
--- a/src/Virt_ResourcePoolConfigurationService.c Thu Apr 09 10:07:27 2009 -0700
+++ b/src/Virt_ResourcePoolConfigurationService.c Wed Apr 15 10:28:07 2009 -0700
@@ -78,8 +78,7 @@
}
static const char *net_rasd_to_pool(CMPIInstance *inst,
- struct virt_pool *pool,
- const char *ns)
+ struct virt_pool *pool)
{
const char *val = NULL;
const char *msg = NULL;
@@ -116,8 +115,7 @@
#if VIR_USE_LIBVIRT_STORAGE
static const char *disk_rasd_to_pool(CMPIInstance *inst,
- struct virt_pool *pool,
- const char *ns)
+ struct virt_pool *pool)
{
const char *val = NULL;
const char *msg = NULL;
@@ -153,8 +151,7 @@
}
#else
static const char *disk_rasd_to_pool(CMPIInstance *inst,
- struct virt_pool *pool,
- const char *ns)
+ struct virt_pool *pool)
{
return "Storage pool creation not supported in this version of libvirt";
}
@@ -169,15 +166,14 @@
static const char *rasd_to_vpool(CMPIInstance *inst,
struct virt_pool *pool,
- uint16_t type,
- const char *ns)
+ uint16_t type)
{
pool->type = type;
if (type == CIM_RES_TYPE_NET) {
- return net_rasd_to_pool(inst, pool, ns);
+ return net_rasd_to_pool(inst, pool);
} else if (type == CIM_RES_TYPE_DISK) {
- return disk_rasd_to_pool(inst, pool, ns);
+ return disk_rasd_to_pool(inst, pool);
}
pool->type = CIM_RES_TYPE_UNKNOWN;
@@ -215,7 +211,7 @@
if (res_type_from_rasd_classname(CLASSNAME(op), &type) != CMPI_RC_OK)
return "Unable to determine resource type";
- msg = rasd_to_vpool(inst, pool, type, NAMESPACE(op));
+ msg = rasd_to_vpool(inst, pool, type);
return msg;
}
[PATCH] [TEST]Update RPCS/04 to validate that the Network child pool can be created through the providers
by yunguol@cn.ibm.com
# HG changeset patch
# User Guolian Yun <yunguol(a)cn.ibm.com>
# Date 1239868524 25200
# Node ID 860c994006a12104618e29bf051730993568bcc1
# Parent 4ec367c94c356de7fac5a19ffe215c316d0cdcd1
[TEST]Update RPCS/04 to validate that the Network child pool can be created through the providers
A follow-up patch will validate Disk child pool creation and verification in the same tc
Tested for KVM with current sources
Signed-off-by: Guolian Yun<yunguol(a)cn.ibm.com>
diff -r 4ec367c94c35 -r 860c994006a1 suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py
--- a/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Wed Apr 08 02:22:53 2009 -0700
+++ b/suites/libvirt-cim/cimtest/ResourcePoolConfigurationService/04_CreateChildResourcePool.py Thu Apr 16 00:55:24 2009 -0700
@@ -52,32 +52,84 @@
from XenKvmLib import rpcs_service
from CimTest.Globals import logger
from CimTest.ReturnCodes import FAIL, PASS
-from XenKvmLib.const import do_main, platform_sup
+from XenKvmLib.const import do_main, platform_sup, get_provider_version
from XenKvmLib.classes import get_typed_class
+from pywbem.cim_obj import CIMInstanceName, CIMInstance
+from XenKvmLib.enumclass import EnumInstances
+from XenKvmLib.common_util import destroy_netpool
cim_errno = pywbem.CIM_ERR_NOT_SUPPORTED
cim_mname = "CreateChildResourcePool"
+libvirt_cim_child_pool_rev = 837
+testpool = "mypool"
+
+def verify_pool(pool_list, poolname):
+ status = PASS
+ if len(pool_list) < 1:
+ logger.error("Returen %i instances, expected at least one instance",
+ len(pool_list))
+ return FAIL
+
+ for i in range(0, len(pool_list)):
+ ret_pool = pool_list[i].InstanceID
+ if ret_pool == poolname:
+ break
+ elif ret_pool != poolname and i == len(pool_list)-1:
+ logger.error("Can not find expected pool")
+ status = FAIL
+
+ return status
@do_main(platform_sup)
def main():
options = main.options
rpcs_conn = eval("rpcs_service." + get_typed_class(options.virt, \
"ResourcePoolConfigurationService"))(options.ip)
- try:
- rpcs_conn.CreateChildResourcePool()
- except pywbem.CIMError, (err_no, desc):
- if err_no == cim_errno :
- logger.info("Got expected exception for '%s' service", cim_mname)
- logger.info("Errno is '%s' ", err_no)
- logger.info("Error string is '%s'", desc)
- return PASS
- else:
- logger.error("Unexpected rc code %s and description %s\n",
- err_no, desc)
+
+ curr_cim_rev, changeset = get_provider_version(options.virt, options.ip)
+ if curr_cim_rev < libvirt_cim_child_pool_rev:
+ try:
+ rpcs_conn.CreateChildResourcePool()
+ except pywbem.CIMError, (err_no, desc):
+ if err_no == cim_errno :
+ logger.info("Got expected exception for '%s'service", cim_mname)
+ logger.info("Errno is '%s' ", err_no)
+ logger.info("Error string is '%s'", desc)
+ return PASS
+ else:
+ logger.error("Unexpected rc code %s and description %s\n",
+ err_no, desc)
+ return FAIL
+ elif curr_cim_rev >= libvirt_cim_child_pool_rev:
+ nprasd = get_typed_class(options.virt,
+ 'NetPoolResourceAllocationSettingData')
+ np_id = 'NetworkPool/%s' % testpool
+ iname = CIMInstanceName(nprasd,
+ namespace = 'root/virt',
+ keybindings = {'InstanceID':np_id})
+ logger.info('iname is %s', iname)
+ nrasd = CIMInstance(nprasd, path = iname,
+ properties ={
+ "Address" : "192.168.0.30",
+ "Netmask" : "255.255.255.0",
+ "IPRangeStart" : "192.168.0.31",
+ "IPRangeEnd" : "192.168.0.57",
+ "ForwardMode":"route eth1"})
+ try:
+ rpcs_conn.CreateChildResourcePool(ElementName=testpool,
+ Settings=[nrasd.tomof()])
+ except pywbem.CIMError, details:
+ logger.error("Invoke CreateChildResourcePool() error")
+ logger.error(details)
return FAIL
-
- logger.error("The execution should not have reached here!!")
- return FAIL
+
+ np = get_typed_class(options.virt, 'NetworkPool')
+ netpool = EnumInstances(options.ip, np)
+ status = verify_pool(netpool, np_id)
+
+ destroy_netpool(options.ip, options.virt, testpool)
+ return status
+
if __name__ == "__main__":
sys.exit(main())
virNodeGetFreeMemory() returns bytes with Xen driver
by Tyrel Datwyler
The libvirt API states that the return value of virNodeGetFreeMemory() is
in kilobytes. When making this call against a Xen hypervisor and comparing
the returned value with what is listed by "xm info", it appears that libvirt
is returning the free memory in bytes and not kilobytes. Can anybody
confirm this behavior?
This code snippet:
conn = virConnectOpen("xen:///localhost/");
hostmem = virNodeGetFreeMemory(conn);
fprintf(stdout, "Host free memory = %llu\n", hostmem);
produces the following output:
Host free memory = 3236012032
The following comes from running "xm info" on the Xen host:
free_memory : 3086
The libvirt value is convertible into the "xm" returned value if bytes are
assumed.
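A minimal standalone sketch for reproducing the check, assuming the same xen:///localhost/ URI as above and a host with the libvirt development headers installed; it prints the raw return value alongside a bytes-to-MiB conversion, which only lines up with the "xm info" figure if the value is in bytes:

/* sketch only: build with
 *   gcc freemem.c -o freemem $(pkg-config --cflags --libs libvirt) */
#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
        virConnectPtr conn = virConnectOpen("xen:///localhost/");
        unsigned long long freemem;

        if (conn == NULL) {
                fprintf(stderr, "Unable to connect to the hypervisor\n");
                return 1;
        }

        freemem = virNodeGetFreeMemory(conn);
        printf("Raw value: %llu\n", freemem);
        printf("As bytes:  %llu MiB\n", freemem / (1024 * 1024));
        /* 3236012032 / (1024 * 1024) == 3086, which matches "xm info";
         * interpreting the value as kilobytes would not */

        virConnectClose(conn);
        return 0;
}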
Regards,
Tyrel Datwyler
Linux Software Engineer
eServer Systems Management
IBM Systems and Technology Group
Email: tyreld(a)us.ibm.com
External: (503) 578-3489, T/L: 775-3489
[PATCH] [TEST] Fix SettingsDefineCapabilities/01_forward.py for the provider's changes to the output of the AC association when using DiskPool as input
by yunguol@cn.ibm.com
# HG changeset patch
# User Guolian Yun <yunguol(a)cn.ibm.com>
# Date 1239851971 25200
# Node ID de971738caf9e9b3eae3c3fe8dc65cc7114a7ff1
# Parent 4ec367c94c356de7fac5a19ffe215c316d0cdcd1
[TEST] Fix SettingsDefineCapabilities/01_forward.py for the provider's changes to the output of the AC association when using DiskPool as input
Tested for KVM with current sources and rpm
Signed-off-by: Guolian Yun<yunguol(a)cn.ibm.com
diff -r 4ec367c94c35 -r de971738caf9 suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py
--- a/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Wed Apr 08 02:22:53 2009 -0700
+++ b/suites/libvirt-cim/cimtest/SettingsDefineCapabilities/01_forward.py Wed Apr 15 20:19:31 2009 -0700
@@ -64,10 +64,12 @@
from XenKvmLib.classes import get_typed_class
from XenKvmLib.common_util import print_field_error
from XenKvmLib.const import get_provider_version
+from XenKvmLib.pool import enum_volumes
platform_sup = ['Xen', 'KVM', 'XenFV', 'LXC']
libvirt_rasd_template_changes = 707
libvirt_rasd_new_changes = 805
+libvirt_rasd_dpool_changes = 839
memid = "MemoryPool/0"
procid = "ProcessorPool/0"
@@ -184,8 +186,12 @@
if curr_cim_rev >= libvirt_rasd_new_changes:
exp_len = 16
if virt == 'KVM':
- if curr_cim_rev >= libvirt_rasd_new_changes:
+ if curr_cim_rev >= libvirt_rasd_new_changes and \
+ curr_cim_rev < libvirt_rasd_dpool_changes:
exp_len = 8
+ if curr_cim_rev >= libvirt_rasd_dpool_changes:
+ volumes = enum_volumes(virt, server)
+ exp_len = volumes * 4
if len(assoc_info) != exp_len:
logger.error("%s returned %i ResourcePool objects instead"
[PATCH] [TEST] #2 Fix 03_hs_to_settdefcap.py for the provider's changes to the output of the AC association when using DiskPool as input
by yunguol@cn.ibm.com
# HG changeset patch
# User Guolian Yun <yunguol(a)cn.ibm.com>
# Date 1239851524 25200
# Node ID 352e8509493cfce2620bca94798a1012c63cb410
# Parent 4ec367c94c356de7fac5a19ffe215c316d0cdcd1
[TEST] #2 Fix 03_hs_to_settdefcap.py for the provider's changes to the output of the AC association when using DiskPool as input
Updates from 1 to 2:
Query libvirt to get the number of DiskRASD instances instead of hardcoding it
Tested for KVM with current sources and rpm
Signed-off-by: Guolian Yun<yunguol(a)cn.ibm.com>
diff -r 4ec367c94c35 -r 352e8509493c suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py
--- a/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py Wed Apr 08 02:22:53 2009 -0700
+++ b/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py Wed Apr 15 20:12:04 2009 -0700
@@ -51,12 +51,14 @@
from XenKvmLib.test_xml import testxml
from XenKvmLib.test_doms import destroy_and_undefine_all
from XenKvmLib.const import get_provider_version
+from XenKvmLib.pool import enum_volumes
sup_types = ['Xen', 'KVM', 'XenFV', 'LXC']
test_dom = "domgst_test"
test_vcpus = 1
libvirt_rasd_template_changes = 707
libvirt_rasd_new_changes = 805
+libvirt_rasd_dpool_changes = 839
def setup_env(server, virt="Xen"):
status = PASS
@@ -227,8 +229,12 @@
if curr_cim_rev >= libvirt_rasd_new_changes:
exp_len = 16
if virt == 'KVM':
- if curr_cim_rev >= libvirt_rasd_new_changes:
+ if curr_cim_rev >= libvirt_rasd_new_changes and \
+ curr_cim_rev < libvirt_rasd_dpool_changes:
exp_len = 8
+ if curr_cim_rev >= libvirt_rasd_dpool_changes:
+ volumes = enum_volumes(virt, server)
+ exp_len = volumes * 4
if len(assoc_info) != exp_len:
logger.error("'%s' returned %i RASD objects instead of %i",
diff -r 4ec367c94c35 -r 352e8509493c suites/libvirt-cim/lib/XenKvmLib/pool.py
--- a/suites/libvirt-cim/lib/XenKvmLib/pool.py Wed Apr 08 02:22:53 2009 -0700
+++ b/suites/libvirt-cim/lib/XenKvmLib/pool.py Wed Apr 15 20:12:04 2009 -0700
@@ -26,6 +26,8 @@
from XenKvmLib.classes import get_typed_class
from XenKvmLib.const import get_provider_version
from XenKvmLib.enumclass import EnumInstances
+from VirtLib.utils import run_remote
+from XenKvmLib.xm_virt_util import virt2uri
input_graphics_pool_rev = 757
@@ -78,3 +80,16 @@
return pool_insts, PASS
+def enum_volumes(virt, server):
+ volume = 0
+ cmd = "virsh -c %s vol-list %s | sed -e '1,2 d' -e '$ d'" % \
+ (virt2uri(virt), 'cimtest-diskpool')
+ ret, out = run_remote(server ,cmd)
+ if ret != 0:
+ return None
+ lines = out.split("\n")
+ for line in lines:
+ volume = volume + 1
+
+ return volume
+
[PATCH] [TEST] #2 Fix 03_hs_to_settdefcap.py for the provider's changes to the output of the AC association when using DiskPool as input
by yunguol@cn.ibm.com
# HG changeset patch
# User Guolian Yun <yunguol(a)cn.ibm.com>
# Date 1239849166 25200
# Node ID 7b81278fbdd9d4337ac7422e98bcfebc93b70e64
# Parent 4ec367c94c356de7fac5a19ffe215c316d0cdcd1
[TEST] #2 Fix 03_hs_to_settdefcap.py for the provider's changes to the output of the AC association when using DiskPool as input
Updates from 1 to 2:
Query libvirt to get the number of DiskRASD instances and then compare it to the result
Tested for KVM with current sources and rpm
Signed-off-by: Guolian Yun<yunguol(a)cn.ibm.com>
diff -r 4ec367c94c35 -r 7b81278fbdd9 suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py
--- a/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py Wed Apr 08 02:22:53 2009 -0700
+++ b/suites/libvirt-cim/cimtest/HostSystem/03_hs_to_settdefcap.py Wed Apr 15 19:32:46 2009 -0700
@@ -40,7 +40,9 @@
# Feb 13 2008
import sys
-from VirtLib.live import full_hostname
+from VirtLib.live import full_hostname
+from VirtLib.utils import run_remote
+from XenKvmLib.xm_virt_util import virt2uri
from XenKvmLib.common_util import get_host_info
from XenKvmLib.assoc import Associators
from XenKvmLib.vxml import XenXML, KVMXML, get_class
@@ -57,6 +59,7 @@
test_vcpus = 1
libvirt_rasd_template_changes = 707
libvirt_rasd_new_changes = 805
+libvirt_rasd_dpool_changes = 839
def setup_env(server, virt="Xen"):
status = PASS
@@ -216,6 +219,16 @@
curr_cim_rev, changeset = get_provider_version(virt, server)
exp_len = 4
+
+ volume = 0
+ cmd = "virsh -c %s vol-list %s" % (virt2uri(server), 'cimtest-diskpool')
+ ret, out = run_remote(server ,cmd)
+ if ret != 0:
+ return None
+ lines = out.split("\n")
+ for line in lines:
+ volume = volume + 1
+
if 'DiskPool' in ap['InstanceID']:
# For Diskpool, we have info 1 for each of Min, Max,
# default, Increment and 1 for each of PV and FV
@@ -227,8 +240,11 @@
if curr_cim_rev >= libvirt_rasd_new_changes:
exp_len = 16
if virt == 'KVM':
- if curr_cim_rev >= libvirt_rasd_new_changes:
+ if curr_cim_rev >= libvirt_rasd_new_changes and \
+ curr_cim_rev < libvirt_rasd_dpool_changes:
exp_len = 8
+ if curr_cim_rev >= libvirt_rasd_dpool_changes:
+ exp_len = volume
if len(assoc_info) != exp_len:
logger.error("'%s' returned %i RASD objects instead of %i",
Release of libvirt-cim 0.5.5
by Kaitlin Rupert
A new release of libvirt-cim is now available at: ftp://libvirt.org/libvirt-cim/
* Features:
- Basic network pool creation and deletion
- Basic storage pool creation and deletion (directory pools only)
- Storage volumes in a pool represented as template ResourceAllocationSettingData
- Advertise profiles via SLP
* Bug fixes:
- Improved reporting: the libvirt error is now included in the error message returned to the user
- ElementConformsToProfile now supports the RegisteredProfile <--> AllocationCapabilities association
- Prevent seg fault in get_typed_class() by returning if a NULL value is specified for refcn
- Fix xmlgen so XML isn't generated for devices with an UNKNOWN device type
- Set the device type of a resource to UNKNOWN after the call to detach_device(), otherwise detach_device() doesn't know what device type to handle
- Allow user to specify guest's emulator and expose emulator via the guest's VirtualSystemSettingData
- Improve randomization of MAC generation
- Determine whether the host system supports KVM or QEMU guests. If KVM support is available, create a KVM guest. Otherwise, create a QEMU guest (one way to probe this is sketched after this list)
- Fix some connection leaks and some libvirt pointer leaks
- Be sure to specify the root/virt namespace (instead of root/interop) in ElementConformsToProfile when building the instance of HostSystem
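A rough sketch of how the KVM-vs-QEMU decision mentioned above can be probed from libvirt's capabilities XML; the provider's actual check may differ, and the URI and the substring test here are assumptions for illustration only:

/* sketch only: build with
 *   gcc caps.c -o caps $(pkg-config --cflags --libs libvirt) */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libvirt/libvirt.h>

int main(void)
{
        virConnectPtr conn = virConnectOpen("qemu:///system");
        char *caps;

        if (conn == NULL)
                return 1;

        caps = virConnectGetCapabilities(conn);
        if (caps != NULL) {
                /* the capabilities XML lists a <domain type='kvm'> guest
                 * entry when hardware virtualization is usable */
                if (strstr(caps, "domain type='kvm'") != NULL)
                        printf("KVM guests supported\n");
                else
                        printf("falling back to QEMU (emulated) guests\n");
                free(caps);
        }

        virConnectClose(conn);
        return 0;
}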
[PATCH] Pass OP with root/virt NS to get_host() in ECTP
by Kaitlin Rupert
# HG changeset patch
# User Kaitlin Rupert <karupert(a)us.ibm.com>
# Date 1239679403 25200
# Node ID c9160bb4698b629c7832efcd32f6641fd74449de
# Parent 8d0ce3b79aaacde9a7b25d8e2fe2eb79878229ee
Pass OP with root/virt NS to get_host() in ECTP
When calling get_host() in ECTP, be sure to pass an object path with the root/virt
namespace instead of root/interop. get_host() uses the namespace of the ref
when creating the instance, so this needs to be set properly.
Signed-off-by: Kaitlin Rupert <karupert(a)us.ibm.com>
diff -r 8d0ce3b79aaa -r c9160bb4698b src/Virt_ElementConformsToProfile.c
--- a/src/Virt_ElementConformsToProfile.c Wed Apr 08 22:22:42 2009 -0400
+++ b/src/Virt_ElementConformsToProfile.c Mon Apr 13 20:23:23 2009 -0700
@@ -64,15 +64,7 @@
if (class == NULL)
return s;
- if (STREQC(class, "HostSystem")) {
- s = get_host(_BROKER, info->context, ref, &inst, false);
- if (s.rc == CMPI_RC_OK)
- inst_list_add(list, inst);
- goto out;
- }
-
- classname = get_typed_class(pfx_from_conn(conn),
- class);
+ classname = get_typed_class(pfx_from_conn(conn), class);
if (classname == NULL) {
cu_statusf(_BROKER, &s,
CMPI_RC_ERR_FAILED,
@@ -83,7 +75,14 @@
op = CMNewObjectPath(_BROKER, CIM_VIRT_NS, classname, &s);
if ((s.rc != CMPI_RC_OK) || CMIsNullObject(op))
goto out;
-
+
+ if (STREQC(class, "HostSystem")) {
+ s = get_host(_BROKER, info->context, op, &inst, false);
+ if (s.rc == CMPI_RC_OK)
+ inst_list_add(list, inst);
+ goto out;
+ }
+
en = CBEnumInstances(_BROKER, info->context , op, info->properties, &s);
if (en == NULL) {
cu_statusf(_BROKER, &s,
[PATCH] Add logic to delete storage pools
by Kaitlin Rupert
# HG changeset patch
# User Kaitlin Rupert <karupert(a)us.ibm.com>
# Date 1239296847 25200
# Node ID cdf4fa212c15f1b22b0b4b1d1535e1716e1e52d2
# Parent 3b7ba02cd34a4b91903e4d412fc7d5dd75779598
Add logic to delete storage pools.
Older versions of libvirt don't support storage pool creation / deletion, so
the pool deletion logic needs to return a different error message for those
versions.
Signed-off-by: Kaitlin Rupert <karupert(a)us.ibm.com>
diff -r 3b7ba02cd34a -r cdf4fa212c15 libxkutil/pool_parsing.c
--- a/libxkutil/pool_parsing.c Tue Apr 07 21:54:43 2009 -0300
+++ b/libxkutil/pool_parsing.c Thu Apr 09 10:07:27 2009 -0700
@@ -138,9 +138,32 @@
err1:
virNetworkFree(ptr);
+
+ } else if (res_type == CIM_RES_TYPE_DISK) {
+#if VIR_USE_LIBVIRT_STORAGE
+ virStoragePoolPtr ptr = virStoragePoolLookupByName(conn, name);
+ if (ptr == NULL) {
+ CU_DEBUG("Storage pool %s is not defined", name);
+ return 0;
+ }
+
+ if (virStoragePoolDestroy(ptr) != 0) {
+ CU_DEBUG("Unable to destroy storage pool");
+ goto err2;
+ }
+
+ if (virStoragePoolUndefine(ptr) != 0) {
+ CU_DEBUG("Unable to undefine storage pool");
+ goto err2;
+ }
+
+ ret = 1;
+
+ err2:
+ virStoragePoolFree(ptr);
+#endif
}
-
return ret;
}
diff -r 3b7ba02cd34a -r cdf4fa212c15 src/Virt_ResourcePoolConfigurationService.c
--- a/src/Virt_ResourcePoolConfigurationService.c Tue Apr 07 21:54:43 2009 -0300
+++ b/src/Virt_ResourcePoolConfigurationService.c Thu Apr 09 10:07:27 2009 -0700
@@ -139,6 +139,18 @@
return msg;
}
+
+static const char *_delete_pool(virConnectPtr conn,
+ const char *pool_name,
+ uint16_t type)
+{
+ const char *msg = NULL;
+
+ if (destroy_pool(conn, pool_name, type) == 0)
+ msg = "Unable to destroy resource pool";
+
+ return msg;
+}
#else
static const char *disk_rasd_to_pool(CMPIInstance *inst,
struct virt_pool *pool,
@@ -146,6 +158,13 @@
{
return "Storage pool creation not supported in this version of libvirt";
}
+
+static const char *_delete_pool(virConnectPtr conn,
+ const char *pool_name,
+ uint16_t type)
+{
+ return "Storage pool deletion not supported in this version of libvirt";
+}
#endif
static const char *rasd_to_vpool(CMPIInstance *inst,
@@ -379,6 +398,7 @@
CMPIObjectPath *pool = NULL;
virConnectPtr conn = NULL;
const char *poolid = NULL;
+ const char *msg = NULL;
char *pool_name = NULL;
uint16_t type;
@@ -432,10 +452,12 @@
goto out;
}
- if (destroy_pool(conn, pool_name, type) == 0) {
+ msg = _delete_pool(conn, pool_name, type);
+ if (msg != NULL) {
cu_statusf(_BROKER, &s,
CMPI_RC_ERR_FAILED,
- "Unable to destroy resource pool");
+ "Storage pool deletion error: %s", msg);
+
goto out;
}
15 years, 8 months