[PATCH] Fix AllocationCapabilities to make EnumInstances and EnumInstanceNames work

# HG changeset patch # User Jay Gagnon <grendel@linux.vnet.ibm.com> # Date 1195510519 18000 # Node ID 9e000f9f9096cad7a40b1221b467fe99efc4b93f # Parent 79f43a4ffc38d7227e17432b971639ae20f6f6ed Fix AllocationCapabilities to make EnumInstances and EnumInstanceNames work. I don't have a ton of experience with instance providers, so this might require a little extra scrutiny as I'm sure there are several rookie mistakes. Signed-off-by: Jay Gagnon <grendel@linux.vnet.ibm.com> diff -r 79f43a4ffc38 -r 9e000f9f9096 src/Makefile.am --- a/src/Makefile.am Mon Nov 19 16:19:06 2007 -0500 +++ b/src/Makefile.am Mon Nov 19 17:15:19 2007 -0500 @@ -91,9 +91,9 @@ libVirt_ElementCapabilities_la_LIBADD = -lVirt_ComputerSystem \ -lVirt_HostSystem -libVirt_AllocationCapabilities_la_DEPENDENCIES = libVirt_RASD.la +libVirt_AllocationCapabilities_la_DEPENDENCIES = libVirt_RASD.la libVirt_DevicePool.la libVirt_AllocationCapabilities_la_SOURCES = Virt_AllocationCapabilities.c -libVirt_AllocationCapabilities_la_LIBADD = -lVirt_RASD +libVirt_AllocationCapabilities_la_LIBADD = -lVirt_RASD -lVirt_DevicePool libVirt_SettingsDefineCapabilities_la_DEPENDENCIES = libVirt_RASD.la libVirt_DevicePool.la libVirt_SettingsDefineCapabilities_la_SOURCES = Virt_SettingsDefineCapabilities.c diff -r 79f43a4ffc38 -r 9e000f9f9096 src/Virt_AllocationCapabilities.c --- a/src/Virt_AllocationCapabilities.c Mon Nov 19 16:19:06 2007 -0500 +++ b/src/Virt_AllocationCapabilities.c Mon Nov 19 17:15:19 2007 -0500 @@ -32,6 +32,7 @@ #include "Virt_AllocationCapabilities.h" #include "Virt_RASD.h" +#include "Virt_DevicePool.h" const static CMPIBroker *_BROKER; @@ -83,6 +84,88 @@ static CMPIStatus return_alloc_cap(const else CMReturnInstance(results, inst); out: + return s; +} + +static CMPIStatus alloc_cap_instances(const CMPIBroker *broker, + const CMPIObjectPath *ref, + const CMPIResult *results, + bool names_only, + const char **properties) +{ + int i; + virConnectPtr conn; + CMPIInstance *inst; + struct inst_list 
alloc_cap_list; + struct inst_list device_pool_list; + CMPIStatus s = {CMPI_RC_OK, NULL}; + + CU_DEBUG("In alloc_cap_instances()"); + + inst_list_init(&device_pool_list); + inst_list_init(&alloc_cap_list); + + if (!provider_is_responsible(broker, ref, &s)) + goto out; + + conn = lv_connect(_BROKER, &s); + if (conn == NULL) + goto out; + + for (i = 0; device_pool_names[i]; i++) { + s = get_pool_by_type(broker, + conn, + device_pool_names[i], + NAMESPACE(ref), + &device_pool_list); + if (s.rc != CMPI_RC_OK) { + cu_statusf(broker, &s, + CMPI_RC_ERR_FAILED, + "Error fetching device pools"); + goto out; + } + } + + for (i = 0; i < device_pool_list.cur; i++) { + inst = get_typed_instance(broker, + "AllocationCapabilities", + NAMESPACE(ref)); + if (inst == NULL) { + cu_statusf(broker, &s, + CMPI_RC_ERR_FAILED, + "Could not get alloc_cap instance"); + goto out; + } + + s = cu_copy_prop(broker, device_pool_list.list[i], inst, + "InstanceID", NULL); + if (s.rc != CMPI_RC_OK) { + cu_statusf(broker, &s, + CMPI_RC_ERR_FAILED, + "Error copying InstanceID"); + goto out; + } + + s = cu_copy_prop(broker, device_pool_list.list[i], inst, + "ResourceType", NULL); + if (s.rc != CMPI_RC_OK) { + cu_statusf(broker, &s, + CMPI_RC_ERR_FAILED, + "Error copying InstanceID"); + goto out; + } + + inst_list_add(&alloc_cap_list, inst); + } + + if (names_only) + cu_return_instance_names(results, &alloc_cap_list); + else + cu_return_instances(results, &alloc_cap_list); + + out: + inst_list_free(&alloc_cap_list); + inst_list_free(&device_pool_list); return s; } @@ -95,8 +178,23 @@ static CMPIStatus GetInstance(CMPIInstan return return_alloc_cap(reference, results, 0); } -DEFAULT_EI(); -DEFAULT_EIN(); +static CMPIStatus EnumInstanceNames(CMPIInstanceMI *self, + const CMPIContext *context, + const CMPIResult *results, + const CMPIObjectPath *reference) +{ + return alloc_cap_instances(_BROKER, reference, results, true, NULL); +} + +static CMPIStatus EnumInstances(CMPIInstanceMI *self, + const 
CMPIContext *context, + const CMPIResult *results, + const CMPIObjectPath *reference, + const char **properties) +{ + return alloc_cap_instances(_BROKER, reference, results, false, properties); +} + DEFAULT_CI(); DEFAULT_MI(); DEFAULT_DI();

JG> + for (i = 0; device_pool_names[i]; i++) { JG> + s = get_pool_by_type(broker, JG> + conn, JG> + device_pool_names[i], JG> + NAMESPACE(ref), JG> + &device_pool_list); JG> + if (s.rc != CMPI_RC_OK) { JG> + cu_statusf(broker, &s, JG> + CMPI_RC_ERR_FAILED, JG> + "Error fetching device pools"); JG> + goto out; JG> + } JG> + } We do this elsewhere too. Perhaps we need a get_all_pools() function in Virt_DevicePool? JG> + JG> + for (i = 0; i < device_pool_list.cur; i++) { JG> + inst = get_typed_instance(broker, JG> + "AllocationCapabilities", JG> + NAMESPACE(ref)); JG> + if (inst == NULL) { JG> + cu_statusf(broker, &s, JG> + CMPI_RC_ERR_FAILED, JG> + "Could not get alloc_cap instance"); JG> + goto out; JG> + } JG> + JG> + s = cu_copy_prop(broker, device_pool_list.list[i], inst, JG> + "InstanceID", NULL); This function looks great, but I think we're missing the patch that adds it to libcmpiutil :) JG> + if (s.rc != CMPI_RC_OK) { JG> + cu_statusf(broker, &s, JG> + CMPI_RC_ERR_FAILED, JG> + "Error copying InstanceID"); JG> + goto out; JG> + } JG> + JG> + s = cu_copy_prop(broker, device_pool_list.list[i], inst, JG> + "ResourceType", NULL); JG> + if (s.rc != CMPI_RC_OK) { JG> + cu_statusf(broker, &s, JG> + CMPI_RC_ERR_FAILED, JG> + "Error copying InstanceID"); JG> + goto out; JG> + } JG> + JG> + inst_list_add(&alloc_cap_list, inst); JG> + } To reduce the size of this loop, and to be in line with our other instance providers, I think it would be good to define a function that looks something like this: CMPIStatus get_ac_instance(CMPIInstance *pool, CMPIInstance **ac); This would encapsulate things a bit and also would provide a clean hook for ElementCapabilities, which will need to, given a pool, get an instance of AllocationCapabilities. -- Dan Smith IBM Linux Technology Center Open Hypervisor Team email: danms@us.ibm.com

Dan Smith wrote:
JG> + for (i = 0; device_pool_names[i]; i++) { JG> + s = get_pool_by_type(broker, JG> + conn, JG> + device_pool_names[i], JG> + NAMESPACE(ref), JG> + &device_pool_list); JG> + if (s.rc != CMPI_RC_OK) { JG> + cu_statusf(broker, &s, JG> + CMPI_RC_ERR_FAILED, JG> + "Error fetching device pools"); JG> + goto out; JG> + } JG> + }
We do this elsewhere too. Perhaps we need a get_all_pools() function in Virt_DevicePool?
Sounds reasonable. I'll do that.
JG> + JG> + for (i = 0; i < device_pool_list.cur; i++) { JG> + inst = get_typed_instance(broker, JG> + "AllocationCapabilities", JG> + NAMESPACE(ref)); JG> + if (inst == NULL) { JG> + cu_statusf(broker, &s, JG> + CMPI_RC_ERR_FAILED, JG> + "Could not get alloc_cap instance"); JG> + goto out; JG> + } JG> + JG> + s = cu_copy_prop(broker, device_pool_list.list[i], inst, JG> + "InstanceID", NULL);
This function looks great, but I think we're missing the patch that adds it to libcmpiutil :)
Oops. I'll send that out when I send the next iteration of this.
JG> + if (s.rc != CMPI_RC_OK) { JG> + cu_statusf(broker, &s, JG> + CMPI_RC_ERR_FAILED, JG> + "Error copying InstanceID"); JG> + goto out; JG> + } JG> + JG> + s = cu_copy_prop(broker, device_pool_list.list[i], inst, JG> + "ResourceType", NULL); JG> + if (s.rc != CMPI_RC_OK) { JG> + cu_statusf(broker, &s, JG> + CMPI_RC_ERR_FAILED, JG> + "Error copying InstanceID"); JG> + goto out; JG> + } JG> + JG> + inst_list_add(&alloc_cap_list, inst); JG> + }
To reduce the size of this loop, and to be in line with our other instance providers, I think it would be good to define a function that looks something like this:
CMPIStatus get_ac_instance(CMPIInstance *pool, CMPIInstance **ac);
This would encapsulate things a bit and also would provide a clean hook for ElementCapabilities, which will need to, given a pool, get an instance of AllocationCapabilities.
Sure, no problem. Given all the inexplicable struggle getting that bit working, I guess I didn't want to look at it anymore, so the lack of encapsulation didn't really strike me. Easy fix though, and good call. -- -Jay

Kaitlin Rupert wrote:
Jay Gagnon wrote:
+ conn = lv_connect(_BROKER, &s); + if (conn == NULL) + goto out;
Just a minor suggestion.. should lv_connect() be connect_by_classname() instead? =)
Honestly, I wasn't really sure whether there had been a resolution on that front yet, and it was working fine for testing purposes with lv_connect, so I figured I could always fix it later when the final word came down on lv_connect versus connect_by_classname. Have we settled on one yet? -- -Jay

JG> Honestly, I wasn't really sure on if there had been a resolution JG> on that front yet, and it was working fine for testing purposes JG> with lv_connect, so I figured I could always fix it later when the JG> final word came down on lv_connect versus connect_by_classname. JG> Have we settled on one yet? Yep, you should tweak this to use connect_by_classname() now, as I just pushed that set out to the tree. Thanks! -- Dan Smith IBM Linux Technology Center Open Hypervisor Team email: danms@us.ibm.com
participants (3)
-
Dan Smith
-
Jay Gagnon
-
Kaitlin Rupert