# HG changeset patch
# User Dan Smith <danms(a)us.ibm.com>
# Date 1202486140 28800
# Node ID a9d5094d7af37968f66e8c10c641f7682e2def3e
# Parent da31d60724565fb6da4bed307bcc69d710680306
(#4) Make the ProcRASD show CPU pinning information

This is done by embedding instances of the physical processor objects into
the HostResource[] field of the ProcRASD (per the MOF). In order for the
linkage to be present, you need sblim-cmpi-base installed (it provides the
Linux_Processor instances in root/cimv2 that get embedded).

Changes:
- Set the values on the instance and not as keys in the ref
- Remove silly double-semicolons from proc_set_cpu()
- Actually report the correct pinning information

Signed-off-by: Dan Smith <danms(a)us.ibm.com>
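
For reference, the pinning data that the new proc_set_cpu() reports comes
straight from libvirt: virDomainGetVcpus() fills one pinning bitmap per VCPU,
and VIR_CPU_USABLE() tests a physical CPU bit in that map. Below is a minimal
standalone sketch of that same pattern (not part of the patch; the program
name, the read-only connection, and taking the domain name from argv[1] are
assumptions made purely for illustration):

/*
 * Sketch only: print which physical CPUs each VCPU of a domain may run on.
 * Build (assumed): cc -o vcpupin vcpupin.c -lvirt
 */
#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(int argc, char **argv)
{
        virConnectPtr conn = NULL;
        virDomainPtr dom = NULL;
        virNodeInfo node;
        virDomainInfo info;
        virVcpuInfoPtr vinfo = NULL;
        unsigned char *cpumaps = NULL;
        int maplen;
        int ncpus;
        int v;
        int p;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <domain>\n", argv[0]);
                return 1;
        }

        conn = virConnectOpenReadOnly(NULL);
        if (conn == NULL)
                return 1;

        dom = virDomainLookupByName(conn, argv[1]);
        if ((dom == NULL) ||
            (virNodeGetInfo(conn, &node) == -1) ||
            (virDomainGetInfo(dom, &info) == -1))
                goto out;

        /* One bitmap per VCPU, wide enough to cover every physical CPU */
        maplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(node));
        vinfo = calloc(info.nrVirtCpu, sizeof(*vinfo));
        cpumaps = calloc(info.nrVirtCpu, maplen);
        if ((vinfo == NULL) || (cpumaps == NULL))
                goto out;

        /* vinfo is required by the call, but only cpumaps is examined here */
        ncpus = virDomainGetVcpus(dom, vinfo, info.nrVirtCpu, cpumaps, maplen);
        for (v = 0; v < ncpus; v++)
                for (p = 0; p < VIR_NODEINFO_MAXCPUS(node); p++)
                        if (VIR_CPU_USABLE(cpumaps, maplen, v, p))
                                printf("VCPU %d pinned to physical %d\n", v, p);

 out:
        free(vinfo);
        free(cpumaps);
        if (dom != NULL)
                virDomainFree(dom);
        virConnectClose(conn);

        return 0;
}
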
diff -r da31d6072456 -r a9d5094d7af3 src/Virt_RASD.c
--- a/src/Virt_RASD.c Wed Feb 06 09:18:23 2008 -0800
+++ b/src/Virt_RASD.c Fri Feb 08 07:55:40 2008 -0800
@@ -23,6 +23,7 @@
#include <string.h>
#include <inttypes.h>
#include <sys/stat.h>
+#include <unistd.h>

#include <cmpidt.h>
#include <cmpift.h>
@@ -91,6 +92,191 @@ char *rasd_to_xml(CMPIInstance *rasd)
{
/* FIXME: Remove this */
return NULL;
+}
+
+static bool proc_get_physical_ref(const CMPIBroker *broker,
+ uint32_t physnum,
+ struct inst_list *list)
+{
+ CMPIObjectPath *op = NULL;
+ CMPIStatus s;
+ char hostname[255];
+ char *devid = NULL;
+ CMPIInstance *inst;
+ bool result = false;
+
+ if (asprintf(&devid, "%i", physnum) == -1) {
+ CU_DEBUG("Failed to create DeviceID string");
+ goto out;
+ }
+
+ if (gethostname(hostname, sizeof(hostname)) == -1) {
+ CU_DEBUG("Hostname overflow");
+ goto out;
+ }
+
+ op = CMNewObjectPath(broker, "root/cimv2", "Linux_Processor", &s);
+ if ((op == NULL) || (s.rc != CMPI_RC_OK)) {
+ CU_DEBUG("Failed to get ObjectPath for processor");
+ goto out;
+ }
+
+ inst = CMNewInstance(broker, op, &s);
+ if ((inst == NULL) || (s.rc != CMPI_RC_OK)) {
+ CU_DEBUG("Failed to make instance");
+ goto out;
+ }
+
+ CMSetProperty(inst, "CreationClassName",
+ (CMPIValue *)"Linux_Processor", CMPI_chars);
+ CMSetProperty(inst, "SystemName",
+ (CMPIValue *)hostname, CMPI_chars);
+ CMSetProperty(inst, "SystemCreationClassName",
+ (CMPIValue *)"Linux_ComputerSystem", CMPI_chars);
+ CMSetProperty(inst, "DeviceID",
+ (CMPIValue *)devid, CMPI_chars);
+
+ inst_list_add(list, inst);
+
+ result = true;
+ out:
+ free(devid);
+
+ return result;
+}
+
+static uint32_t proc_set_cpu(const CMPIBroker *broker,
+ virNodeInfoPtr node,
+ virDomainPtr dom,
+ struct virt_device *dev,
+ struct inst_list *list)
+{
+ virVcpuInfoPtr vinfo = NULL;
+ virDomainInfo info;
+ uint8_t *cpumaps = NULL;
+ int ret;
+ int i;
+ int vcpu = dev->dev.vcpu.number;
+ int maplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(*node));
+
+ ret = virDomainGetInfo(dom, &info);
+ if (ret == -1) {
+ CU_DEBUG("Failed to get info for domain `%s'",
+ virDomainGetName(dom));
+ goto out;
+ }
+
+ if (dev->dev.vcpu.number >= info.nrVirtCpu) {
+ CU_DEBUG("VCPU %i higher than max of %i for %s",
+ dev->dev.vcpu.number,
+ info.nrVirtCpu,
+ virDomainGetName(dom));
+ goto out;
+ }
+
+ vinfo = calloc(info.nrVirtCpu, sizeof(*vinfo));
+ if (vinfo == NULL) {
+ CU_DEBUG("Failed to allocate memory for %i virVcpuInfo",
+ info.nrVirtCpu);
+ goto out;
+ }
+
+ cpumaps = calloc(info.nrVirtCpu, maplen);
+ if (cpumaps == NULL) {
+ CU_DEBUG("Failed to allocate memory for %ix%i maps",
+ info.nrVirtCpu, maplen);
+ goto out;
+ }
+
+ ret = virDomainGetVcpus(dom, vinfo, info.nrVirtCpu, cpumaps, maplen);
+ if (ret < info.nrVirtCpu) {
+ CU_DEBUG("Failed to get VCPU info for %s",
+ virDomainGetName(dom));
+ goto out;
+ }
+
+ for (i = 0; i < VIR_NODEINFO_MAXCPUS(*node); i++) {
+ if (VIR_CPU_USABLE(cpumaps, maplen, vcpu, i)) {
+ CU_DEBUG("VCPU %i pinned to physical %i",
+ vcpu, i);
+ proc_get_physical_ref(broker, i, list);
+ } else {
+ CU_DEBUG("VCPU %i not pinned to physical %i",
+ vcpu, i);
+ }
+ }
+ out:
+ free(vinfo);
+ free(cpumaps);
+
+ return 0;
+}
+
+static CMPIStatus proc_rasd_from_vdev(const CMPIBroker *broker,
+ struct virt_device *dev,
+ const char *host,
+ const CMPIObjectPath *ref,
+ CMPIInstance *inst)
+{
+ virConnectPtr conn = NULL;
+ virDomainPtr dom = NULL;
+ virNodeInfo node;
+ CMPIStatus s;
+ CMPIArray *array;
+ struct inst_list list;
+
+ inst_list_init(&list);
+
+ conn = connect_by_classname(broker, CLASSNAME(ref), &s);
+ if (conn == NULL) {
+ cu_statusf(broker, &s,
+ CMPI_RC_ERR_FAILED,
+ "Failed to connect for ProcRASD (%s)",
+ CLASSNAME(ref));
+ goto out;
+ }
+
+ dom = virDomainLookupByName(conn, host);
+ if (dom == NULL) {
+ cu_statusf(broker, &s,
+ CMPI_RC_ERR_NOT_FOUND,
+ "Unable to get domain for ProcRASD: %s", host);
+ goto out;
+ }
+
+ if (virNodeGetInfo(virDomainGetConnect(dom), &node) == -1) {
+ cu_statusf(broker, &s,
+ CMPI_RC_ERR_FAILED,
+ "Unable to get node info");
+ goto out;
+ }
+
+ proc_set_cpu(broker, &node, dom, dev, &list);
+
+ if (list.cur > 0) {
+ int i;
+
+ array = CMNewArray(broker,
+ list.cur,
+ CMPI_instance,
+ &s);
+ for (i = 0; i < list.cur; i++) {
+ CMSetArrayElementAt(array,
+ i,
+ (CMPIValue *)&list.list[i],
+ CMPI_instance);
+ }
+
+ CMSetProperty(inst, "HostResource",
+ (CMPIValue *)&array, CMPI_instanceA);
+ }
+
+ out:
+ inst_list_free(&list);
+ virDomainFree(dom);
+ virConnectClose(conn);
+
+ return s;
}

static CMPIInstance *rasd_from_vdev(const CMPIBroker *broker,
@@ -159,6 +345,8 @@ static CMPIInstance *rasd_from_vdev(cons
(CMPIValue *)&dev->dev.mem.size, CMPI_uint64);
CMSetProperty(inst, "Limit",
(CMPIValue *)&dev->dev.mem.maxsize, CMPI_uint64);
+ } else if (dev->type == VIRT_DEV_VCPU) {
+ proc_rasd_from_vdev(broker, dev, host, ref, inst);
}

/* FIXME: Put the HostResource in place */