Use the guest agent CPU state code and the upgraded hypervisor vCPU state
retrieval code to implement the virDomainGetVCPUMap() API.
---
src/qemu/qemu_driver.c | 179 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 179 insertions(+)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 99daf90..ba3a6e1 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -15198,6 +15198,184 @@ qemuNodeSuspendForDuration(virConnectPtr conn ATTRIBUTE_UNUSED,
return nodeSuspendForDuration(target, duration, flags);
}
+#define MATCH(FLAG) (flags & (FLAG))
+/**
+ * qemuDomainGetVCPUMap:
+ * @dom: domain object
+ * @cpumap: on success filled with a pointer to a freshly allocated raw
+ *          bitmap (caller frees); may be NULL if the caller only wants
+ *          the map size
+ * @flags: bitwise-OR of virDomainVCPUMapFlags selecting the data source
+ *         (hypervisor or guest agent) and the filter to apply
+ *
+ * Use the guest agent or the hypervisor vCPU state retrieval code to
+ * build a bitmap of the domain's vCPUs according to @flags.
+ *
+ * Returns the size of the bitmap in bits on success, -1 on error.
+ */
+static int
+qemuDomainGetVCPUMap(virDomainPtr dom,
+                     unsigned char **cpumap,
+                     unsigned int flags)
+{
+    virQEMUDriverPtr driver = dom->conn->privateData;
+    virDomainObjPtr vm;
+    qemuDomainObjPrivatePtr priv;
+
+    qemuAgentCPUInfoPtr agentinfo = NULL;
+    qemuMonitorCPUInfoPtr vcpuinfo = NULL;
+    int ninfo = -1;
+
+    virBitmapPtr cpus = NULL;
+    int i;
+    int ret = -1;
+    int dummy;
+
+    virCheckFlags(VIR_DOMAIN_VCPU_MAP_HYPERVISOR |
+                  VIR_DOMAIN_VCPU_MAP_AGENT |
+                  VIR_DOMAIN_VCPU_MAP_POSSIBLE |
+                  VIR_DOMAIN_VCPU_MAP_ONLINE |
+                  VIR_DOMAIN_VCPU_MAP_OFFLINE |
+                  VIR_DOMAIN_VCPU_MAP_OFFLINABLE |
+                  VIR_DOMAIN_VCPU_MAP_ACTIVE, -1);
+
+    if (!(vm = qemuDomObjFromDomain(dom)))
+        return -1;
+
+    priv = vm->privateData;
+
+    /* request data from the guest */
+    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+        goto cleanup;
+
+    if (!virDomainObjIsActive(vm)) {
+        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+                       _("domain is not running"));
+        goto endjob;
+    }
+
+    if (MATCH(VIR_DOMAIN_VCPU_MAP_AGENT)) {
+        if (!priv->agent) {
+            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+                           _("guest agent is not configured"));
+            goto endjob;
+        }
+        qemuDomainObjEnterAgent(vm);
+        ninfo = qemuAgentGetVCPUs(priv->agent, &agentinfo);
+        qemuDomainObjExitAgent(vm);
+    } else {
+        qemuDomainObjEnterMonitor(driver, vm);
+        ninfo = qemuMonitorGetCPUInfo(priv->mon, &vcpuinfo);
+        qemuDomainObjExitMonitor(driver, vm);
+    }
+
+endjob:
+    if (qemuDomainObjEndJob(driver, vm) == 0)
+        vm = NULL;
+
+    if (ninfo < 0)
+        goto cleanup;
+
+    if (MATCH(VIR_DOMAIN_VCPU_MAP_AGENT)) {
+        unsigned int maxcpu = 0;
+
+        if (MATCH(VIR_DOMAIN_VCPU_MAP_ACTIVE)) {
+            virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
+                           _("qemu guest agent doesn't report active vCPUs"));
+            goto cleanup;
+        }
+
+        /* find the highest vCPU id reported by the agent */
+        for (i = 0; i < ninfo; i++) {
+            if (agentinfo[i].id > maxcpu)
+                maxcpu = agentinfo[i].id;
+        }
+
+        /* allocate the returned array, vCPUs are indexed from 0 */
+        if (!(cpus = virBitmapNew(maxcpu + 1))) {
+            virReportOOMError();
+            goto cleanup;
+        }
+
+        /* VIR_DOMAIN_VCPU_MAP_POSSIBLE: every vCPU the agent knows about */
+        for (i = 0; i < ninfo; i++)
+            ignore_value(virBitmapSetBit(cpus, agentinfo[i].id));
+
+        if (MATCH(VIR_DOMAIN_VCPU_MAP_ONLINE)) {
+            for (i = 0; i < ninfo; i++) {
+                if (!agentinfo[i].online)
+                    ignore_value(virBitmapClearBit(cpus, agentinfo[i].id));
+            }
+        }
+
+        if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINE)) {
+            for (i = 0; i < ninfo; i++) {
+                if (agentinfo[i].online)
+                    ignore_value(virBitmapClearBit(cpus, agentinfo[i].id));
+            }
+        }
+
+        if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINABLE)) {
+            for (i = 0; i < ninfo; i++) {
+                if (!agentinfo[i].offlinable)
+                    ignore_value(virBitmapClearBit(cpus, agentinfo[i].id));
+            }
+        }
+    } else {
+        if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINABLE)) {
+            virReportError(VIR_ERR_INVALID_ARG, "%s",
+                           _("qemu driver doesn't support reporting of "
+                             "offlinable vCPUs of the hypervisor"));
+            goto cleanup;
+        }
+
+        /* hypervisor cpu stats; map is indexed up to the configured maximum */
+        if (!(cpus = virBitmapNew(vm->def->maxvcpus))) {
+            virReportOOMError();
+            goto cleanup;
+        }
+
+        /* map active cpus */
+        if (MATCH(VIR_DOMAIN_VCPU_MAP_ACTIVE)) {
+            /* offline vcpus can't be active */
+            if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINE))
+                goto done;
+
+            for (i = 0; i < ninfo; i++) {
+                if (vcpuinfo[i].active)
+                    ignore_value(virBitmapSetBit(cpus, vcpuinfo[i].id));
+            }
+
+            goto done;
+        }
+
+        /* for native hotplug, all configured vCPUs are possible for hotplug */
+        if (MATCH(VIR_DOMAIN_VCPU_MAP_POSSIBLE)) {
+            virBitmapSetAll(cpus);
+            goto done;
+        }
+
+        if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINE)) {
+            /* online and offline together produce an empty map */
+            if (MATCH(VIR_DOMAIN_VCPU_MAP_ONLINE))
+                goto done;
+
+            /* set all bits so we can subtract online cpus from it later */
+            virBitmapSetAll(cpus);
+        }
+
+        for (i = 0; i < ninfo; i++) {
+            if (MATCH(VIR_DOMAIN_VCPU_MAP_ONLINE))
+                ignore_value(virBitmapSetBit(cpus, vcpuinfo[i].id));
+            else
+                ignore_value(virBitmapClearBit(cpus, vcpuinfo[i].id));
+        }
+    }
+
+done:
+    /* convert the bitmap to the raw byte array handed back to the caller */
+    if (cpumap && virBitmapToData(cpus, cpumap, &dummy) < 0)
+        goto cleanup;
+
+    ret = virBitmapSize(cpus);
+
+cleanup:
+    if (vm)
+        virObjectUnlock(vm);
+    virBitmapFree(cpus);
+    VIR_FREE(vcpuinfo);
+    VIR_FREE(agentinfo);
+    return ret;
+}
+#undef MATCH
static virDriver qemuDriver = {
.no = VIR_DRV_QEMU,
@@ -15378,6 +15556,7 @@ static virDriver qemuDriver = {
.nodeGetCPUMap = qemuNodeGetCPUMap, /* 1.0.0 */
.domainFSTrim = qemuDomainFSTrim, /* 1.0.1 */
.domainOpenChannel = qemuDomainOpenChannel, /* 1.0.2 */
+ .domainGetVCPUMap = qemuDomainGetVCPUMap, /* 1.0.7 */
};
--
1.8.2.1