[libvirt] [PATCH 0/7] Add support for guest-based CPU-hotplug and unplug

This is initial support for guest-agent based CPU hotplug that was committed in upstream qemu and should be released in qemu 1.5.0. This patchset relies on the existing qemuDomainSetVcpusFlags API that is not really ideal for this purpose but is usable. I'm planning on adding a specific-cpu hotplug API later that will allow more control. Peter Krempa (7): lib: Fix docs about return value of virDomainGetVcpusFlags() virsh-domain: Refactor cmdVcpucount and fix output on inactive domains qemu_agent: Add support for appending arrays to commands qemu_agent: Introduce helpers for agent based CPU hot(un)plug Introduce flag VIR_DOMAIN_VCPU_AGENT, for agent based CPU unplug (shutdown). qemu: Implement request of vCPU state using the guest agent qemu: Implement support for VIR_DOMAIN_VCPU_AGENT in qemuDomainSetVcpusFlags include/libvirt/libvirt.h.in | 1 + src/libvirt.c | 9 +- src/qemu/qemu_agent.c | 141 +++++++++++++++++++++ src/qemu/qemu_agent.h | 12 ++ src/qemu/qemu_driver.c | 181 ++++++++++++++++++++++++--- tools/virsh-domain.c | 283 +++++++++++++++++++++---------------------- tools/virsh.pod | 13 +- 7 files changed, 474 insertions(+), 166 deletions(-) -- 1.8.1.5

The return value description stated that 0 is returned in case of success instead of the count of vCPUs. --- src/libvirt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libvirt.c b/src/libvirt.c index c5221f5..c8728b7 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -8957,7 +8957,7 @@ error: * virtual CPU limit is queried. Otherwise, this call queries the * current virtual CPU limit. * - * Returns 0 in case of success, -1 in case of failure. + * Returns the number of vCPUs in case of success, -1 in case of failure. */ int -- 1.8.1.5

On 04/15/2013 09:11 AM, Peter Krempa wrote:
The return value description stated that 0 is returned in case of success instead of the count of vCPUs. --- src/libvirt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
ACK.
diff --git a/src/libvirt.c b/src/libvirt.c index c5221f5..c8728b7 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -8957,7 +8957,7 @@ error: * virtual CPU limit is queried. Otherwise, this call queries the * current virtual CPU limit. * - * Returns 0 in case of success, -1 in case of failure. + * Returns the number of vCPUs in case of success, -1 in case of failure. */
int
-- Eric Blake eblake redhat com +1-919-301-3266 Libvirt virtualization library http://libvirt.org

On 04/16/13 00:29, Eric Blake wrote:
On 04/15/2013 09:11 AM, Peter Krempa wrote:
The return value description stated that 0 is returned in case of success instead of the count of vCPUs. --- src/libvirt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
ACK.
Pushed. Thanks. Peter

This patch factors out the vCPU count retrieval including fallback means into vshCPUCountCollect() and removes the duplicated code to retrieve individual counts. The --current flag (this flag is assumed by default) now works also with --maximum or --active without the need to explicitly specify the state of the domain that is requested. This patch also fixes the output of "virsh vcpucount domain" on inactive domains: Before: $ virsh vcpucount domain maximum config 4 error: Requested operation is not valid: domain is not running current config 4 error: Requested operation is not valid: domain is not running After: $virsh vcpucount domain maximum config 4 current config 4 --- tools/virsh-domain.c | 258 +++++++++++++++++++++++---------------------------- 1 file changed, 115 insertions(+), 143 deletions(-) diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 4d0cc8f..5722d69 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -5066,7 +5066,7 @@ static const vshCmdOptDef opts_vcpucount[] = { }, {.name = "maximum", .type = VSH_OT_BOOL, - .help = N_("get maximum cap on vcpus") + .help = N_("get maximum count of vcpus") }, {.name = "active", .type = VSH_OT_BOOL, @@ -5087,36 +5087,103 @@ static const vshCmdOptDef opts_vcpucount[] = { {.name = NULL} }; +/** + * Collect the number of vCPUs for a guest possibly with fallback means. + * + * Returns the count of vCPUs for a domain and certain flags. Returns -2 in case + * of error. In case live stats can't be collected when the domain is inactive, + * -1 is returned and no error is reported. 
+ */ + +static int +vshCPUCountCollect(vshControl *ctl, + virDomainPtr dom, + unsigned int flags) +{ + int ret = -2; + virDomainInfo info; + int count; + char *def = NULL; + xmlDocPtr xml = NULL; + xmlXPathContextPtr ctxt = NULL; + + if (flags & VIR_DOMAIN_AFFECT_LIVE && + virDomainIsActive(dom) < 1) + return -1; + + /* In all cases, try the new API first; if it fails because we are talking + * to an older daemon, generally we try a fallback API before giving up. + * --current requires the new API, since we don't know whether the domain is + * running or inactive. */ + if ((count = virDomainGetVcpusFlags(dom, flags)) >= 0) + return count; + + /* fallback code */ + if (!(last_error->code == VIR_ERR_NO_SUPPORT || + last_error->code == VIR_ERR_INVALID_ARG)) + goto cleanup; + + if (!(flags & (VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG)) && + virDomainIsActive(dom) == 1) + flags |= VIR_DOMAIN_AFFECT_LIVE; + + vshResetLibvirtError(); + + if (flags & VIR_DOMAIN_AFFECT_LIVE) { + if (flags & VIR_DOMAIN_VCPU_MAXIMUM) { + count = virDomainGetMaxVcpus(dom); + } else { + if (virDomainGetInfo(dom, &info) < 0) + goto cleanup; + + count = info.nrVirtCpu; + } + } else { + if (!(def = virDomainGetXMLDesc(dom, VIR_DOMAIN_XML_INACTIVE))) + goto cleanup; + + if (!(xml = virXMLParseStringCtxt(def, _("(domain_definition)"), &ctxt))) + goto cleanup; + + if (flags & VIR_DOMAIN_VCPU_MAXIMUM) { + if (virXPathInt("string(/domain/vcpus)", ctxt, &count) < 0) { + vshError(ctl, "%s", _("Failed to retrieve maximum vcpu count")); + goto cleanup; + } + } else { + if (virXPathInt("string(/domain/vcpus/@current)", + ctxt, &count) < 0) { + vshError(ctl, "%s", _("Failed to retrieve current vcpu count")); + goto cleanup; + } + } + } + + ret = count; +cleanup: + VIR_FREE(def); + xmlXPathFreeContext(ctxt); + xmlFreeDoc(xml); + + return ret; +} + static bool cmdVcpucount(vshControl *ctl, const vshCmd *cmd) { virDomainPtr dom; - bool ret = true; + bool ret = false; bool maximum = 
vshCommandOptBool(cmd, "maximum"); bool active = vshCommandOptBool(cmd, "active"); bool config = vshCommandOptBool(cmd, "config"); bool live = vshCommandOptBool(cmd, "live"); bool current = vshCommandOptBool(cmd, "current"); bool all = maximum + active + current + config + live == 0; - int count; + unsigned int flags = VIR_DOMAIN_AFFECT_CURRENT; - /* We want one of each pair of mutually exclusive options; that - * is, use of flags requires exactly two options. We reject the - * use of more than 2 flags later on. */ - if (maximum + active + current + config + live == 1) { - if (maximum || active) { - vshError(ctl, - _("when using --%s, one of --config, --live, or --current " - "must be specified"), - maximum ? "maximum" : "active"); - } else { - vshError(ctl, - _("when using --%s, either --maximum or --active must be " - "specified"), - (current ? "current" : config ? "config" : "live")); - } - return false; - } + VSH_EXCLUSIVE_OPTIONS_VAR(current, live); + VSH_EXCLUSIVE_OPTIONS_VAR(current, config); + VSH_EXCLUSIVE_OPTIONS_VAR(active, maximum); /* Backwards compatibility: prior to 0.9.4, * VIR_DOMAIN_AFFECT_CURRENT was unsupported, and --current meant @@ -5129,140 +5196,45 @@ cmdVcpucount(vshControl *ctl, const vshCmd *cmd) active = true; } - if (maximum && active) { - vshError(ctl, "%s", - _("--maximum and --active cannot both be specified")); - return false; - } - if (current + config + live > 1) { - vshError(ctl, "%s", - _("--config, --live, and --current are mutually exclusive")); - return false; - } + if (live) + flags |= VIR_DOMAIN_AFFECT_LIVE; + if (config) + flags |= VIR_DOMAIN_AFFECT_CONFIG; + if (maximum) + flags |= VIR_DOMAIN_VCPU_MAXIMUM; if (!(dom = vshCommandOptDomain(ctl, cmd, NULL))) return false; - /* In all cases, try the new API first; if it fails because we are - * talking to an older client, generally we try a fallback API - * before giving up. --current requires the new API, since we - * don't know whether the domain is running or inactive. 
*/ - if (current) { - count = virDomainGetVcpusFlags(dom, - maximum ? VIR_DOMAIN_VCPU_MAXIMUM : 0); - if (count < 0) { - vshReportError(ctl); - ret = false; - } else { - vshPrint(ctl, "%d\n", count); - } - } - - if (all || (maximum && config)) { - count = virDomainGetVcpusFlags(dom, (VIR_DOMAIN_VCPU_MAXIMUM | - VIR_DOMAIN_AFFECT_CONFIG)); - if (count < 0 && (last_error->code == VIR_ERR_NO_SUPPORT - || last_error->code == VIR_ERR_INVALID_ARG)) { - char *tmp; - char *xml = virDomainGetXMLDesc(dom, VIR_DOMAIN_XML_INACTIVE); - if (xml && (tmp = strstr(xml, "<vcpu"))) { - tmp = strchr(tmp, '>'); - if (!tmp || virStrToLong_i(tmp + 1, &tmp, 10, &count) < 0) - count = -1; - } - vshResetLibvirtError(); - VIR_FREE(xml); - } + if (all) { + int conf_max = vshCPUCountCollect(ctl, dom, VIR_DOMAIN_AFFECT_CONFIG | + VIR_DOMAIN_VCPU_MAXIMUM); + int conf_cur = vshCPUCountCollect(ctl, dom, VIR_DOMAIN_AFFECT_CONFIG); + int live_max = vshCPUCountCollect(ctl, dom, VIR_DOMAIN_AFFECT_LIVE | + VIR_DOMAIN_VCPU_MAXIMUM); + int live_cur = vshCPUCountCollect(ctl, dom, VIR_DOMAIN_AFFECT_LIVE); - if (count < 0) { - vshReportError(ctl); - ret = false; - } else if (all) { - vshPrint(ctl, "%-12s %-12s %3d\n", _("maximum"), _("config"), - count); - } else { - vshPrint(ctl, "%d\n", count); - } - vshResetLibvirtError(); - } - - if (all || (maximum && live)) { - count = virDomainGetVcpusFlags(dom, (VIR_DOMAIN_VCPU_MAXIMUM | - VIR_DOMAIN_AFFECT_LIVE)); - if (count < 0 && (last_error->code == VIR_ERR_NO_SUPPORT - || last_error->code == VIR_ERR_INVALID_ARG)) { - count = virDomainGetMaxVcpus(dom); - } + if (conf_max == -2 || conf_cur == -2 || live_max == -2 || live_cur == -2) + goto cleanup; - if (count < 0) { - vshReportError(ctl); - ret = false; - } else if (all) { - vshPrint(ctl, "%-12s %-12s %3d\n", _("maximum"), _("live"), - count); - } else { - vshPrint(ctl, "%d\n", count); - } - vshResetLibvirtError(); - } + vshPrint(ctl, "%-12s %-12s %3d\n", _("maximum"), _("config"), conf_max); + if (live_max > 0) 
+ vshPrint(ctl, "%-12s %-12s %3d\n", _("maximum"), _("live"), live_max); + vshPrint(ctl, "%-12s %-12s %3d\n", _("current"), _("config"), conf_cur); + if (live_cur > 0) + vshPrint(ctl, "%-12s %-12s %3d\n", _("current"), _("live"), live_cur); + } else { + int count = vshCPUCountCollect(ctl, dom, flags); - if (all || (active && config)) { - count = virDomainGetVcpusFlags(dom, VIR_DOMAIN_AFFECT_CONFIG); - if (count < 0 && (last_error->code == VIR_ERR_NO_SUPPORT - || last_error->code == VIR_ERR_INVALID_ARG)) { - char *tmp, *end; - char *xml = virDomainGetXMLDesc(dom, VIR_DOMAIN_XML_INACTIVE); - if (xml && (tmp = strstr(xml, "<vcpu"))) { - end = strchr(tmp, '>'); - if (end) { - *end = '\0'; - tmp = strstr(tmp, "current="); - if (!tmp) - tmp = end + 1; - else { - tmp += strlen("current="); - tmp += *tmp == '\'' || *tmp == '"'; - } - } - if (!tmp || virStrToLong_i(tmp, &tmp, 10, &count) < 0) - count = -1; - } - VIR_FREE(xml); - } + if (count < 0) + goto cleanup; - if (count < 0) { - vshReportError(ctl); - ret = false; - } else if (all) { - vshPrint(ctl, "%-12s %-12s %3d\n", _("current"), _("config"), - count); - } else { - vshPrint(ctl, "%d\n", count); - } - vshResetLibvirtError(); + vshPrint(ctl, "%d\n", count); } - if (all || (active && live)) { - count = virDomainGetVcpusFlags(dom, VIR_DOMAIN_AFFECT_LIVE); - if (count < 0 && (last_error->code == VIR_ERR_NO_SUPPORT - || last_error->code == VIR_ERR_INVALID_ARG)) { - virDomainInfo info; - if (virDomainGetInfo(dom, &info) == 0) - count = info.nrVirtCpu; - } - - if (count < 0) { - vshReportError(ctl); - ret = false; - } else if (all) { - vshPrint(ctl, "%-12s %-12s %3d\n", _("current"), _("live"), - count); - } else { - vshPrint(ctl, "%d\n", count); - } - vshResetLibvirtError(); - } + ret = true; +cleanup: virDomainFree(dom); return ret; } -- 1.8.1.5

On 04/15/2013 09:11 AM, Peter Krempa wrote:
This patch factors out the vCPU count retrieval including fallback means into vshCPUCountCollect() and removes the duplicated code to retrieve individual counts.
The --current flag (this flag is assumed by default) now also works with --maximum or --active without the need to explicitly specify the state of the domain that is requested.
This patch also fixes the output of "virsh vcpucount domain" on inactive domains:
Before: $ virsh vcpucount domain maximum config 4 error: Requested operation is not valid: domain is not running current config 4 error: Requested operation is not valid: domain is not running
After: $ virsh vcpucount domain maximum config 4 current config 4
Looks nicer; and I don't think we ever promised that there would always be exactly four lines of output. What about the converse case, of attempting to query the config of a transient domain? Should you also clean up that output to avoid error messages?
--- tools/virsh-domain.c | 258 +++++++++++++++++++++++---------------------------- 1 file changed, 115 insertions(+), 143 deletions(-)
cmdVcpucount(vshControl *ctl, const vshCmd *cmd) { virDomainPtr dom; - bool ret = true; + bool ret = false; bool maximum = vshCommandOptBool(cmd, "maximum"); bool active = vshCommandOptBool(cmd, "active"); bool config = vshCommandOptBool(cmd, "config"); bool live = vshCommandOptBool(cmd, "live"); bool current = vshCommandOptBool(cmd, "current"); bool all = maximum + active + current + config + live == 0; - int count; + unsigned int flags = VIR_DOMAIN_AFFECT_CURRENT;
- /* We want one of each pair of mutually exclusive options; that - * is, use of flags requires exactly two options. We reject the - * use of more than 2 flags later on. */ - if (maximum + active + current + config + live == 1) { - if (maximum || active) { - vshError(ctl, - _("when using --%s, one of --config, --live, or --current " - "must be specified"), - maximum ? "maximum" : "active"); - } else { - vshError(ctl, - _("when using --%s, either --maximum or --active must be " - "specified"), - (current ? "current" : config ? "config" : "live")); - } - return false; - } + VSH_EXCLUSIVE_OPTIONS_VAR(current, live);
NACK to this line, at least in this location. It prevents...
+ VSH_EXCLUSIVE_OPTIONS_VAR(current, config); + VSH_EXCLUSIVE_OPTIONS_VAR(active, maximum);
/* Backwards compatibility: prior to 0.9.4, * VIR_DOMAIN_AFFECT_CURRENT was unsupported, and --current meant
...this backwards-compatibility hack, where '--current --live' must behave like the modern '--active --live'.
@@ -5129,140 +5196,45 @@ cmdVcpucount(vshControl *ctl, const vshCmd *cmd) active = true; }
- if (maximum && active) { - vshError(ctl, "%s", - _("--maximum and --active cannot both be specified")); - return false; - } - if (current + config + live > 1) { - vshError(ctl, "%s", - _("--config, --live, and --current are mutually exclusive")); - return false; - }
Moving it here would be okay, though. Everything else seemed okay. -- Eric Blake eblake redhat com +1-919-301-3266 Libvirt virtualization library http://libvirt.org

On 04/16/13 00:44, Eric Blake wrote:
On 04/15/2013 09:11 AM, Peter Krempa wrote:
This patch factors out the vCPU count retrieval including fallback means into vshCPUCountCollect() and removes the duplicated code to retrieve individual counts.
The --current flag (this flag is assumed by default) now also works with --maximum or --active without the need to explicitly specify the state of the domain that is requested.
This patch also fixes the output of "virsh vcpucount domain" on inactive domains:
Before: $ virsh vcpucount domain maximum config 4 error: Requested operation is not valid: domain is not running current config 4 error: Requested operation is not valid: domain is not running
After: $ virsh vcpucount domain maximum config 4 current config 4
Looks nicer; and I don't think we ever promised that there would always be exactly four lines of output. What about the converse case, of attempting to query the config of a transient domain? Should you also clean up that output to avoid error messages?
--- tools/virsh-domain.c | 258 +++++++++++++++++++++++---------------------------- 1 file changed, 115 insertions(+), 143 deletions(-)
cmdVcpucount(vshControl *ctl, const vshCmd *cmd) { virDomainPtr dom; - bool ret = true; + bool ret = false; bool maximum = vshCommandOptBool(cmd, "maximum"); bool active = vshCommandOptBool(cmd, "active"); bool config = vshCommandOptBool(cmd, "config"); bool live = vshCommandOptBool(cmd, "live"); bool current = vshCommandOptBool(cmd, "current"); bool all = maximum + active + current + config + live == 0; - int count; + unsigned int flags = VIR_DOMAIN_AFFECT_CURRENT;
- /* We want one of each pair of mutually exclusive options; that - * is, use of flags requires exactly two options. We reject the - * use of more than 2 flags later on. */ - if (maximum + active + current + config + live == 1) { - if (maximum || active) { - vshError(ctl, - _("when using --%s, one of --config, --live, or --current " - "must be specified"), - maximum ? "maximum" : "active"); - } else { - vshError(ctl, - _("when using --%s, either --maximum or --active must be " - "specified"), - (current ? "current" : config ? "config" : "live")); - } - return false; - } + VSH_EXCLUSIVE_OPTIONS_VAR(current, live);
NACK to this line, at least in this location. It prevents...
+ VSH_EXCLUSIVE_OPTIONS_VAR(current, config); + VSH_EXCLUSIVE_OPTIONS_VAR(active, maximum);
/* Backwards compatibility: prior to 0.9.4, * VIR_DOMAIN_AFFECT_CURRENT was unsupported, and --current meant
...this backwards-compatibility hack, where '--current --live' must behave like the modern '--active --live'.
Hmm, yeah, I didn't port the compatibility code correctly here. Even if the check is moved, the compatibility code needs to be updated. Peter

Add support for array elements for agent commands just like 64d5e815 did for monitor commands --- src/qemu/qemu_agent.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index 3e26cf1..8c8c38b 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -1209,6 +1209,10 @@ qemuAgentMakeCommand(const char *cmdname, case 'n': { ret = virJSONValueObjectAppendNull(jargs, key); } break; + case 'a': { + virJSONValuePtr val = va_arg(args, virJSONValuePtr); + ret = virJSONValueObjectAppend(jargs, key, val); + } break; default: virReportError(VIR_ERR_INTERNAL_ERROR, _("unsupported data type '%c' for arg '%s'"), type, key - 2); -- 1.8.1.5

On 04/15/2013 09:11 AM, Peter Krempa wrote:
Add support for array elements for agent commands just like 64d5e815 did for monitor commands
Always fun to see someone else using one of my commits as a starting point - the virtual pay day of open source development!
--- src/qemu/qemu_agent.c | 4 ++++ 1 file changed, 4 insertions(+)
diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index 3e26cf1..8c8c38b 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -1209,6 +1209,10 @@ qemuAgentMakeCommand(const char *cmdname, case 'n': { ret = virJSONValueObjectAppendNull(jargs, key); } break; + case 'a': { + virJSONValuePtr val = va_arg(args, virJSONValuePtr); + ret = virJSONValueObjectAppend(jargs, key, val); + } break;
ACK. -- Eric Blake eblake redhat com +1-919-301-3266 Libvirt virtualization library http://libvirt.org

On 04/16/13 00:53, Eric Blake wrote:
On 04/15/2013 09:11 AM, Peter Krempa wrote:
Add support for array elements for agent commands just like 64d5e815 did for monitor commands
Always fun to see someone else using one of my commits as a starting point - the virtual pay day of open source development!
Indeed! :)
--- src/qemu/qemu_agent.c | 4 ++++ 1 file changed, 4 insertions(+)
ACK.
Pushed. Thanks Peter

The qemu guest agent allows to online and offline CPUs from the perspective of the guest. This patch adds helpers that call 'guest-get-vcpus' and 'guest-set-vcpus' guest agent functions and convert the data for internal libvirt usage. --- src/qemu/qemu_agent.c | 137 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_agent.h | 12 +++++ 2 files changed, 149 insertions(+) diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index 8c8c38b..ba2d187 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -1455,3 +1455,140 @@ qemuAgentFSTrim(qemuAgentPtr mon, virJSONValueFree(reply); return ret; } + +int +qemuAgentGetVCPUs(qemuAgentPtr mon, + qemuAgentCPUInfoPtr *info) +{ + int ret = -1; + int i; + virJSONValuePtr cmd; + virJSONValuePtr reply = NULL; + virJSONValuePtr data = NULL; + + if (!(cmd = qemuAgentMakeCommand("guest-get-vcpus", NULL))) + return -1; + + if (qemuAgentCommand(mon, cmd, &reply, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) < 0 || + qemuAgentCheckError(cmd, reply) < 0) + goto cleanup; + + if (!(data = virJSONValueObjectGet(reply, "return"))) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest-get-vcpus reply was missing return data")); + goto cleanup; + } + + if (data->type != VIR_JSON_TYPE_ARRAY) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest-get-vcpus return information was not an array")); + goto cleanup; + } + + if (VIR_ALLOC_N(*info, virJSONValueArraySize(data)) < 0) { + virReportOOMError(); + goto cleanup; + } + + for (i = 0 ; i < virJSONValueArraySize(data) ; i++) { + virJSONValuePtr entry = virJSONValueArrayGet(data, i); + qemuAgentCPUInfoPtr in = *info + i; + + if (!entry) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("array element missing in guest-get-vcpus return " + "value")); + goto cleanup; + } + + if (virJSONValueObjectGetNumberUint(entry, "logical-id", &in->id) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'logical-id' missing in reply of guest-get-vcpus")); + goto 
cleanup; + } + + if (virJSONValueObjectGetBoolean(entry, "online", &in->online) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'online' missing in reply of guest-get-vcpus ")); + goto cleanup; + } + + if (virJSONValueObjectGetBoolean(entry, "can-offline", + &in->offlinable) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'can-offline' missing in reply of guest-get-vcpus")); + goto cleanup; + } + } + + ret = virJSONValueArraySize(data); + +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + virJSONValueFree(data); + return ret; +} + +int +qemuAgentSetVCPUs(qemuAgentPtr mon, + qemuAgentCPUInfoPtr info, + size_t ninfo) +{ + int ret = -1; + virJSONValuePtr cmd = NULL; + virJSONValuePtr reply = NULL; + virJSONValuePtr cpus = NULL; + virJSONValuePtr cpu = NULL; + size_t i; + + /* create the key data array */ + if (!(cpus = virJSONValueNewArray())) + goto no_memory; + + for (i = 0; i < ninfo; i++) { + qemuAgentCPUInfoPtr in = &info[i]; + + /* create single cpu object */ + if (!(cpu = virJSONValueNewObject())) + goto no_memory; + + if (virJSONValueObjectAppendNumberInt(cpu, "logical-id", in->id) < 0) + goto no_memory; + + if (virJSONValueObjectAppendBoolean(cpu, "online", in->online) < 0) + goto no_memory; + + if (virJSONValueArrayAppend(cpus, cpu) < 0) + goto no_memory; + + cpu = NULL; + } + + if (!(cmd = qemuAgentMakeCommand("guest-set-vcpus", + "a:vcpus", cpus, + NULL))) + goto cleanup; + + if (qemuAgentCommand(mon, cmd, &reply, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) < 0 || + qemuAgentCheckError(cmd, reply) < 0) + goto cleanup; + + if (virJSONValueObjectGetNumberInt(reply, "return", &ret) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("malformed return value")); + } + +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + virJSONValueFree(cpu); + virJSONValueFree(cpus); + return ret; + +no_memory: + virReportOOMError(); + goto cleanup; +} diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h index 
ba04e61..79be283 100644 --- a/src/qemu/qemu_agent.h +++ b/src/qemu/qemu_agent.h @@ -82,4 +82,16 @@ int qemuAgentArbitraryCommand(qemuAgentPtr mon, int timeout); int qemuAgentFSTrim(qemuAgentPtr mon, unsigned long long minimum); + + +typedef struct _qemuAgentCPUInfo qemuAgentCPUInfo; +typedef qemuAgentCPUInfo *qemuAgentCPUInfoPtr; +struct _qemuAgentCPUInfo { + unsigned int id; /* locigal cpu ID */ + bool online; /* true if the CPU is activated */ + bool offlinable; /* true if the CPU can be offlined - ignored when setting*/ +}; + +int qemuAgentGetVCPUs(qemuAgentPtr mon, qemuAgentCPUInfoPtr *info); +int qemuAgentSetVCPUs(qemuAgentPtr mon, qemuAgentCPUInfoPtr cpus, size_t ncpus); #endif /* __QEMU_AGENT_H__ */ -- 1.8.1.5

On 04/15/2013 09:11 AM, Peter Krempa wrote:
The qemu guest agent allows to online and offline CPUs from the perspective of the guest. This patch adds helpers that call 'guest-get-vcpus' and 'guest-set-vcpus' guest agent functions and convert the data for internal libvirt usage. --- src/qemu/qemu_agent.c | 137 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_agent.h | 12 +++++ 2 files changed, 149 insertions(+)
diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c
+int +qemuAgentGetVCPUs(qemuAgentPtr mon, + qemuAgentCPUInfoPtr *info) +{ + int ret = -1; + int i; + virJSONValuePtr cmd; + virJSONValuePtr reply = NULL; + virJSONValuePtr data = NULL; + + if (!(cmd = qemuAgentMakeCommand("guest-get-vcpus", NULL))) + return -1; + + if (qemuAgentCommand(mon, cmd, &reply, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) < 0 || + qemuAgentCheckError(cmd, reply) < 0) + goto cleanup; + + if (!(data = virJSONValueObjectGet(reply, "return"))) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest-get-vcpus reply was missing return data")); + goto cleanup; + } + + if (data->type != VIR_JSON_TYPE_ARRAY) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest-get-vcpus return information was not an array")); + goto cleanup; + } + + if (VIR_ALLOC_N(*info, virJSONValueArraySize(data)) < 0) { + virReportOOMError(); + goto cleanup; + } + + for (i = 0 ; i < virJSONValueArraySize(data) ; i++) {
Quite a few calls to this...
+ virJSONValuePtr entry = virJSONValueArrayGet(data, i); + qemuAgentCPUInfoPtr in = *info + i; + + if (!entry) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("array element missing in guest-get-vcpus return " + "value")); + goto cleanup; + } + + if (virJSONValueObjectGetNumberUint(entry, "logical-id", &in->id) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'logical-id' missing in reply of guest-get-vcpus")); + goto cleanup; + } + + if (virJSONValueObjectGetBoolean(entry, "online", &in->online) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'online' missing in reply of guest-get-vcpus "));
trailing space in the error message
+ goto cleanup; + } + + if (virJSONValueObjectGetBoolean(entry, "can-offline", + &in->offlinable) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'can-offline' missing in reply of guest-get-vcpus")); + goto cleanup; + } + } + + ret = virJSONValueArraySize(data);
...and again. Worth caching in a local variable?
+ +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + virJSONValueFree(data); + return ret; +}
I checked qemu's qga/qapi-schema.json, and this matches the documentation.
+ +int +qemuAgentSetVCPUs(qemuAgentPtr mon, + qemuAgentCPUInfoPtr info, + size_t ninfo) +{ + int ret = -1; + virJSONValuePtr cmd = NULL; + virJSONValuePtr reply = NULL; + virJSONValuePtr cpus = NULL; + virJSONValuePtr cpu = NULL; + size_t i; + + /* create the key data array */ + if (!(cpus = virJSONValueNewArray())) + goto no_memory; + + for (i = 0; i < ninfo; i++) { + qemuAgentCPUInfoPtr in = &info[i]; + + /* create single cpu object */ + if (!(cpu = virJSONValueNewObject())) + goto no_memory; + + if (virJSONValueObjectAppendNumberInt(cpu, "logical-id", in->id) < 0) + goto no_memory; + + if (virJSONValueObjectAppendBoolean(cpu, "online", in->online) < 0) + goto no_memory; + + if (virJSONValueArrayAppend(cpus, cpu) < 0) + goto no_memory; + + cpu = NULL; + } + + if (!(cmd = qemuAgentMakeCommand("guest-set-vcpus", + "a:vcpus", cpus, + NULL))) + goto cleanup;
This has transfer semantics - after this point, cpus is owned by cmd. You need to do: cpus = NULL here...
+ + if (qemuAgentCommand(mon, cmd, &reply, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) < 0 || + qemuAgentCheckError(cmd, reply) < 0) + goto cleanup; + + if (virJSONValueObjectGetNumberInt(reply, "return", &ret) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("malformed return value")); + }
Potential problem. In the qga documentation, this function is documented as returning < ninfo if the command partially succeeded. I don't know how you plan on using this in later functions, but it might be worth documenting that this function has the same read()-like semantics (a return of -1 is outright error, a return of 0 meant no changes were requested, a return of < ninfo means the first few values were processed before hitting an error, and a return of ninfo means all changes were successful).
+ +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + virJSONValueFree(cpu); + virJSONValueFree(cpus);
...in order to avoid a double-free here.
+ return ret; + +no_memory: + virReportOOMError(); + goto cleanup; +} diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h index ba04e61..79be283 100644 --- a/src/qemu/qemu_agent.h +++ b/src/qemu/qemu_agent.h @@ -82,4 +82,16 @@ int qemuAgentArbitraryCommand(qemuAgentPtr mon, int timeout); int qemuAgentFSTrim(qemuAgentPtr mon, unsigned long long minimum); + + +typedef struct _qemuAgentCPUInfo qemuAgentCPUInfo; +typedef qemuAgentCPUInfo *qemuAgentCPUInfoPtr; +struct _qemuAgentCPUInfo { + unsigned int id; /* locigal cpu ID */
s/locigal/logical/
+ bool online; /* true if the CPU is activated */ + bool offlinable; /* true if the CPU can be offlined - ignored when setting*/
space before */, and maybe wrap this long line Enough comments that it is probably worth seeing a v2 (or at least waiting for my review of the client of this new function in the tail of the series). -- Eric Blake eblake redhat com +1-919-301-3266 Libvirt virtualization library http://libvirt.org

This flag will allow to use qemu guest agent commands to disable processors in a live guest. --- include/libvirt/libvirt.h.in | 1 + src/libvirt.c | 7 +++++++ tools/virsh-domain.c | 25 +++++++++++++++++++++++-- tools/virsh.pod | 13 ++++++++++--- 4 files changed, 41 insertions(+), 5 deletions(-) diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 518f0fe..f12173d 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -2120,6 +2120,7 @@ typedef enum { /* Additionally, these flags may be bitwise-OR'd in. */ VIR_DOMAIN_VCPU_MAXIMUM = (1 << 2), /* Max rather than current count */ + VIR_DOMAIN_VCPU_AGENT = (1 << 3), /* Use guest-agent based cpu hotplug */ } virDomainVcpuFlags; int virDomainSetVcpus (virDomainPtr domain, diff --git a/src/libvirt.c b/src/libvirt.c index c8728b7..0b780cf 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -8887,6 +8887,10 @@ error: * equal to virConnectGetMaxVcpus(). Otherwise, this call affects the * current virtual CPU limit, which must be less than or equal to the * maximum limit. + * + * If @flags includes VIR_DOMAIN_VCPU_AGENT, then a guest agent is used to + * modify the number of processors used by a domain. + * * Not all hypervisors can support all flag combinations. * * Returns 0 in case of success, -1 in case of failure. @@ -8957,6 +8961,9 @@ error: * virtual CPU limit is queried. Otherwise, this call queries the * current virtual CPU limit. * + * If @flags includes VIR_DOMAIN_VCPU_AGENT, then a guest agent is used to + * modify the number of processors used by a domain. + * * Returns the number of vCPUs in case of success, -1 in case of failure. 
*/ diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 5722d69..0989192 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -5084,6 +5084,10 @@ static const vshCmdOptDef opts_vcpucount[] = { .type = VSH_OT_BOOL, .help = N_("get value according to current domain state") }, + {.name = "agent", + .type = VSH_OT_BOOL, + .help = N_("use guest agent based hotplug") + }, {.name = NULL} }; @@ -5123,6 +5127,11 @@ vshCPUCountCollect(vshControl *ctl, last_error->code == VIR_ERR_INVALID_ARG)) goto cleanup; + if (flags & VIR_DOMAIN_VCPU_AGENT) { + vshError(ctl, "%s", _("Failed to retrieve vCPU count via guest agent")); + goto cleanup; + } + if (!(flags & (VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG)) && virDomainIsActive(dom) == 1) flags |= VIR_DOMAIN_AFFECT_LIVE; @@ -5178,12 +5187,14 @@ cmdVcpucount(vshControl *ctl, const vshCmd *cmd) bool config = vshCommandOptBool(cmd, "config"); bool live = vshCommandOptBool(cmd, "live"); bool current = vshCommandOptBool(cmd, "current"); - bool all = maximum + active + current + config + live == 0; + bool agent = vshCommandOptBool(cmd, "agent"); + bool all = maximum + active + current + config + live + agent == 0; unsigned int flags = VIR_DOMAIN_AFFECT_CURRENT; VSH_EXCLUSIVE_OPTIONS_VAR(current, live); VSH_EXCLUSIVE_OPTIONS_VAR(current, config); VSH_EXCLUSIVE_OPTIONS_VAR(active, maximum); + VSH_EXCLUSIVE_OPTIONS_VAR(agent, config); /* Backwards compatibility: prior to 0.9.4, * VIR_DOMAIN_AFFECT_CURRENT was unsupported, and --current meant @@ -5202,6 +5213,8 @@ cmdVcpucount(vshControl *ctl, const vshCmd *cmd) flags |= VIR_DOMAIN_AFFECT_CONFIG; if (maximum) flags |= VIR_DOMAIN_VCPU_MAXIMUM; + if (agent) + flags |= VIR_DOMAIN_VCPU_AGENT; if (!(dom = vshCommandOptDomain(ctl, cmd, NULL))) return false; @@ -5792,6 +5805,10 @@ static const vshCmdOptDef opts_setvcpus[] = { .type = VSH_OT_BOOL, .help = N_("affect current domain") }, + {.name = "agent", + .type = VSH_OT_BOOL, + .help = N_("use guest agent based hotplug") 
+ }, {.name = NULL} }; @@ -5805,17 +5822,21 @@ cmdSetvcpus(vshControl *ctl, const vshCmd *cmd) bool config = vshCommandOptBool(cmd, "config"); bool live = vshCommandOptBool(cmd, "live"); bool current = vshCommandOptBool(cmd, "current"); + bool agent = vshCommandOptBool(cmd, "agent"); unsigned int flags = VIR_DOMAIN_AFFECT_CURRENT; VSH_EXCLUSIVE_OPTIONS_VAR(current, live); VSH_EXCLUSIVE_OPTIONS_VAR(current, config); + VSH_EXCLUSIVE_OPTIONS_VAR(agent, config); if (config) flags |= VIR_DOMAIN_AFFECT_CONFIG; if (live) flags |= VIR_DOMAIN_AFFECT_LIVE; + if (agent) + flags |= VIR_DOMAIN_VCPU_AGENT; /* none of the options were specified */ - if (!current && !live && !config && !maximum) + if (!current && flags == 0) flags = -1; if (!(dom = vshCommandOptDomain(ctl, cmd, NULL))) diff --git a/tools/virsh.pod b/tools/virsh.pod index 11984bc..49438aa 100644 --- a/tools/virsh.pod +++ b/tools/virsh.pod @@ -1585,7 +1585,7 @@ exclusive. If no flag is specified, behavior is different depending on hypervisor. =item B<setvcpus> I<domain> I<count> [I<--maximum>] [[I<--config>] -[I<--live>] | [I<--current>]] +[I<--live>] | [I<--current>]] [I<--agent>] Change the number of virtual CPUs active in a guest domain. By default, this command works on active guest domains. To change the settings for an @@ -1611,6 +1611,10 @@ is up to the hypervisor whether the I<--config> flag is also assumed, and therefore whether the XML configuration is adjusted to make the change persistent. +If I<--agent> is specified, then guest agent commands are used to retrieve the +count of available vCPUs from the perspective of the guest. This flag is usable +only for live domains. + The I<--maximum> flag controls the maximum number of virtual cpus that can be hot-plugged the next time the domain is booted. As such, it must only be used with the I<--config> flag, and not with the I<--live> flag. @@ -1730,7 +1734,7 @@ NOTE: For an inactive domain, the domain name or UUID must be used as the I<domain>. 
=item B<vcpucount> I<domain> [{I<--maximum> | I<--active>} -{I<--config> | I<--live> | I<--current>}] +{I<--config> | I<--live> | I<--current>}] [I<--agent>] Print information about the virtual cpu counts of the given I<domain>. If no flags are specified, all possible counts are @@ -1747,7 +1751,10 @@ time the domain will be booted, I<--live> requires a running domain and lists current values, and I<--current> queries according to the current state of the domain (corresponding to I<--live> if running, or I<--config> if inactive); these three flags are mutually exclusive. -Thus, this command always takes exactly zero or two flags. + +If I<--agent> is specified, then guest agent commands are used to retrieve the +count of available vCPUs from the perspective of the guest. This flag is usable +only for live domains. =item B<vcpuinfo> I<domain> -- 1.8.1.5

On 04/15/2013 09:11 AM, Peter Krempa wrote: Subject line is long, and doesn't match...
This flag allows using qemu guest agent commands to disable processors in a live guest. --- include/libvirt/libvirt.h.in | 1 + src/libvirt.c | 7 +++++++ tools/virsh-domain.c | 25 +++++++++++++++++++++++-- tools/virsh.pod | 13 ++++++++++--- 4 files changed, 41 insertions(+), 5 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 518f0fe..f12173d 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -2120,6 +2120,7 @@ typedef enum {
/* Additionally, these flags may be bitwise-OR'd in. */ VIR_DOMAIN_VCPU_MAXIMUM = (1 << 2), /* Max rather than current count */ + VIR_DOMAIN_VCPU_AGENT = (1 << 3), /* Use guest-agent based cpu hotplug */
...actual flag name. May I suggest: API: add VIR_DOMAIN_VCPU_AGENT for agent-based vcpu hotplug
} virDomainVcpuFlags;
int virDomainSetVcpus (virDomainPtr domain, diff --git a/src/libvirt.c b/src/libvirt.c index c8728b7..0b780cf 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -8887,6 +8887,10 @@ error: * equal to virConnectGetMaxVcpus(). Otherwise, this call affects the * current virtual CPU limit, which must be less than or equal to the * maximum limit. + * + * If @flags includes VIR_DOMAIN_VCPU_AGENT, then a guest agent is used to + * modify the number of processors used by a domain.
I'm assuming that VCPU_AGENT and VCPU_MAXIMUM are mutually exclusive (the guest cannot increase or decrease the maximum number of vcpus that the hypervisor uses as its limit); also the flag is useless if not specified on a running domain. Probably worth mentioning this in the docs.
+ * * Not all hypervisors can support all flag combinations. * * Returns 0 in case of success, -1 in case of failure. @@ -8957,6 +8961,9 @@ error: * virtual CPU limit is queried. Otherwise, this call queries the * current virtual CPU limit. * + * If @flags includes VIR_DOMAIN_VCPU_AGENT, then a guest agent is used to + * modify the number of processors used by a domain.
This is under virDomainGetVcpusFlags(), which reads rather than queries the number of processors. But here, VCPU_AGENT|MAXIMUM makes sense - a guest might see a (smaller) maximum of possible vcpus that could still be hotplugged, in relation to the hypervisor limit.
+ * * Returns the number of vCPUs in case of success, -1 in case of failure. */
You should probably encode any mutual exclusion directly into libvirt.c, so that hypervisor drivers don't have to repeat the same logic. But looks like a good start for a useful addition. -- Eric Blake eblake redhat com +1-919-301-3266 Libvirt virtualization library http://libvirt.org

This patch implements the VIR_DOMAIN_VCPU_AGENT flag for the qemuDomainGetVcpusFlags() libvirt API implementation. --- src/qemu/qemu_driver.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 4 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index e3acdab..e54d8b9 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -4395,17 +4395,24 @@ static int qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) { virQEMUDriverPtr driver = dom->conn->privateData; + qemuDomainObjPrivatePtr priv; virDomainObjPtr vm; virDomainDefPtr def; int ret = -1; virCapsPtr caps = NULL; + qemuAgentCPUInfoPtr cpuinfo = NULL; + int ncpuinfo; + int i; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG | - VIR_DOMAIN_VCPU_MAXIMUM, -1); + VIR_DOMAIN_VCPU_MAXIMUM | + VIR_DOMAIN_VCPU_AGENT, -1); if (!(vm = qemuDomObjFromDomain(dom))) - goto cleanup; + return -1; + + priv = vm->privateData; if (!(caps = virQEMUDriverGetCapabilities(driver, false))) goto cleanup; @@ -4414,16 +4421,61 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) vm, &flags, &def) < 0) goto cleanup; - if (flags & VIR_DOMAIN_AFFECT_LIVE) { + if (flags & VIR_DOMAIN_AFFECT_LIVE) def = vm->def; + + if (flags & VIR_DOMAIN_VCPU_AGENT) { + if (flags & VIR_DOMAIN_AFFECT_CONFIG) { + virReportError(VIR_ERR_INVALID_ARG, "%s", + _("vCPU count provided by the guest agent can only be " + " requested for live domains")); + goto cleanup; + } + + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + goto cleanup; + + if (!virDomainObjIsActive(vm)) { + virReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("domain is not running")); + goto endjob; + } + + qemuDomainObjEnterAgent(vm); + ncpuinfo = qemuAgentGetVCPUs(priv->agent, &cpuinfo); + qemuDomainObjExitAgent(vm); + +endjob: + if (qemuDomainObjEndJob(driver, vm) == 0) + vm = NULL; + + if (ncpuinfo < 0) + goto cleanup; + + if (flags & VIR_DOMAIN_VCPU_MAXIMUM) { + ret = ncpuinfo; + 
goto cleanup; + } + + /* count the online vcpus */ + ret = 0; + for (i = 0; i < ncpuinfo; i++) { + if (cpuinfo[i].online) + ret++; + } + } else { + if (flags & VIR_DOMAIN_VCPU_MAXIMUM) + ret = def->maxvcpus; + else + ret = def->vcpus; } - ret = (flags & VIR_DOMAIN_VCPU_MAXIMUM) ? def->maxvcpus : def->vcpus; cleanup: if (vm) virObjectUnlock(vm); virObjectUnref(caps); + VIR_FREE(cpuinfo); return ret; } -- 1.8.1.5

This patch adds support for agent-based cpu disabling and enabling to qemuDomainSetVcpusFlags() API. --- src/qemu/qemu_driver.c | 121 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 108 insertions(+), 13 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index e54d8b9..6f2d6ff 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -3695,6 +3695,68 @@ unsupported: static int +qemuDomainPrepareAgentVCPUs(unsigned int nvcpus, + qemuAgentCPUInfoPtr cpuinfo, + int ncpuinfo) +{ + int i; + int nonline = 0; + int nofflinable = 0; + + /* count the active and offlinable cpus */ + for (i = 0; i < ncpuinfo; i++) { + if (cpuinfo[i].online) + nonline++; + + if (cpuinfo[i].offlinable && cpuinfo[i].online) + nofflinable++; + + /* This shouldn't happen, but we can't trust the guest agent */ + if (!cpuinfo[i].online && !cpuinfo[i].offlinable) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("Invalid data provided by guest agent")); + return -1; + } + } + + /* the guest agent reported less cpus than requested */ + if (nvcpus > ncpuinfo) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest agent reports less cpu than requested")); + return -1; + } + + /* not enough offlinable CPUs to support the request */ + if (nvcpus < nonline - nofflinable) { + virReportError(VIR_ERR_INVALID_ARG, "%s", + _("Cannot offline enough CPUs")); + return -1; + } + + for (i = 0; i < ncpuinfo; i++) { + if (nvcpus < nonline) { + /* unplug */ + if (cpuinfo[i].offlinable && cpuinfo[i].online) { + cpuinfo[i].online = false; + nonline--; + } + } else if (nvcpus > nonline) { + /* plug */ + if (!cpuinfo[i].online) { + cpuinfo[i].online = true; + nonline++; + } + } else { + /* done */ + break; + } + } + + return 0; +} + + +static int qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus, unsigned int flags) { @@ -3705,10 +3767,14 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus, bool maximum; virQEMUDriverConfigPtr cfg = NULL; 
virCapsPtr caps = NULL; + qemuAgentCPUInfoPtr cpuinfo = NULL; + int ncpuinfo; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG | - VIR_DOMAIN_VCPU_MAXIMUM, -1); + VIR_DOMAIN_VCPU_MAXIMUM | + VIR_DOMAIN_VCPU_AGENT, -1); if (!nvcpus || (unsigned short) nvcpus != nvcpus) { virReportError(VIR_ERR_INVALID_ARG, @@ -3723,6 +3789,8 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus, if (!(vm = qemuDomObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) goto cleanup; @@ -3748,22 +3816,48 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus, goto endjob; } - if (flags & VIR_DOMAIN_AFFECT_LIVE) { - if (qemuDomainHotplugVcpus(driver, vm, nvcpus) < 0) + if (flags & VIR_DOMAIN_VCPU_AGENT) { + if (flags & VIR_DOMAIN_AFFECT_CONFIG) { + virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", + _("chainging of maximum vCPU count isn't supported " + "via guest agent")); goto endjob; - } - - if (flags & VIR_DOMAIN_AFFECT_CONFIG) { - if (maximum) { - persistentDef->maxvcpus = nvcpus; - if (nvcpus < persistentDef->vcpus) - persistentDef->vcpus = nvcpus; - } else { - persistentDef->vcpus = nvcpus; } - if (virDomainSaveConfig(cfg->configDir, persistentDef) < 0) + qemuDomainObjEnterAgent(vm); + ncpuinfo = qemuAgentGetVCPUs(priv->agent, &cpuinfo); + qemuDomainObjExitAgent(vm); + + if (ncpuinfo < 0) + goto endjob; + + if (qemuDomainPrepareAgentVCPUs(nvcpus, cpuinfo, ncpuinfo) < 0) + goto endjob; + + qemuDomainObjEnterAgent(vm); + ncpuinfo = qemuAgentSetVCPUs(priv->agent, cpuinfo, ncpuinfo); + qemuDomainObjExitAgent(vm); + + if (ncpuinfo < 0) goto endjob; + } else { + if (flags & VIR_DOMAIN_AFFECT_LIVE) { + if (qemuDomainHotplugVcpus(driver, vm, nvcpus) < 0) + goto endjob; + } + + if (flags & VIR_DOMAIN_AFFECT_CONFIG) { + if (maximum) { + persistentDef->maxvcpus = nvcpus; + if (nvcpus < persistentDef->vcpus) + persistentDef->vcpus = nvcpus; + } else { + 
persistentDef->vcpus = nvcpus; + } + + if (virDomainSaveConfig(cfg->configDir, persistentDef) < 0) + goto endjob; + } } ret = 0; @@ -3776,6 +3870,7 @@ cleanup: if (vm) virObjectUnlock(vm); virObjectUnref(caps); + VIR_FREE(cpuinfo); virObjectUnref(cfg); return ret; } -- 1.8.1.5
participants (2)
-
Eric Blake
-
Peter Krempa