[libvirt] [PATCHv2 0/6] Add support for guest-agent based cpu hot(un)plug

This version contains fixes requested by Eric on patches 1/6, 3/6 and 4/6. Patch 2/6 is new in the series. I tweaked some details in 5/6 and 6/6 but they weren't reviewed in version 1. Changes are described in the patches themselves. Peter Krempa (6): virsh-domain: Refactor cmdVcpucount and fix output on inactive domains conf: Reword error message to be more universal qemu_agent: Introduce helpers for agent based CPU hot(un)plug API: Introduce VIR_DOMAIN_VCPU_AGENT, for agent based CPU hot(un)plug qemu: Implement request of vCPU state using the guest agent qemu: Implement support for VIR_DOMAIN_VCPU in qemuDomainSetVcpusFlags include/libvirt/libvirt.h.in | 1 + src/conf/domain_conf.c | 2 +- src/libvirt.c | 24 +++- src/qemu/qemu_agent.c | 148 ++++++++++++++++++++++ src/qemu/qemu_agent.h | 12 ++ src/qemu/qemu_driver.c | 189 +++++++++++++++++++++++++--- tools/virsh-domain.c | 292 +++++++++++++++++++++---------------------- tools/virsh.pod | 13 +- 8 files changed, 510 insertions(+), 171 deletions(-) -- 1.8.1.5

This patch factors out the vCPU count retrieval including fallback means into vshCPUCountCollect() and removes the duplicated code to retrieve individual counts. The --current flag (this flag is assumed by default) now works also with --maximum or --active without the need to explicitly specify the state of the domain that is requested. This patch also fixes the output of "virsh vcpucount domain" on inactive domains: Before: $ virsh vcpucount domain maximum config 4 error: Requested operation is not valid: domain is not running current config 4 error: Requested operation is not valid: domain is not running After: $virsh vcpucount domain maximum config 4 current config 4 .. and for transient domains too: Before: $ virsh vcpucount transient-domain error: Requested operation is not valid: cannot change persistent config of a transient domain maximum live 3 error: Requested operation is not valid: cannot change persistent config of a transient domain current live 1 After: $ virsh vcpucount transient-domain maximum live 3 current live 1 --- Notes: Version 2: - think of transient guests too - fix ordering of compatibility code to actually work - print error on specific requests that can't be fulfilled tools/virsh-domain.c | 267 +++++++++++++++++++++++---------------------------- 1 file changed, 121 insertions(+), 146 deletions(-) diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 4d0cc8f..5726744 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -5066,7 +5066,7 @@ static const vshCmdOptDef opts_vcpucount[] = { }, {.name = "maximum", .type = VSH_OT_BOOL, - .help = N_("get maximum cap on vcpus") + .help = N_("get maximum count of vcpus") }, {.name = "active", .type = VSH_OT_BOOL, @@ -5087,36 +5087,102 @@ static const vshCmdOptDef opts_vcpucount[] = { {.name = NULL} }; +/** + * Collect the number of vCPUs for a guest possibly with fallback means. + * + * Returns the count of vCPUs for a domain and certain flags. Returns -2 in case + * of error. 
If @checkState is true, in case live stats can't be collected when + * the domain is inactive or persistent stats can't be collected if domain is + * transient -1 is returned and no error is reported. + */ + +static int +vshCPUCountCollect(vshControl *ctl, + virDomainPtr dom, + unsigned int flags, + bool checkState) +{ + int ret = -2; + virDomainInfo info; + int count; + char *def = NULL; + xmlDocPtr xml = NULL; + xmlXPathContextPtr ctxt = NULL; + + if (checkState && + ((flags & VIR_DOMAIN_AFFECT_LIVE && virDomainIsActive(dom) < 1) || + (flags & VIR_DOMAIN_AFFECT_CONFIG && virDomainIsPersistent(dom) < 1))) + return -1; + + /* In all cases, try the new API first; if it fails because we are talking + * to an older daemon, generally we try a fallback API before giving up. + * --current requires the new API, since we don't know whether the domain is + * running or inactive. */ + if ((count = virDomainGetVcpusFlags(dom, flags)) >= 0) + return count; + + /* fallback code */ + if (!(last_error->code == VIR_ERR_NO_SUPPORT || + last_error->code == VIR_ERR_INVALID_ARG)) + goto cleanup; + + if (!(flags & (VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG)) && + virDomainIsActive(dom) == 1) + flags |= VIR_DOMAIN_AFFECT_LIVE; + + vshResetLibvirtError(); + + if (flags & VIR_DOMAIN_AFFECT_LIVE) { + if (flags & VIR_DOMAIN_VCPU_MAXIMUM) { + count = virDomainGetMaxVcpus(dom); + } else { + if (virDomainGetInfo(dom, &info) < 0) + goto cleanup; + + count = info.nrVirtCpu; + } + } else { + if (!(def = virDomainGetXMLDesc(dom, VIR_DOMAIN_XML_INACTIVE))) + goto cleanup; + + if (!(xml = virXMLParseStringCtxt(def, _("(domain_definition)"), &ctxt))) + goto cleanup; + + if (flags & VIR_DOMAIN_VCPU_MAXIMUM) { + if (virXPathInt("string(/domain/vcpus)", ctxt, &count) < 0) { + vshError(ctl, "%s", _("Failed to retrieve maximum vcpu count")); + goto cleanup; + } + } else { + if (virXPathInt("string(/domain/vcpus/@current)", + ctxt, &count) < 0) { + vshError(ctl, "%s", _("Failed to retrieve current 
vcpu count")); + goto cleanup; + } + } + } + + ret = count; +cleanup: + VIR_FREE(def); + xmlXPathFreeContext(ctxt); + xmlFreeDoc(xml); + + return ret; +} + static bool cmdVcpucount(vshControl *ctl, const vshCmd *cmd) { virDomainPtr dom; - bool ret = true; + bool ret = false; bool maximum = vshCommandOptBool(cmd, "maximum"); bool active = vshCommandOptBool(cmd, "active"); bool config = vshCommandOptBool(cmd, "config"); bool live = vshCommandOptBool(cmd, "live"); bool current = vshCommandOptBool(cmd, "current"); bool all = maximum + active + current + config + live == 0; - int count; - - /* We want one of each pair of mutually exclusive options; that - * is, use of flags requires exactly two options. We reject the - * use of more than 2 flags later on. */ - if (maximum + active + current + config + live == 1) { - if (maximum || active) { - vshError(ctl, - _("when using --%s, one of --config, --live, or --current " - "must be specified"), - maximum ? "maximum" : "active"); - } else { - vshError(ctl, - _("when using --%s, either --maximum or --active must be " - "specified"), - (current ? "current" : config ? "config" : "live")); - } - return false; - } + unsigned int flags = VIR_DOMAIN_AFFECT_CURRENT; /* Backwards compatibility: prior to 0.9.4, * VIR_DOMAIN_AFFECT_CURRENT was unsupported, and --current meant @@ -5124,145 +5190,54 @@ cmdVcpucount(vshControl *ctl, const vshCmd *cmd) * --live' into the new '--active --live', while treating the new * '--maximum --current' correctly rather than rejecting it as * '--maximum --active'. 
*/ - if (!maximum && !active && current) { + if (!maximum && !active && current) current = false; - active = true; - } - if (maximum && active) { - vshError(ctl, "%s", - _("--maximum and --active cannot both be specified")); - return false; - } - if (current + config + live > 1) { - vshError(ctl, "%s", - _("--config, --live, and --current are mutually exclusive")); - return false; - } + VSH_EXCLUSIVE_OPTIONS_VAR(current, live); + VSH_EXCLUSIVE_OPTIONS_VAR(current, config); + VSH_EXCLUSIVE_OPTIONS_VAR(active, maximum); + + if (live) + flags |= VIR_DOMAIN_AFFECT_LIVE; + if (config) + flags |= VIR_DOMAIN_AFFECT_CONFIG; + if (maximum) + flags |= VIR_DOMAIN_VCPU_MAXIMUM; if (!(dom = vshCommandOptDomain(ctl, cmd, NULL))) return false; - /* In all cases, try the new API first; if it fails because we are - * talking to an older client, generally we try a fallback API - * before giving up. --current requires the new API, since we - * don't know whether the domain is running or inactive. */ - if (current) { - count = virDomainGetVcpusFlags(dom, - maximum ? 
VIR_DOMAIN_VCPU_MAXIMUM : 0); - if (count < 0) { - vshReportError(ctl); - ret = false; - } else { - vshPrint(ctl, "%d\n", count); - } - } + if (all) { + int conf_max = vshCPUCountCollect(ctl, dom, VIR_DOMAIN_AFFECT_CONFIG | + VIR_DOMAIN_VCPU_MAXIMUM, true); + int conf_cur = vshCPUCountCollect(ctl, dom, VIR_DOMAIN_AFFECT_CONFIG, true); + int live_max = vshCPUCountCollect(ctl, dom, VIR_DOMAIN_AFFECT_LIVE | + VIR_DOMAIN_VCPU_MAXIMUM, true); + int live_cur = vshCPUCountCollect(ctl, dom, VIR_DOMAIN_AFFECT_LIVE, true); - if (all || (maximum && config)) { - count = virDomainGetVcpusFlags(dom, (VIR_DOMAIN_VCPU_MAXIMUM | - VIR_DOMAIN_AFFECT_CONFIG)); - if (count < 0 && (last_error->code == VIR_ERR_NO_SUPPORT - || last_error->code == VIR_ERR_INVALID_ARG)) { - char *tmp; - char *xml = virDomainGetXMLDesc(dom, VIR_DOMAIN_XML_INACTIVE); - if (xml && (tmp = strstr(xml, "<vcpu"))) { - tmp = strchr(tmp, '>'); - if (!tmp || virStrToLong_i(tmp + 1, &tmp, 10, &count) < 0) - count = -1; - } - vshResetLibvirtError(); - VIR_FREE(xml); - } - - if (count < 0) { - vshReportError(ctl); - ret = false; - } else if (all) { - vshPrint(ctl, "%-12s %-12s %3d\n", _("maximum"), _("config"), - count); - } else { - vshPrint(ctl, "%d\n", count); - } - vshResetLibvirtError(); - } + if (conf_max == -2 || conf_cur == -2 || live_max == -2 || live_cur == -2) + goto cleanup; - if (all || (maximum && live)) { - count = virDomainGetVcpusFlags(dom, (VIR_DOMAIN_VCPU_MAXIMUM | - VIR_DOMAIN_AFFECT_LIVE)); - if (count < 0 && (last_error->code == VIR_ERR_NO_SUPPORT - || last_error->code == VIR_ERR_INVALID_ARG)) { - count = virDomainGetMaxVcpus(dom); - } +#define PRINT_COUNT(VAR, MAX, STATE) if (VAR > 0) \ + vshPrint(ctl, "%-12s %-12s %3d\n", _(MAX), _(STATE), VAR) + PRINT_COUNT(conf_max, "maximum", "config"); + PRINT_COUNT(live_max, "maximum", "live"); + PRINT_COUNT(conf_cur, "current", "config"); + PRINT_COUNT(live_cur, "current", "live"); +#undef PRINT_COUNT - if (count < 0) { - vshReportError(ctl); - ret = 
false; - } else if (all) { - vshPrint(ctl, "%-12s %-12s %3d\n", _("maximum"), _("live"), - count); - } else { - vshPrint(ctl, "%d\n", count); - } - vshResetLibvirtError(); - } + } else { + int count = vshCPUCountCollect(ctl, dom, flags, false); - if (all || (active && config)) { - count = virDomainGetVcpusFlags(dom, VIR_DOMAIN_AFFECT_CONFIG); - if (count < 0 && (last_error->code == VIR_ERR_NO_SUPPORT - || last_error->code == VIR_ERR_INVALID_ARG)) { - char *tmp, *end; - char *xml = virDomainGetXMLDesc(dom, VIR_DOMAIN_XML_INACTIVE); - if (xml && (tmp = strstr(xml, "<vcpu"))) { - end = strchr(tmp, '>'); - if (end) { - *end = '\0'; - tmp = strstr(tmp, "current="); - if (!tmp) - tmp = end + 1; - else { - tmp += strlen("current="); - tmp += *tmp == '\'' || *tmp == '"'; - } - } - if (!tmp || virStrToLong_i(tmp, &tmp, 10, &count) < 0) - count = -1; - } - VIR_FREE(xml); - } + if (count < 0) + goto cleanup; - if (count < 0) { - vshReportError(ctl); - ret = false; - } else if (all) { - vshPrint(ctl, "%-12s %-12s %3d\n", _("current"), _("config"), - count); - } else { - vshPrint(ctl, "%d\n", count); - } - vshResetLibvirtError(); + vshPrint(ctl, "%d\n", count); } - if (all || (active && live)) { - count = virDomainGetVcpusFlags(dom, VIR_DOMAIN_AFFECT_LIVE); - if (count < 0 && (last_error->code == VIR_ERR_NO_SUPPORT - || last_error->code == VIR_ERR_INVALID_ARG)) { - virDomainInfo info; - if (virDomainGetInfo(dom, &info) == 0) - count = info.nrVirtCpu; - } - - if (count < 0) { - vshReportError(ctl); - ret = false; - } else if (all) { - vshPrint(ctl, "%-12s %-12s %3d\n", _("current"), _("live"), - count); - } else { - vshPrint(ctl, "%d\n", count); - } - vshResetLibvirtError(); - } + ret = true; +cleanup: virDomainFree(dom); return ret; } -- 1.8.1.5

On 04/16/2013 08:00 AM, Peter Krempa wrote:
This patch factors out the vCPU count retrieval including fallback means into vshCPUCountCollect() and removes the duplicated code to retrieve individual counts.
+#define PRINT_COUNT(VAR, MAX, STATE) if (VAR > 0) \ + vshPrint(ctl, "%-12s %-12s %3d\n", _(MAX), _(STATE), VAR) + PRINT_COUNT(conf_max, "maximum", "config"); + PRINT_COUNT(live_max, "maximum", "live"); + PRINT_COUNT(conf_cur, "current", "config"); + PRINT_COUNT(live_cur, "current", "live"); +#undef PRINT_COUNT
Doesn't quite work with i18n. Since MAX and STATE are not string literals, they do not appear in the .po file. But since "maximum" isn't marked, it also doesn't appear. So nothing gets translated. You either need: _(MAX) in the macro, and N_("maximum") as argument to the macro, or plain MAX in the macro, and _("maximum") as argument to the macro. Also, using the name MAX as a macro parameter risks collisions with a common macro name of MAX(); it might be better to name the macro parameter something less likely to collide, like WHICH. Beyond that, it looks like your conversion does the right things. I assume you tested on all three of transient, persistent online, and persistent offline guests, and with a good set of combinations of arguments to make sure we aren't breaking back-compat for which numbers get reported. ACK with the macro fixed up. -- Eric Blake eblake redhat com +1-919-301-3266 Libvirt virtualization library http://libvirt.org

On 04/19/13 01:27, Eric Blake wrote:
On 04/16/2013 08:00 AM, Peter Krempa wrote:
This patch factors out the vCPU count retrieval including fallback means into vshCPUCountCollect() and removes the duplicated code to retrieve individual counts.
+#define PRINT_COUNT(VAR, MAX, STATE) if (VAR > 0) \ + vshPrint(ctl, "%-12s %-12s %3d\n", _(MAX), _(STATE), VAR) + PRINT_COUNT(conf_max, "maximum", "config"); + PRINT_COUNT(live_max, "maximum", "live"); + PRINT_COUNT(conf_cur, "current", "config"); + PRINT_COUNT(live_cur, "current", "live"); +#undef PRINT_COUNT
Doesn't quite work with i18n. Since MAX and STATE are not string literals, they do not appear in the .po file. But since "maximum" isn't marked, it also doesn't appear. So nothing gets translated.
You either need: _(MAX) in the macro, and N_("maximum") as argument to the macro, or plain MAX in the macro, and _("maximum") as argument to the macro.
Also, using the name MAX as a macro parameter risks collisions with a common macro name of MAX(); it might be better to name the macro parameter something less likely to collide, like WHICH.
Beyond that, it looks like your conversion does the right things. I assume you tested on all three of transient, persistent online, and persistent offline guests, and with a good set of combinations of arguments to make sure we aren't breaking back-compat for which numbers get reported.
ACK with the macro fixed up.
Thanks, I fixed the macro and double checked that the original flag meanings match the stuff reported by old virsh and pushed this patch. Peter

The error message that warns the user when a request to change/get persistent configuration of a transient domain is requested suggests that changes are being made. Reword it to be more universal and allow it to be used for getter APIs too. Before: $ virsh vcpucount transient-domain --config error: Requested operation is not valid: cannot change persistent config of a transient domain After: $ virsh vcpucount transient-domain --config error: Requested operation is not valid: cannot access persistent config of a transient domain --- Notes: Version 2: -New in series src/conf/domain_conf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c index 548368e..850e8d6 100644 --- a/src/conf/domain_conf.c +++ b/src/conf/domain_conf.c @@ -2251,7 +2251,7 @@ virDomainLiveConfigHelperMethod(virCapsPtr caps, if (*flags & VIR_DOMAIN_AFFECT_CONFIG) { if (!dom->persistent) { virReportError(VIR_ERR_OPERATION_INVALID, "%s", - _("cannot change persistent config of a " + _("cannot access persistent config of a " "transient domain")); goto cleanup; } -- 1.8.1.5

On Tue, Apr 16, 2013 at 04:00:08PM +0200, Peter Krempa wrote:
The error message that warns the user when a request to change/get persistent configuration of a transient domain is requested suggests that changes are being made. Reword it to be more universal and allow it to be used for getter APIs too.
Before: $ virsh vcpucount transient-domain --config error: Requested operation is not valid: cannot change persistent config of a transient domain
After: $ virsh vcpucount transient-domain --config error: Requested operation is not valid: cannot access persistent config of a transient domain
It would really be better saying "transient domains do not have any persistent config" Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

The qemu guest agent allows to online and offline CPUs from the perspective of the guest. This patch adds helpers that call 'guest-get-vcpus' and 'guest-set-vcpus' guest agent functions and convert the data for internal libvirt usage. --- Notes: Version 2: - cache json array size instead of re-calling the getter func - add docs to clarify return value of qemuAgentSetVCPUs - fix typos in header src/qemu/qemu_agent.c | 148 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_agent.h | 12 ++++ 2 files changed, 160 insertions(+) diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index 8c8c38b..4f59d74 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -1455,3 +1455,151 @@ qemuAgentFSTrim(qemuAgentPtr mon, virJSONValueFree(reply); return ret; } + +int +qemuAgentGetVCPUs(qemuAgentPtr mon, + qemuAgentCPUInfoPtr *info) +{ + int ret = -1; + int i; + virJSONValuePtr cmd; + virJSONValuePtr reply = NULL; + virJSONValuePtr data = NULL; + int ndata; + + if (!(cmd = qemuAgentMakeCommand("guest-get-vcpus", NULL))) + return -1; + + if (qemuAgentCommand(mon, cmd, &reply, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) < 0 || + qemuAgentCheckError(cmd, reply) < 0) + goto cleanup; + + if (!(data = virJSONValueObjectGet(reply, "return"))) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest-get-vcpus reply was missing return data")); + goto cleanup; + } + + if (data->type != VIR_JSON_TYPE_ARRAY) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest-get-vcpus return information was not an array")); + goto cleanup; + } + + ndata = virJSONValueArraySize(data); + + if (VIR_ALLOC_N(*info, ndata) < 0) { + virReportOOMError(); + goto cleanup; + } + + for (i = 0; i < ndata; i++) { + virJSONValuePtr entry = virJSONValueArrayGet(data, i); + qemuAgentCPUInfoPtr in = *info + i; + + if (!entry) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("array element missing in guest-get-vcpus return " + "value")); + goto cleanup; + } + + if 
(virJSONValueObjectGetNumberUint(entry, "logical-id", &in->id) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'logical-id' missing in reply of guest-get-vcpus")); + goto cleanup; + } + + if (virJSONValueObjectGetBoolean(entry, "online", &in->online) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'online' missing in reply of guest-get-vcpus")); + goto cleanup; + } + + if (virJSONValueObjectGetBoolean(entry, "can-offline", + &in->offlinable) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'can-offline' missing in reply of guest-get-vcpus")); + goto cleanup; + } + } + + ret = ndata; + +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + virJSONValueFree(data); + return ret; +} + +/** + * Set the VCPU state using guest agent. + * + * Returns -1 on error, ninfo in case everything was successful and less than + * ninfo on a partial failure. + */ +int +qemuAgentSetVCPUs(qemuAgentPtr mon, + qemuAgentCPUInfoPtr info, + size_t ninfo) +{ + int ret = -1; + virJSONValuePtr cmd = NULL; + virJSONValuePtr reply = NULL; + virJSONValuePtr cpus = NULL; + virJSONValuePtr cpu = NULL; + size_t i; + + /* create the key data array */ + if (!(cpus = virJSONValueNewArray())) + goto no_memory; + + for (i = 0; i < ninfo; i++) { + qemuAgentCPUInfoPtr in = &info[i]; + + /* create single cpu object */ + if (!(cpu = virJSONValueNewObject())) + goto no_memory; + + if (virJSONValueObjectAppendNumberInt(cpu, "logical-id", in->id) < 0) + goto no_memory; + + if (virJSONValueObjectAppendBoolean(cpu, "online", in->online) < 0) + goto no_memory; + + if (virJSONValueArrayAppend(cpus, cpu) < 0) + goto no_memory; + + cpu = NULL; + } + + if (!(cmd = qemuAgentMakeCommand("guest-set-vcpus", + "a:vcpus", cpus, + NULL))) + goto cleanup; + + cpus = NULL; + + if (qemuAgentCommand(mon, cmd, &reply, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) < 0 || + qemuAgentCheckError(cmd, reply) < 0) + goto cleanup; + + if (virJSONValueObjectGetNumberInt(reply, "return", &ret) < 0) { + 
virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("malformed return value")); + } + +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + virJSONValueFree(cpu); + virJSONValueFree(cpus); + return ret; + +no_memory: + virReportOOMError(); + goto cleanup; +} diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h index ba04e61..cf70653 100644 --- a/src/qemu/qemu_agent.h +++ b/src/qemu/qemu_agent.h @@ -82,4 +82,16 @@ int qemuAgentArbitraryCommand(qemuAgentPtr mon, int timeout); int qemuAgentFSTrim(qemuAgentPtr mon, unsigned long long minimum); + + +typedef struct _qemuAgentCPUInfo qemuAgentCPUInfo; +typedef qemuAgentCPUInfo *qemuAgentCPUInfoPtr; +struct _qemuAgentCPUInfo { + unsigned int id; /* logical cpu ID */ + bool online; /* true if the CPU is activated */ + bool offlinable; /* true if the CPU can be offlined */ +}; + +int qemuAgentGetVCPUs(qemuAgentPtr mon, qemuAgentCPUInfoPtr *info); +int qemuAgentSetVCPUs(qemuAgentPtr mon, qemuAgentCPUInfoPtr cpus, size_t ncpus); #endif /* __QEMU_AGENT_H__ */ -- 1.8.1.5

This flag will allow to use qemu guest agent commands to disable (offline) and enable (online) processors in a live guest that has the guest agent running. --- Notes: Version 2: - improve api docs - reject _MAXIMUM and _AGENT to the setter func - break long line include/libvirt/libvirt.h.in | 1 + src/libvirt.c | 24 ++++++++++++++++++++++-- tools/virsh-domain.c | 25 +++++++++++++++++++++++-- tools/virsh.pod | 13 ++++++++++--- 4 files changed, 56 insertions(+), 7 deletions(-) diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 518f0fe..f12173d 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -2120,6 +2120,7 @@ typedef enum { /* Additionally, these flags may be bitwise-OR'd in. */ VIR_DOMAIN_VCPU_MAXIMUM = (1 << 2), /* Max rather than current count */ + VIR_DOMAIN_VCPU_AGENT = (1 << 3), /* Use guest-agent based cpu hotplug */ } virDomainVcpuFlags; int virDomainSetVcpus (virDomainPtr domain, diff --git a/src/libvirt.c b/src/libvirt.c index 870519e..ecfc1a6 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -8898,6 +8898,12 @@ error: * equal to virConnectGetMaxVcpus(). Otherwise, this call affects the * current virtual CPU limit, which must be less than or equal to the * maximum limit. + * + * If @flags includes VIR_DOMAIN_VCPU_AGENT, then a guest agent is used to + * modify the number of processors used by a domain. This flag can only be used + * with live guests and is incompatible with VIR_DOMAIN_VCPU_MAXIMUM as the + * maximum limit can't be changed using the guest agent. + * * Not all hypervisors can support all flag combinations. * * Returns 0 in case of success, -1 in case of failure. 
@@ -8923,6 +8929,15 @@ virDomainSetVcpusFlags(virDomainPtr domain, unsigned int nvcpus, goto error; } + if (flags & VIR_DOMAIN_VCPU_AGENT && + flags & VIR_DOMAIN_VCPU_MAXIMUM) { + virReportInvalidArg(flags, + _("flags 'VIR_DOMAIN_VCPU_MAXIMUM' and " + "'VIR_DOMAIN_VCPU_AGENT' in '%s' are mutually " + "exclusive"), __FUNCTION__); + goto error; + } + virCheckNonZeroArgGoto(nvcpus, error); if ((unsigned short) nvcpus != nvcpus) { @@ -8966,7 +8981,11 @@ error: * * If @flags includes VIR_DOMAIN_VCPU_MAXIMUM, then the maximum * virtual CPU limit is queried. Otherwise, this call queries the - * current virtual CPU limit. + * current virtual CPU count. + * + * If @flags includes VIR_DOMAIN_VCPU_AGENT, then a guest agent is used to + * modify the number of processors used by a domain. This flag is only usable on + * live domains. * * Returns the number of vCPUs in case of success, -1 in case of failure. */ @@ -8990,7 +9009,8 @@ virDomainGetVcpusFlags(virDomainPtr domain, unsigned int flags) if ((flags & VIR_DOMAIN_AFFECT_LIVE) && (flags & VIR_DOMAIN_AFFECT_CONFIG)) { virReportInvalidArg(flags, - _("flags 'affect live' and 'affect config' in %s are mutually exclusive"), + _("flags 'affect live' and 'affect config' in %s " + "are mutually exclusive"), __FUNCTION__); goto error; } diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 5726744..627b717 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -5084,6 +5084,10 @@ static const vshCmdOptDef opts_vcpucount[] = { .type = VSH_OT_BOOL, .help = N_("get value according to current domain state") }, + {.name = "agent", + .type = VSH_OT_BOOL, + .help = N_("use guest agent based hotplug") + }, {.name = NULL} }; @@ -5126,6 +5130,11 @@ vshCPUCountCollect(vshControl *ctl, last_error->code == VIR_ERR_INVALID_ARG)) goto cleanup; + if (flags & VIR_DOMAIN_VCPU_AGENT) { + vshError(ctl, "%s", _("Failed to retrieve vCPU count via guest agent")); + goto cleanup; + } + if (!(flags & (VIR_DOMAIN_AFFECT_LIVE | 
VIR_DOMAIN_AFFECT_CONFIG)) && virDomainIsActive(dom) == 1) flags |= VIR_DOMAIN_AFFECT_LIVE; @@ -5181,7 +5190,8 @@ cmdVcpucount(vshControl *ctl, const vshCmd *cmd) bool config = vshCommandOptBool(cmd, "config"); bool live = vshCommandOptBool(cmd, "live"); bool current = vshCommandOptBool(cmd, "current"); - bool all = maximum + active + current + config + live == 0; + bool agent = vshCommandOptBool(cmd, "agent"); + bool all = maximum + active + current + config + live + agent == 0; unsigned int flags = VIR_DOMAIN_AFFECT_CURRENT; /* Backwards compatibility: prior to 0.9.4, @@ -5196,6 +5206,7 @@ cmdVcpucount(vshControl *ctl, const vshCmd *cmd) VSH_EXCLUSIVE_OPTIONS_VAR(current, live); VSH_EXCLUSIVE_OPTIONS_VAR(current, config); VSH_EXCLUSIVE_OPTIONS_VAR(active, maximum); + VSH_EXCLUSIVE_OPTIONS_VAR(agent, config); if (live) flags |= VIR_DOMAIN_AFFECT_LIVE; @@ -5203,6 +5214,8 @@ cmdVcpucount(vshControl *ctl, const vshCmd *cmd) flags |= VIR_DOMAIN_AFFECT_CONFIG; if (maximum) flags |= VIR_DOMAIN_VCPU_MAXIMUM; + if (agent) + flags |= VIR_DOMAIN_VCPU_AGENT; if (!(dom = vshCommandOptDomain(ctl, cmd, NULL))) return false; @@ -5795,6 +5808,10 @@ static const vshCmdOptDef opts_setvcpus[] = { .type = VSH_OT_BOOL, .help = N_("affect current domain") }, + {.name = "agent", + .type = VSH_OT_BOOL, + .help = N_("use guest agent based hotplug") + }, {.name = NULL} }; @@ -5808,17 +5825,21 @@ cmdSetvcpus(vshControl *ctl, const vshCmd *cmd) bool config = vshCommandOptBool(cmd, "config"); bool live = vshCommandOptBool(cmd, "live"); bool current = vshCommandOptBool(cmd, "current"); + bool agent = vshCommandOptBool(cmd, "agent"); unsigned int flags = VIR_DOMAIN_AFFECT_CURRENT; VSH_EXCLUSIVE_OPTIONS_VAR(current, live); VSH_EXCLUSIVE_OPTIONS_VAR(current, config); + VSH_EXCLUSIVE_OPTIONS_VAR(agent, config); if (config) flags |= VIR_DOMAIN_AFFECT_CONFIG; if (live) flags |= VIR_DOMAIN_AFFECT_LIVE; + if (agent) + flags |= VIR_DOMAIN_VCPU_AGENT; /* none of the options were specified */ - if 
(!current && !live && !config && !maximum) + if (!current && flags == 0) flags = -1; if (!(dom = vshCommandOptDomain(ctl, cmd, NULL))) diff --git a/tools/virsh.pod b/tools/virsh.pod index 11984bc..49438aa 100644 --- a/tools/virsh.pod +++ b/tools/virsh.pod @@ -1585,7 +1585,7 @@ exclusive. If no flag is specified, behavior is different depending on hypervisor. =item B<setvcpus> I<domain> I<count> [I<--maximum>] [[I<--config>] -[I<--live>] | [I<--current>]] +[I<--live>] | [I<--current>]] [I<--agent>] Change the number of virtual CPUs active in a guest domain. By default, this command works on active guest domains. To change the settings for an @@ -1611,6 +1611,10 @@ is up to the hypervisor whether the I<--config> flag is also assumed, and therefore whether the XML configuration is adjusted to make the change persistent. +If I<--agent> is specified, then guest agent commands are used to retrieve the +count of available vCPUs from the perspective of the guest. This flag is usable +only for live domains. + The I<--maximum> flag controls the maximum number of virtual cpus that can be hot-plugged the next time the domain is booted. As such, it must only be used with the I<--config> flag, and not with the I<--live> flag. @@ -1730,7 +1734,7 @@ NOTE: For an inactive domain, the domain name or UUID must be used as the I<domain>. =item B<vcpucount> I<domain> [{I<--maximum> | I<--active>} -{I<--config> | I<--live> | I<--current>}] +{I<--config> | I<--live> | I<--current>}] [I<--agent>] Print information about the virtual cpu counts of the given I<domain>. If no flags are specified, all possible counts are @@ -1747,7 +1751,10 @@ time the domain will be booted, I<--live> requires a running domain and lists current values, and I<--current> queries according to the current state of the domain (corresponding to I<--live> if running, or I<--config> if inactive); these three flags are mutually exclusive. -Thus, this command always takes exactly zero or two flags. 
+ +If I<--agent> is specified, then guest agent commands are used to retrieve the +count of available vCPUs from the perspective of the guest. This flag is usable +only for live domains. =item B<vcpuinfo> I<domain> -- 1.8.1.5

On Tue, Apr 16, 2013 at 04:00:10PM +0200, Peter Krempa wrote:
This flag will allow to use qemu guest agent commands to disable (offline) and enable (online) processors in a live guest that has the guest agent running.
How do guest CPU offline/online state changes relate to the offline/online state changes we have traditionally done via the monitor. ie if we offline a CPU with the guest agent, will then now be visible in the state via the monitor ? And the reverse ? Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On 04/16/13 19:41, Daniel P. Berrange wrote:
On Tue, Apr 16, 2013 at 04:00:10PM +0200, Peter Krempa wrote:
This flag will allow to use qemu guest agent commands to disable (offline) and enable (online) processors in a live guest that has the guest agent running.
How do guest CPU offline/online state changes relate to the offline/online state changes we have traditionally done via the monitor.
ie if we offline a CPU with the guest agent, will then now be visible in the state via the monitor ? And the reverse ?
If you modify the guest state via agent it is not visible to the host (except for the VCPU not consuming cpu time). If you hotplug/unplug cpus via the monitor, the cpu is added to/removed from the guest and thus visible in the agent output. Peter

On Wed, Apr 17, 2013 at 08:47:01AM +0200, Peter Krempa wrote:
On 04/16/13 19:41, Daniel P. Berrange wrote:
On Tue, Apr 16, 2013 at 04:00:10PM +0200, Peter Krempa wrote:
This flag will allow to use qemu guest agent commands to disable (offline) and enable (online) processors in a live guest that has the guest agent running.
How do guest CPU offline/online state changes relate to the offline/online state changes we have traditionally done via the monitor.
ie if we offline a CPU with the guest agent, will then now be visible in the state via the monitor ? And the reverse ?
If you modify the guest state via agent it is not visible to the host (except for the VCPU not consuming cpu time).
So this isn't really VCPU hotplug then. It is just toggling whether the guest OS is scheduling things on that VCPU or not. As such IMHO, we should not be overloading the existing API with this functionality - we should have strictly separate APIs for controlling guest OS vCPU usage. Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On 04/17/13 11:16, Daniel P. Berrange wrote:
On Wed, Apr 17, 2013 at 08:47:01AM +0200, Peter Krempa wrote:
On 04/16/13 19:41, Daniel P. Berrange wrote:
On Tue, Apr 16, 2013 at 04:00:10PM +0200, Peter Krempa wrote:
This flag will allow to use qemu guest agent commands to disable (offline) and enable (online) processors in a live guest that has the guest agent running.
How do guest CPU offline/online state changes relate to the offline/online state changes we have traditionally done via the monitor.
ie if we offline a CPU with the guest agent, will that now be visible in the state via the monitor ? And the reverse ?
If you modify the guest state via agent it is not visible to the host (except for the VCPU not consuming cpu time).
So this isn't really VCPU hotplug then. It is just toggling whether the guest OS is scheduling things on that VCPU or not. As such IMHO, we should not be overloading the existing API with this functionality - we should have strictly separate APIs for controlling guest OS vCPU usage.
Hmm, okay that seems fair enough. The virDomainSetvcpus api has the ideal name for this but mixing the semantics of disabling CPUs with the agent and ripping them out from the hypervisor might lead to user confusion. In this case we need to design a new API. Here are a few suggestions: 1) virDomainSetGuestVCPU(virDomainPtr dom, unsigned int cpu_id, bool state, unsigned int flags); This api would be very easy for us as only one cpu could be modified at a time, thus no painful error reporting on semi-failed transactions. Harder to use in mgmt apps as they would need to call it multiple times. 2) virDomainSetGuestVCPUs(virDomainPtr dom, virBitmapPtr to_online, virBitmapPtr to_offline, unsigned int flags); Doesn't look very nice. Two bitmaps are needed as CPU indexes are not guaranteed to be contiguous inside of a guest. Is easier to use for mgmt apps as only a single call is needed. Libvirt will have to solve failures and maybe even attempt rollback. 3) virDomainSetGuestVCPUs(virDomainPtr dom, virBitmapPtr cpumap, bool state, unsigned int flags); Variation of 2), one cpu map and a state flag that will determine what action to take with the cpus provided in the map instead of two separate maps. Other possibility would be to expose the cpu state in a kind of array as the agent monitor functions do it right now, but this wouldn't be expandable and would require users to use that new data structure. Again, the getter functions will need to do the same, so that the user will be able to obtain a map of the system to make decisions about offlining and onlining processors. In the future we will also need similar APIs for classic cpu hotplug as qemu is probably going to support it in that way. With the classic hotplug we probably won't need to take care of sparse cpu ID's. Any other design or naming suggestions are welcome. Peter

On Thu, Apr 18, 2013 at 12:00:09PM +0200, Peter Krempa wrote:
Hmm, okay that seems fair enough. The virDomainSetvcpus api has the ideal name for this but mixing the semantics of disabling CPUs with the agent and ripping them out from the hypervisor might lead to user confusion.
In this case we need to design a new API. Here are a few suggestions:
1) virDomainSetGuestVCPU(virDomainPtr dom, unsigned int cpu_id, bool state, unsigned int flags);
This api would be very easy for us as only one cpu could be modified at a time, thus no painful error reporting on semi-failed transactions. Harder to use in mgmt apps as they would need to call it multiple times.
Yep, the simplicity is nice. I don't think it is much of a burden to apps.
2) virDomainSetGuestVCPUs(virDomainPtr dom, virBitmapPtr to_online, virBitmapPtr to_offline, unsigned int flags);
Doesn't look very nice. Two bitmaps are needed as CPU indexes are not guaranteed to be contiguous inside of a guest. Is easier to use for mgmt apps as only a single call is needed. Libvirt will have to solve failures and maybe even attempt rollback.
More importantly I don't want us to expose virBitmapPtr in the public API ! We'd want to use raw bitmask as we do for other CPU related APIs
3) virDomainSetGuestVCPUs(virDomainPtr dom, virBitmapPtr cpumap, bool state, unsigned int flags);
Variation of 2), one cpu map and a state flag that will determine what action to take with the cpus provided in the map instead of two separate maps.
Other possibility would be to expose the cpu state in a kind of array as the agent monitor functions do it right now, but this wouldn't be expandable and would require users to use that new data structure.
Again, the getter functions will need to do the same, so that the user will be able to obtain a map of the system to make decisions about offlining and onlining processors.
In the future we will also need similar APIs for classic cpu hotplug as qemu is probably going to support it in that way. With the classic hotplug we probably won't need to take care of sparse cpu ID's.
Any other design or naming suggestions are welcome.
Since 'cpu_id' in this refers to the guest OS' notion of CPUs, do we need some way to expose what the guest OS considers the CPU IDs to be. Personally I like the simpler first API, since it has easy to understand error behaviour. What is the data format QEMU agent requires for toggling CPUs ? Does it allow multiple CPUs to be changed at once ? Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On 04/23/2013 09:52 AM, Daniel P. Berrange wrote:
Any other design or naming suggestions are welcome.
Since 'cpu_id' in this refers to the guest OS' notion of CPUs, do we need some way to expose what the guest OS considers the CPU IDs to be.
Qemu is still working on that point. For 1.5, the recommendation is to use sequential numbering and only hotplug the next available number (since hotunplug won't make 1.5). For 1.6, the numbers might not be contiguous, based on ACPI constraints, so they are planning on designing something that would report the available ids that can be used in the first place, at which point libvirt would then have to worry about tracking arbitrary in-use ids rather than sequential. https://lists.gnu.org/archive/html/qemu-devel/2013-04/msg04625.html
Personally I like the simpler first API, since it has easy to understand error behaviour.
What is the data format QEMU agent requires for toggling CPUs ? Does it allow multiple CPUs to be changed at once ?
The current agent command allows multiple cpus to be toggled in one command (but we don't necessarily need to expose that complexity); whereas the monitor command for cpu-add is only one cpu per command. -- Eric Blake eblake redhat com +1-919-301-3266 Libvirt virtualization library http://libvirt.org

This patch implements the VIR_DOMAIN_VCPU_AGENT flag for the qemuDomainGetVcpusFlags() libvirt API implementation. --- Notes: Version 2: - use job type QUERY instead of MODIFY src/qemu/qemu_driver.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 4 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 7c167b7..d0508e9 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -4368,17 +4368,24 @@ static int qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) { virQEMUDriverPtr driver = dom->conn->privateData; + qemuDomainObjPrivatePtr priv; virDomainObjPtr vm; virDomainDefPtr def; int ret = -1; virCapsPtr caps = NULL; + qemuAgentCPUInfoPtr cpuinfo = NULL; + int ncpuinfo; + int i; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG | - VIR_DOMAIN_VCPU_MAXIMUM, -1); + VIR_DOMAIN_VCPU_MAXIMUM | + VIR_DOMAIN_VCPU_AGENT, -1); if (!(vm = qemuDomObjFromDomain(dom))) - goto cleanup; + return -1; + + priv = vm->privateData; if (!(caps = virQEMUDriverGetCapabilities(driver, false))) goto cleanup; @@ -4387,16 +4394,61 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) vm, &flags, &def) < 0) goto cleanup; - if (flags & VIR_DOMAIN_AFFECT_LIVE) { + if (flags & VIR_DOMAIN_AFFECT_LIVE) def = vm->def; + + if (flags & VIR_DOMAIN_VCPU_AGENT) { + if (flags & VIR_DOMAIN_AFFECT_CONFIG) { + virReportError(VIR_ERR_INVALID_ARG, "%s", + _("vCPU count provided by the guest agent can only be " + " requested for live domains")); + goto cleanup; + } + + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + goto cleanup; + + if (!virDomainObjIsActive(vm)) { + virReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("domain is not running")); + goto endjob; + } + + qemuDomainObjEnterAgent(vm); + ncpuinfo = qemuAgentGetVCPUs(priv->agent, &cpuinfo); + qemuDomainObjExitAgent(vm); + +endjob: + if (qemuDomainObjEndJob(driver, vm) == 0) + vm = NULL; + + if (ncpuinfo < 0) + goto cleanup; + 
+ if (flags & VIR_DOMAIN_VCPU_MAXIMUM) { + ret = ncpuinfo; + goto cleanup; + } + + /* count the online vcpus */ + ret = 0; + for (i = 0; i < ncpuinfo; i++) { + if (cpuinfo[i].online) + ret++; + } + } else { + if (flags & VIR_DOMAIN_VCPU_MAXIMUM) + ret = def->maxvcpus; + else + ret = def->vcpus; } - ret = (flags & VIR_DOMAIN_VCPU_MAXIMUM) ? def->maxvcpus : def->vcpus; cleanup: if (vm) virObjectUnlock(vm); virObjectUnref(caps); + VIR_FREE(cpuinfo); return ret; } -- 1.8.1.5

This patch adds support for agent-based cpu disabling and enabling to qemuDomainSetVcpusFlags() API. --- Notes: Version 2: - produce error message if setting of vcpus fails midways src/qemu/qemu_driver.c | 129 ++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 116 insertions(+), 13 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index d0508e9..f045afd 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -3678,6 +3678,68 @@ unsupported: static int +qemuDomainPrepareAgentVCPUs(unsigned int nvcpus, + qemuAgentCPUInfoPtr cpuinfo, + int ncpuinfo) +{ + int i; + int nonline = 0; + int nofflinable = 0; + + /* count the active and offlinable cpus */ + for (i = 0; i < ncpuinfo; i++) { + if (cpuinfo[i].online) + nonline++; + + if (cpuinfo[i].offlinable && cpuinfo[i].online) + nofflinable++; + + /* This shouldn't happen, but we can't trust the guest agent */ + if (!cpuinfo[i].online && !cpuinfo[i].offlinable) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("Invalid data provided by guest agent")); + return -1; + } + } + + /* the guest agent reported less cpus than requested */ + if (nvcpus > ncpuinfo) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest agent reports less cpu than requested")); + return -1; + } + + /* not enough offlinable CPUs to support the request */ + if (nvcpus < nonline - nofflinable) { + virReportError(VIR_ERR_INVALID_ARG, "%s", + _("Cannot offline enough CPUs")); + return -1; + } + + for (i = 0; i < ncpuinfo; i++) { + if (nvcpus < nonline) { + /* unplug */ + if (cpuinfo[i].offlinable && cpuinfo[i].online) { + cpuinfo[i].online = false; + nonline--; + } + } else if (nvcpus > nonline) { + /* plug */ + if (!cpuinfo[i].online) { + cpuinfo[i].online = true; + nonline++; + } + } else { + /* done */ + break; + } + } + + return 0; +} + + +static int qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus, unsigned int flags) { @@ -3688,10 +3750,14 @@ qemuDomainSetVcpusFlags(virDomainPtr 
dom, unsigned int nvcpus, bool maximum; virQEMUDriverConfigPtr cfg = NULL; virCapsPtr caps = NULL; + qemuAgentCPUInfoPtr cpuinfo = NULL; + int ncpuinfo; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG | - VIR_DOMAIN_VCPU_MAXIMUM, -1); + VIR_DOMAIN_VCPU_MAXIMUM | + VIR_DOMAIN_VCPU_AGENT, -1); if (!nvcpus || (unsigned short) nvcpus != nvcpus) { virReportError(VIR_ERR_INVALID_ARG, @@ -3706,6 +3772,8 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus, if (!(vm = qemuDomObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) goto cleanup; @@ -3731,22 +3799,56 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus, goto endjob; } - if (flags & VIR_DOMAIN_AFFECT_LIVE) { - if (qemuDomainHotplugVcpus(driver, vm, nvcpus) < 0) + if (flags & VIR_DOMAIN_VCPU_AGENT) { + if (flags & VIR_DOMAIN_AFFECT_CONFIG) { + virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", + _("chainging of maximum vCPU count isn't supported " + "via guest agent")); goto endjob; - } - - if (flags & VIR_DOMAIN_AFFECT_CONFIG) { - if (maximum) { - persistentDef->maxvcpus = nvcpus; - if (nvcpus < persistentDef->vcpus) - persistentDef->vcpus = nvcpus; - } else { - persistentDef->vcpus = nvcpus; } - if (virDomainSaveConfig(cfg->configDir, persistentDef) < 0) + qemuDomainObjEnterAgent(vm); + ncpuinfo = qemuAgentGetVCPUs(priv->agent, &cpuinfo); + qemuDomainObjExitAgent(vm); + + if (ncpuinfo < 0) + goto endjob; + + if (qemuDomainPrepareAgentVCPUs(nvcpus, cpuinfo, ncpuinfo) < 0) + goto endjob; + + qemuDomainObjEnterAgent(vm); + ret = qemuAgentSetVCPUs(priv->agent, cpuinfo, ncpuinfo); + qemuDomainObjExitAgent(vm); + + if (ret < 0) + goto endjob; + + if (ret < ncpuinfo) { + virReportError(VIR_ERR_INTERNAL_ERROR, + _("failed to set state of cpu %d via guest agent"), + cpuinfo[ret-1].id); + ret = -1; goto endjob; + } + } else { + if (flags & VIR_DOMAIN_AFFECT_LIVE) { + if 
(qemuDomainHotplugVcpus(driver, vm, nvcpus) < 0) + goto endjob; + } + + if (flags & VIR_DOMAIN_AFFECT_CONFIG) { + if (maximum) { + persistentDef->maxvcpus = nvcpus; + if (nvcpus < persistentDef->vcpus) + persistentDef->vcpus = nvcpus; + } else { + persistentDef->vcpus = nvcpus; + } + + if (virDomainSaveConfig(cfg->configDir, persistentDef) < 0) + goto endjob; + } } ret = 0; @@ -3759,6 +3861,7 @@ cleanup: if (vm) virObjectUnlock(vm); virObjectUnref(caps); + VIR_FREE(cpuinfo); virObjectUnref(cfg); return ret; } -- 1.8.1.5
participants (3)
-
Daniel P. Berrange
-
Eric Blake
-
Peter Krempa