[libvirt] [PATCHv2 00/14] CPU Hotplug series

This is a rebased and a bit improved version of the first posting of support for cpu hotplug related stuff. Since nobody reviewed v1 I did't specify changes to the previous version. Peter Krempa (14): virsh-domain-monitor: Remove ATTRIBUTE_UNUSED from a argument qemu: Use bool instead of int in qemuMonitorSetCPU APIs qemu: Extract more information about vCPUs and threads qemu_agent: Introduce helpers for agent based CPU hot(un)plug API: Introduce VIR_DOMAIN_VCPU_AGENT, for agent based CPU hot(un)plug qemu: Implement request of vCPU state using the guest agent qemu: Implement support for VIR_DOMAIN_VCPU in qemuDomainSetVcpusFlags lib: Add API to map virtual cpus of a guest virsh-domain-monitor: Implement command to map guest vCPUs qemu: Implement virDomainGetVCPUMap for the qemu driver qemu: Implement new QMP command for cpu hotplug lib: Add API to modify vCPU state from the guest using the guest agent virsh-domain: Implement command for virDomainSetGuestVcpu qemu: Implement virDomainSetGuestVcpu in qemu driver daemon/remote.c | 54 +++++ include/libvirt/libvirt.h.in | 26 +++ python/generator.py | 1 + python/libvirt-override-api.xml | 7 + python/libvirt-override.c | 66 ++++++ src/driver.h | 14 ++ src/libvirt.c | 153 ++++++++++++- src/libvirt_public.syms | 7 + src/qemu/qemu_agent.c | 148 +++++++++++++ src/qemu/qemu_agent.h | 12 ++ src/qemu/qemu_driver.c | 468 +++++++++++++++++++++++++++++++++++++--- src/qemu/qemu_monitor.c | 11 +- src/qemu/qemu_monitor.h | 13 +- src/qemu/qemu_monitor_json.c | 86 ++++++-- src/qemu/qemu_monitor_json.h | 4 +- src/qemu/qemu_monitor_text.c | 94 ++++---- src/qemu/qemu_monitor_text.h | 4 +- src/qemu/qemu_process.c | 63 ++++-- src/remote/remote_driver.c | 48 +++++ src/remote/remote_protocol.x | 31 ++- src/remote_protocol-structs | 20 ++ tools/virsh-domain-monitor.c | 112 +++++++++- tools/virsh-domain.c | 102 ++++++++- tools/virsh.pod | 42 +++- 24 files changed, 1452 insertions(+), 134 deletions(-) -- 1.8.2.1

The "cmd" argument in cmdList is now used. Unmark it as unused. --- tools/virsh-domain-monitor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/virsh-domain-monitor.c b/tools/virsh-domain-monitor.c index b284154..3ba829c 100644 --- a/tools/virsh-domain-monitor.c +++ b/tools/virsh-domain-monitor.c @@ -1689,7 +1689,7 @@ static const vshCmdOptDef opts_list[] = { if (vshCommandOptBool(cmd, NAME)) \ flags |= (FLAG) static bool -cmdList(vshControl *ctl, const vshCmd *cmd ATTRIBUTE_UNUSED) +cmdList(vshControl *ctl, const vshCmd *cmd) { bool managed = vshCommandOptBool(cmd, "managed-save"); bool optTitle = vshCommandOptBool(cmd, "title"); -- 1.8.2.1

On Wed, Jun 05, 2013 at 03:43:52PM +0200, Peter Krempa wrote:
The "cmd" argument in cmdList is now used. Unmark it as unused. --- tools/virsh-domain-monitor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tools/virsh-domain-monitor.c b/tools/virsh-domain-monitor.c index b284154..3ba829c 100644 --- a/tools/virsh-domain-monitor.c +++ b/tools/virsh-domain-monitor.c @@ -1689,7 +1689,7 @@ static const vshCmdOptDef opts_list[] = { if (vshCommandOptBool(cmd, NAME)) \ flags |= (FLAG) static bool -cmdList(vshControl *ctl, const vshCmd *cmd ATTRIBUTE_UNUSED) +cmdList(vshControl *ctl, const vshCmd *cmd) { bool managed = vshCommandOptBool(cmd, "managed-save"); bool optTitle = vshCommandOptBool(cmd, "title");
ACK, trivial Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On 06/07/13 15:01, Daniel P. Berrange wrote:
On Wed, Jun 05, 2013 at 03:43:52PM +0200, Peter Krempa wrote:
The "cmd" argument in cmdList is now used. Unmark it as unused. --- tools/virsh-domain-monitor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
ACK, trivial
Pushed; Thanks. Peter

The 'online' parameter has only two possible values. Use a bool for it. --- src/qemu/qemu_driver.c | 4 ++-- src/qemu/qemu_monitor.c | 2 +- src/qemu/qemu_monitor.h | 2 +- src/qemu/qemu_monitor_json.c | 2 +- src/qemu/qemu_monitor_json.h | 2 +- src/qemu/qemu_monitor_text.c | 2 +- src/qemu/qemu_monitor_text.h | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index db56823..e13b914 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -3538,7 +3538,7 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, if (nvcpus > vcpus) { for (i = vcpus; i < nvcpus; i++) { /* Online new CPU */ - rc = qemuMonitorSetCPU(priv->mon, i, 1); + rc = qemuMonitorSetCPU(priv->mon, i, true); if (rc == 0) goto unsupported; if (rc < 0) @@ -3549,7 +3549,7 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, } else { for (i = vcpus - 1; i >= nvcpus; i--) { /* Offline old CPU */ - rc = qemuMonitorSetCPU(priv->mon, i, 0); + rc = qemuMonitorSetCPU(priv->mon, i, false); if (rc == 0) goto unsupported; if (rc < 0) diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c index ad326c5..9733aeb 100644 --- a/src/qemu/qemu_monitor.c +++ b/src/qemu/qemu_monitor.c @@ -1670,7 +1670,7 @@ int qemuMonitorSetBalloon(qemuMonitorPtr mon, } -int qemuMonitorSetCPU(qemuMonitorPtr mon, int cpu, int online) +int qemuMonitorSetCPU(qemuMonitorPtr mon, int cpu, bool online) { int ret; VIR_DEBUG("mon=%p cpu=%d online=%d", mon, cpu, online); diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index a607712..3d9afa3 100644 --- a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -299,7 +299,7 @@ int qemuMonitorExpirePassword(qemuMonitorPtr mon, const char *expire_time); int qemuMonitorSetBalloon(qemuMonitorPtr mon, unsigned long newmem); -int qemuMonitorSetCPU(qemuMonitorPtr mon, int cpu, int online); +int qemuMonitorSetCPU(qemuMonitorPtr mon, int cpu, bool online); /* XXX should we pass the 
virDomainDiskDefPtr instead diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index 2b73884..26bf09b 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -2076,7 +2076,7 @@ cleanup: * or -1 on failure */ int qemuMonitorJSONSetCPU(qemuMonitorPtr mon, - int cpu, int online) + int cpu, bool online) { /* XXX Update to use QMP, if QMP ever adds support for cpu hotplug */ VIR_DEBUG("no QMP support for cpu_set, trying HMP"); diff --git a/src/qemu/qemu_monitor_json.h b/src/qemu/qemu_monitor_json.h index 74e2476..d79b86b 100644 --- a/src/qemu/qemu_monitor_json.h +++ b/src/qemu/qemu_monitor_json.h @@ -94,7 +94,7 @@ int qemuMonitorJSONExpirePassword(qemuMonitorPtr mon, const char *expire_time); int qemuMonitorJSONSetBalloon(qemuMonitorPtr mon, unsigned long newmem); -int qemuMonitorJSONSetCPU(qemuMonitorPtr mon, int cpu, int online); +int qemuMonitorJSONSetCPU(qemuMonitorPtr mon, int cpu, bool online); int qemuMonitorJSONEjectMedia(qemuMonitorPtr mon, const char *dev_name, diff --git a/src/qemu/qemu_monitor_text.c b/src/qemu/qemu_monitor_text.c index d4ee93d..aa4145e 100644 --- a/src/qemu/qemu_monitor_text.c +++ b/src/qemu/qemu_monitor_text.c @@ -1231,7 +1231,7 @@ int qemuMonitorTextSetBalloon(qemuMonitorPtr mon, * Returns: 0 if CPU hotplug not supported, +1 if CPU hotplug worked * or -1 on failure */ -int qemuMonitorTextSetCPU(qemuMonitorPtr mon, int cpu, int online) +int qemuMonitorTextSetCPU(qemuMonitorPtr mon, int cpu, bool online) { char *cmd; char *reply = NULL; diff --git a/src/qemu/qemu_monitor_text.h b/src/qemu/qemu_monitor_text.h index fb8e904..5218a8b 100644 --- a/src/qemu/qemu_monitor_text.h +++ b/src/qemu/qemu_monitor_text.h @@ -90,7 +90,7 @@ int qemuMonitorTextExpirePassword(qemuMonitorPtr mon, const char *expire_time); int qemuMonitorTextSetBalloon(qemuMonitorPtr mon, unsigned long newmem); -int qemuMonitorTextSetCPU(qemuMonitorPtr mon, int cpu, int online); +int qemuMonitorTextSetCPU(qemuMonitorPtr mon, int 
cpu, bool online); int qemuMonitorTextEjectMedia(qemuMonitorPtr mon, const char *dev_name, -- 1.8.2.1

On Wed, Jun 05, 2013 at 03:43:53PM +0200, Peter Krempa wrote:
The 'online' parameter has only two possible values. Use a bool for it. --- src/qemu/qemu_driver.c | 4 ++-- src/qemu/qemu_monitor.c | 2 +- src/qemu/qemu_monitor.h | 2 +- src/qemu/qemu_monitor_json.c | 2 +- src/qemu/qemu_monitor_json.h | 2 +- src/qemu/qemu_monitor_text.c | 2 +- src/qemu/qemu_monitor_text.h | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-)
ACK Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On 06/07/13 15:02, Daniel P. Berrange wrote:
On Wed, Jun 05, 2013 at 03:43:53PM +0200, Peter Krempa wrote:
The 'online' parameter has only two possible values. Use a bool for it. --- src/qemu/qemu_driver.c | 4 ++-- src/qemu/qemu_monitor.c | 2 +- src/qemu/qemu_monitor.h | 2 +- src/qemu/qemu_monitor_json.c | 2 +- src/qemu/qemu_monitor_json.h | 2 +- src/qemu/qemu_monitor_text.c | 2 +- src/qemu/qemu_monitor_text.h | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-)
ACK
Pushed; Thanks.
Daniel
Peter

The qemu monitor provides more information about vCPUs of a guest than we needed currently. This patch upgrades the extraction function to easily extract new data about the vCPUs and fixes code to cope with the new structure. The information extracted here will be later used for mapping of vCPUs of a guest. This patch also refactors the function used to parse data from the text monitor. --- src/qemu/qemu_driver.c | 31 ++++++++------- src/qemu/qemu_monitor.c | 9 +++-- src/qemu/qemu_monitor.h | 11 +++++- src/qemu/qemu_monitor_json.c | 47 +++++++++++++--------- src/qemu/qemu_monitor_json.h | 2 +- src/qemu/qemu_monitor_text.c | 92 +++++++++++++++++++++++++------------------- src/qemu/qemu_monitor_text.h | 2 +- src/qemu/qemu_process.c | 63 ++++++++++++++++++++---------- 8 files changed, 159 insertions(+), 98 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index e13b914..3db21d4 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -3526,8 +3526,8 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, int ret = -1; int oldvcpus = vm->def->vcpus; int vcpus = oldvcpus; - pid_t *cpupids = NULL; - int ncpupids; + qemuMonitorCPUInfoPtr cpuinfo = NULL; + int ncpuinfo; virCgroupPtr cgroup_vcpu = NULL; qemuDomainObjEnterMonitor(driver, vm); @@ -3568,13 +3568,13 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, * or don't have the "info cpus" command (and they don't support multiple * CPUs anyways), so errors in the re-detection will not be treated * fatal */ - if ((ncpupids = qemuMonitorGetCPUInfo(priv->mon, &cpupids)) <= 0) { + if ((ncpuinfo = qemuMonitorGetCPUInfo(priv->mon, &cpuinfo)) <= 0) { virResetLastError(); goto cleanup; } /* check if hotplug has failed */ - if (vcpus < oldvcpus && ncpupids == oldvcpus) { + if (vcpus < oldvcpus && ncpuinfo == oldvcpus) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", _("qemu didn't unplug the vCPUs properly")); vcpus = oldvcpus; @@ -3582,11 +3582,11 @@ static int 
qemuDomainHotplugVcpus(virQEMUDriverPtr driver, goto cleanup; } - if (ncpupids != vcpus) { + if (ncpuinfo != vcpus) { virReportError(VIR_ERR_INTERNAL_ERROR, _("got wrong number of vCPU pids from QEMU monitor. " "got %d, wanted %d"), - ncpupids, vcpus); + ncpuinfo, vcpus); ret = -1; goto cleanup; } @@ -3606,11 +3606,11 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, } /* Add vcpu thread to the cgroup */ - rv = virCgroupAddTask(cgroup_vcpu, cpupids[i]); + rv = virCgroupAddTask(cgroup_vcpu, cpuinfo[i].thread_id); if (rv < 0) { virReportSystemError(-rv, _("unable to add vcpu %d task %d to cgroup"), - i, cpupids[i]); + i, cpuinfo[i].thread_id); virCgroupRemove(cgroup_vcpu); goto cleanup; } @@ -3650,7 +3650,7 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, goto cleanup; } } else { - if (virProcessSetAffinity(cpupids[i], + if (virProcessSetAffinity(cpuinfo[i].thread_id, vcpupin->cpumask) < 0) { virReportError(VIR_ERR_SYSTEM_ERROR, _("failed to set cpu affinity for vcpu %d"), @@ -3691,15 +3691,18 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, } } - priv->nvcpupids = ncpupids; - VIR_FREE(priv->vcpupids); - priv->vcpupids = cpupids; - cpupids = NULL; + if (VIR_REALLOC_N(priv->vcpupids, ncpuinfo) < 0) { + virReportOOMError(); + goto cleanup; + } + + /* copy the thread id's */ + for (i = 0; i < ncpuinfo; i++) + priv->vcpupids[i] = cpuinfo[i].thread_id; cleanup: qemuDomainObjExitMonitor(driver, vm); vm->def->vcpus = vcpus; - VIR_FREE(cpupids); virDomainAuditVcpu(vm, oldvcpus, nvcpus, "update", rc == 1); if (cgroup_vcpu) virCgroupFree(&cgroup_vcpu); diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c index 9733aeb..07f89a2 100644 --- a/src/qemu/qemu_monitor.c +++ b/src/qemu/qemu_monitor.c @@ -1282,8 +1282,9 @@ int qemuMonitorSystemReset(qemuMonitorPtr mon) } -int qemuMonitorGetCPUInfo(qemuMonitorPtr mon, - int **pids) +int +qemuMonitorGetCPUInfo(qemuMonitorPtr mon, + qemuMonitorCPUInfoPtr *info) { int ret; VIR_DEBUG("mon=%p", 
mon); @@ -1295,9 +1296,9 @@ int qemuMonitorGetCPUInfo(qemuMonitorPtr mon, } if (mon->json) - ret = qemuMonitorJSONGetCPUInfo(mon, pids); + ret = qemuMonitorJSONGetCPUInfo(mon, info); else - ret = qemuMonitorTextGetCPUInfo(mon, pids); + ret = qemuMonitorTextGetCPUInfo(mon, info); return ret; } diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index 3d9afa3..c51fee3 100644 --- a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -252,8 +252,17 @@ int qemuMonitorGetStatus(qemuMonitorPtr mon, int qemuMonitorSystemReset(qemuMonitorPtr mon); int qemuMonitorSystemPowerdown(qemuMonitorPtr mon); +typedef struct _qemuMonitorCPUInfo qemuMonitorCPUInfo; +typedef qemuMonitorCPUInfo *qemuMonitorCPUInfoPtr; +struct _qemuMonitorCPUInfo { + unsigned int id; + bool active; + pid_t thread_id; +}; + int qemuMonitorGetCPUInfo(qemuMonitorPtr mon, - int **pids); + qemuMonitorCPUInfoPtr *info); + int qemuMonitorGetVirtType(qemuMonitorPtr mon, int *virtType); int qemuMonitorGetBalloonInfo(qemuMonitorPtr mon, diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index 26bf09b..4a69fec 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -1164,17 +1164,17 @@ int qemuMonitorJSONSystemReset(qemuMonitorPtr mon) /* - * [ { "CPU": 0, "current": true, "halted": false, "pc": 3227107138 }, - * { "CPU": 1, "current": false, "halted": true, "pc": 7108165 } ] + * [ { "CPU": 0, "current": true, "halted": false, "pc": 3227107138, "thread_id": 1234}, + * { "CPU": 1, "current": false, "halted": true, "pc": 7108165, "thread_id": 5678 } ] */ static int qemuMonitorJSONExtractCPUInfo(virJSONValuePtr reply, - int **pids) + qemuMonitorCPUInfoPtr *info) { virJSONValuePtr data; int ret = -1; int i; - int *threads = NULL; + qemuMonitorCPUInfoPtr cpus = NULL; int ncpus; if (!(data = virJSONValueObjectGet(reply, "return"))) { @@ -1195,49 +1195,60 @@ qemuMonitorJSONExtractCPUInfo(virJSONValuePtr reply, goto cleanup; } - if (VIR_REALLOC_N(threads, ncpus) 
< 0) { + if (VIR_ALLOC_N(cpus, ncpus) < 0) { virReportOOMError(); goto cleanup; } for (i = 0; i < ncpus; i++) { virJSONValuePtr entry = virJSONValueArrayGet(data, i); - int thread; + bool halted; + if (!entry) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("character device information was missing array element")); + _("cpu information was missing array element")); goto cleanup; } - if (virJSONValueObjectGetNumberInt(entry, "thread_id", &thread) < 0) { - /* Only qemu-kvm tree includs thread_id, so treat this as - non-fatal, simply returning no data */ - ret = 0; + if (virJSONValueObjectGetNumberUint(entry, "CPU", &cpus[i].id) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("CPU id missing in cpu information")); goto cleanup; } - threads[i] = thread; + if (virJSONValueObjectGetNumberInt(entry, "thread_id", + &cpus[i].thread_id) < 0) { + /* Some versions of qemu don't provide this information */ + cpus[i].thread_id = 0; + } + + if (virJSONValueObjectGetBoolean(entry, "halted", &halted) < 0) { + /* Not that important that we should fail */ + halted = false; + } + + cpus[i].active = !halted; } - *pids = threads; - threads = NULL; + *info = cpus; + cpus = NULL; ret = ncpus; cleanup: - VIR_FREE(threads); + VIR_FREE(cpus); return ret; } int qemuMonitorJSONGetCPUInfo(qemuMonitorPtr mon, - int **pids) + qemuMonitorCPUInfoPtr *info) { int ret; virJSONValuePtr cmd = qemuMonitorJSONMakeCommand("query-cpus", NULL); virJSONValuePtr reply = NULL; - *pids = NULL; + *info = NULL; if (!cmd) return -1; @@ -1248,7 +1259,7 @@ int qemuMonitorJSONGetCPUInfo(qemuMonitorPtr mon, ret = qemuMonitorJSONCheckError(cmd, reply); if (ret == 0) - ret = qemuMonitorJSONExtractCPUInfo(reply, pids); + ret = qemuMonitorJSONExtractCPUInfo(reply, info); virJSONValueFree(cmd); virJSONValueFree(reply); diff --git a/src/qemu/qemu_monitor_json.h b/src/qemu/qemu_monitor_json.h index d79b86b..ac077b1 100644 --- a/src/qemu/qemu_monitor_json.h +++ b/src/qemu/qemu_monitor_json.h @@ -53,7 +53,7 @@ 
int qemuMonitorJSONSystemPowerdown(qemuMonitorPtr mon); int qemuMonitorJSONSystemReset(qemuMonitorPtr mon); int qemuMonitorJSONGetCPUInfo(qemuMonitorPtr mon, - int **pids); + qemuMonitorCPUInfoPtr *info); int qemuMonitorJSONGetVirtType(qemuMonitorPtr mon, int *virtType); int qemuMonitorJSONGetBalloonInfo(qemuMonitorPtr mon, diff --git a/src/qemu/qemu_monitor_text.c b/src/qemu/qemu_monitor_text.c index aa4145e..a967ae6 100644 --- a/src/qemu/qemu_monitor_text.c +++ b/src/qemu/qemu_monitor_text.c @@ -501,13 +501,16 @@ int qemuMonitorTextSystemReset(qemuMonitorPtr mon) { } -int qemuMonitorTextGetCPUInfo(qemuMonitorPtr mon, - int **pids) +int +qemuMonitorTextGetCPUInfo(qemuMonitorPtr mon, + qemuMonitorCPUInfoPtr *info) { char *qemucpus = NULL; - char *line; - pid_t *cpupids = NULL; - size_t ncpupids = 0; + char **cpulines = NULL; + char **cpuline; + qemuMonitorCPUInfoPtr cpus = NULL; + size_t ncpus = 0; + int ret = -1; if (qemuMonitorHMPCommand(mon, "info cpus", &qemucpus) < 0) return -1; @@ -516,52 +519,63 @@ int qemuMonitorTextGetCPUInfo(qemuMonitorPtr mon, * This is the gross format we're about to parse :-{ * * (qemu) info cpus - * * CPU #0: pc=0x00000000000f0c4a thread_id=30019 - * CPU #1: pc=0x00000000fffffff0 thread_id=30020 - * CPU #2: pc=0x00000000fffffff0 thread_id=30021 + * * CPU #0: pc=0xffffffff8103b90b (halted) thread_id=874519 + * CPU #1: pc=0xffffffff8102e191 (halted) thread_id=874520 + * CPU #2: pc=0xffffffff812c12ae thread_id=874521 + * CPU #3: pc=0xffffffff810521cf thread_id=874522 * */ - line = qemucpus; - do { - char *offset = NULL; - char *end = NULL; + if (!(cpulines = virStringSplit(qemucpus, "\n", 0))) + goto cleanup; + + cpuline = cpulines; + + while (*cpuline++) { + char *offset; + char *end; + int id; int tid = 0; + bool halted = false; - /* Extract host Thread ID */ - if ((offset = strstr(line, "thread_id=")) == NULL) - goto error; + /* Extract vCPU id */ + if (!(offset = strchr(*cpuline, '#')) || + virStrToLong_i(offset + 1, &end, 10, &id) < 
0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("failed to extract vCPU id")); + goto cleanup; + } - if (virStrToLong_i(offset + strlen("thread_id="), &end, 10, &tid) < 0) - goto error; - if (end == NULL || !c_isspace(*end)) - goto error; + /* extract cpu state */ + if (strstr(*cpuline, "(halted)")) + halted = true; - if (VIR_REALLOC_N(cpupids, ncpupids+1) < 0) - goto error; + /* Extract host Thread ID, some versions of qemu may lack this info */ + if ((offset = strstr(*cpuline, "thread_id="))) { + if (virStrToLong_i(offset + strlen("thread_id="), &end, 10, &tid) < 0) + tid = 0; + } - VIR_DEBUG("tid=%d", tid); - cpupids[ncpupids++] = tid; + ncpus++; - /* Skip to next data line */ - line = strchr(offset, '\r'); - if (line == NULL) - line = strchr(offset, '\n'); - } while (line != NULL); + if (VIR_REALLOC_N(cpus, ncpus) < 0) { + virReportOOMError(); + goto cleanup; + } - /* Validate we got data for all VCPUs we expected */ - VIR_FREE(qemucpus); - *pids = cpupids; - return ncpupids; + cpus[ncpus].id = id; + cpus[ncpus].thread_id = tid; + cpus[ncpus].active = !halted; + } -error: + *info = cpus; + ret = ncpus; + +cleanup: VIR_FREE(qemucpus); - VIR_FREE(cpupids); + VIR_FREE(cpus); + virStringFreeList(cpulines); - /* Returning 0 to indicate non-fatal failure, since - * older QEMU does not have VCPU<->PID mapping and - * we don't want to fail on that - */ - return 0; + return ret; } diff --git a/src/qemu/qemu_monitor_text.h b/src/qemu/qemu_monitor_text.h index 5218a8b..08f4280 100644 --- a/src/qemu/qemu_monitor_text.h +++ b/src/qemu/qemu_monitor_text.h @@ -50,7 +50,7 @@ int qemuMonitorTextSystemPowerdown(qemuMonitorPtr mon); int qemuMonitorTextSystemReset(qemuMonitorPtr mon); int qemuMonitorTextGetCPUInfo(qemuMonitorPtr mon, - int **pids); + qemuMonitorCPUInfoPtr *info); int qemuMonitorTextGetVirtType(qemuMonitorPtr mon, int *virtType); int qemuMonitorTextGetBalloonInfo(qemuMonitorPtr mon, diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 
d4fd4fb..8de14c3 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -1755,40 +1755,63 @@ static int qemuProcessDetectVcpuPIDs(virQEMUDriverPtr driver, virDomainObjPtr vm) { - pid_t *cpupids = NULL; - int ncpupids; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuMonitorCPUInfoPtr info = NULL; + int ninfo; + int i; + int ret = -1; - qemuDomainObjEnterMonitor(driver, vm); /* failure to get the VCPU<-> PID mapping or to execute the query * command will not be treated fatal as some versions of qemu don't * support this command */ - if ((ncpupids = qemuMonitorGetCPUInfo(priv->mon, &cpupids)) <= 0) { - qemuDomainObjExitMonitor(driver, vm); - virResetLastError(); + qemuDomainObjEnterMonitor(driver, vm); + ninfo = qemuMonitorGetCPUInfo(priv->mon, &info); + qemuDomainObjExitMonitor(driver, vm); - priv->nvcpupids = 1; - if (VIR_ALLOC_N(priv->vcpupids, priv->nvcpupids) < 0) { - virReportOOMError(); - return -1; - } - priv->vcpupids[0] = vm->pid; - return 0; + if (ninfo < 0) + goto fallback; + + if (VIR_ALLOC_N(priv->vcpupids, ninfo) < 0) { + virReportOOMError(); + goto cleanup; } - qemuDomainObjExitMonitor(driver, vm); - if (ncpupids != vm->def->vcpus) { + for (i = 0; i < ninfo; i++) { + /* some versions of qemu don't expose thread IDs. Fall back gracefully */ + if (info[i].thread_id == 0) + goto fallback; + + priv->vcpupids[i] = info[i].thread_id; + } + + if (ninfo != vm->def->vcpus) { virReportError(VIR_ERR_INTERNAL_ERROR, _("got wrong number of vCPU pids from QEMU monitor. 
" "got %d, wanted %d"), - ncpupids, vm->def->vcpus); - VIR_FREE(cpupids); - return -1; + ninfo, vm->def->vcpus); + VIR_FREE(priv->vcpupids); + goto cleanup; } - priv->nvcpupids = ncpupids; - priv->vcpupids = cpupids; + priv->nvcpupids = ninfo; + ret = 0; + +cleanup: + VIR_FREE(info); + return ret; + +fallback: + VIR_FREE(info); + VIR_FREE(priv->vcpupids); + virResetLastError(); + priv->nvcpupids = 1; + if (VIR_ALLOC_N(priv->vcpupids, priv->nvcpupids) < 0) { + virReportOOMError(); + return -1; + } + priv->vcpupids[0] = vm->pid; return 0; + } -- 1.8.2.1

On Wed, Jun 05, 2013 at 03:43:54PM +0200, Peter Krempa wrote:
The qemu monitor provides more information about the vCPUs of a guest than we currently need. This patch upgrades the extraction function to easily extract new data about the vCPUs and fixes the code to cope with the new structure. The information extracted here will later be used for mapping the vCPUs of a guest.
This patch also refactors the function used to parse data from the text monitor. --- src/qemu/qemu_driver.c | 31 ++++++++------- src/qemu/qemu_monitor.c | 9 +++-- src/qemu/qemu_monitor.h | 11 +++++- src/qemu/qemu_monitor_json.c | 47 +++++++++++++--------- src/qemu/qemu_monitor_json.h | 2 +- src/qemu/qemu_monitor_text.c | 92 +++++++++++++++++++++++++------------------- src/qemu/qemu_monitor_text.h | 2 +- src/qemu/qemu_process.c | 63 ++++++++++++++++++++---------- 8 files changed, 159 insertions(+), 98 deletions(-)
I realize this is not new code, but extra bonus points if you add a case to tests/qemumonitorjsontest.c for the function you are extending. Even more bonus points if you fancy creating an equivalent qemumonitorhmptest.c too. I don't want to hold up this patch set uneccessarily, but I think getting tests, particular of the HMP code would be pretty useful since the parsing code you have is pretty complex. ACK anyway. Tests can be done as a followup ontop of this series. Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On 06/07/13 15:06, Daniel P. Berrange wrote:
On Wed, Jun 05, 2013 at 03:43:54PM +0200, Peter Krempa wrote:
The qemu monitor provides more information about vCPUs of a guest than we needed currently. This patch upgrades the extraction function to easily extract new data about the vCPUs and fixes code to cope with the new structure. The information extracted here will be later used for mapping of vCPUs of a guest.
This patch also refactors the function used to parse data from the text monitor. --- src/qemu/qemu_driver.c | 31 ++++++++------- src/qemu/qemu_monitor.c | 9 +++-- src/qemu/qemu_monitor.h | 11 +++++- src/qemu/qemu_monitor_json.c | 47 +++++++++++++--------- src/qemu/qemu_monitor_json.h | 2 +- src/qemu/qemu_monitor_text.c | 92 +++++++++++++++++++++++++------------------- src/qemu/qemu_monitor_text.h | 2 +- src/qemu/qemu_process.c | 63 ++++++++++++++++++++---------- 8 files changed, 159 insertions(+), 98 deletions(-)
I realize this is not new code, but extra bonus points if you add a case to tests/qemumonitorjsontest.c for the function you are extending.
Even more bonus points if you fancy creating an equivalent qemumonitorhmptest.c too.
I don't want to hold up this patch set unnecessarily, but I think getting tests, particularly of the HMP code, would be pretty useful since the parsing code you have is pretty complex.
ACK anyway. Tests can be done as a followup ontop of this series.
Thanks for the review. This patch actually is necessary only for the new cpu mapping API so I didn't push it right now and will repost the rest of the series including the tests.
Daniel
Peter

The qemu guest agent allows to online and offline CPUs from the perspective of the guest. This patch adds helpers that call 'guest-get-vcpus' and 'guest-set-vcpus' guest agent functions and convert the data for internal libvirt usage. --- src/qemu/qemu_agent.c | 148 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_agent.h | 12 ++++ 2 files changed, 160 insertions(+) diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index 00fe13f..a51a97f 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -1463,3 +1463,151 @@ qemuAgentFSTrim(qemuAgentPtr mon, virJSONValueFree(reply); return ret; } + +int +qemuAgentGetVCPUs(qemuAgentPtr mon, + qemuAgentCPUInfoPtr *info) +{ + int ret = -1; + int i; + virJSONValuePtr cmd; + virJSONValuePtr reply = NULL; + virJSONValuePtr data = NULL; + int ndata; + + if (!(cmd = qemuAgentMakeCommand("guest-get-vcpus", NULL))) + return -1; + + if (qemuAgentCommand(mon, cmd, &reply, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) < 0 || + qemuAgentCheckError(cmd, reply) < 0) + goto cleanup; + + if (!(data = virJSONValueObjectGet(reply, "return"))) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest-get-vcpus reply was missing return data")); + goto cleanup; + } + + if (data->type != VIR_JSON_TYPE_ARRAY) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest-get-vcpus return information was not an array")); + goto cleanup; + } + + ndata = virJSONValueArraySize(data); + + if (VIR_ALLOC_N(*info, ndata) < 0) { + virReportOOMError(); + goto cleanup; + } + + for (i = 0; i < ndata; i++) { + virJSONValuePtr entry = virJSONValueArrayGet(data, i); + qemuAgentCPUInfoPtr in = *info + i; + + if (!entry) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("array element missing in guest-get-vcpus return " + "value")); + goto cleanup; + } + + if (virJSONValueObjectGetNumberUint(entry, "logical-id", &in->id) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'logical-id' missing in reply of guest-get-vcpus")); + goto 
cleanup; + } + + if (virJSONValueObjectGetBoolean(entry, "online", &in->online) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'online' missing in reply of guest-get-vcpus")); + goto cleanup; + } + + if (virJSONValueObjectGetBoolean(entry, "can-offline", + &in->offlinable) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'can-offline' missing in reply of guest-get-vcpus")); + goto cleanup; + } + } + + ret = ndata; + +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + virJSONValueFree(data); + return ret; +} + +/** + * Set the VCPU state using guest agent. + * + * Returns -1 on error, ninfo in case everything was successful and less than + * ninfo on a partial failure. + */ +int +qemuAgentSetVCPUs(qemuAgentPtr mon, + qemuAgentCPUInfoPtr info, + size_t ninfo) +{ + int ret = -1; + virJSONValuePtr cmd = NULL; + virJSONValuePtr reply = NULL; + virJSONValuePtr cpus = NULL; + virJSONValuePtr cpu = NULL; + size_t i; + + /* create the key data array */ + if (!(cpus = virJSONValueNewArray())) + goto no_memory; + + for (i = 0; i < ninfo; i++) { + qemuAgentCPUInfoPtr in = &info[i]; + + /* create single cpu object */ + if (!(cpu = virJSONValueNewObject())) + goto no_memory; + + if (virJSONValueObjectAppendNumberInt(cpu, "logical-id", in->id) < 0) + goto no_memory; + + if (virJSONValueObjectAppendBoolean(cpu, "online", in->online) < 0) + goto no_memory; + + if (virJSONValueArrayAppend(cpus, cpu) < 0) + goto no_memory; + + cpu = NULL; + } + + if (!(cmd = qemuAgentMakeCommand("guest-set-vcpus", + "a:vcpus", cpus, + NULL))) + goto cleanup; + + cpus = NULL; + + if (qemuAgentCommand(mon, cmd, &reply, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) < 0 || + qemuAgentCheckError(cmd, reply) < 0) + goto cleanup; + + if (virJSONValueObjectGetNumberInt(reply, "return", &ret) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("malformed return value")); + } + +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + virJSONValueFree(cpu); + 
virJSONValueFree(cpus); + return ret; + +no_memory: + virReportOOMError(); + goto cleanup; +} diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h index ba04e61..cf70653 100644 --- a/src/qemu/qemu_agent.h +++ b/src/qemu/qemu_agent.h @@ -82,4 +82,16 @@ int qemuAgentArbitraryCommand(qemuAgentPtr mon, int timeout); int qemuAgentFSTrim(qemuAgentPtr mon, unsigned long long minimum); + + +typedef struct _qemuAgentCPUInfo qemuAgentCPUInfo; +typedef qemuAgentCPUInfo *qemuAgentCPUInfoPtr; +struct _qemuAgentCPUInfo { + unsigned int id; /* logical cpu ID */ + bool online; /* true if the CPU is activated */ + bool offlinable; /* true if the CPU can be offlined */ +}; + +int qemuAgentGetVCPUs(qemuAgentPtr mon, qemuAgentCPUInfoPtr *info); +int qemuAgentSetVCPUs(qemuAgentPtr mon, qemuAgentCPUInfoPtr cpus, size_t ncpus); #endif /* __QEMU_AGENT_H__ */ -- 1.8.2.1

On Wed, Jun 05, 2013 at 03:43:55PM +0200, Peter Krempa wrote:
The qemu guest agent allows to online and offline CPUs from the perspective of the guest. This patch adds helpers that call 'guest-get-vcpus' and 'guest-set-vcpus' guest agent functions and convert the data for internal libvirt usage. --- src/qemu/qemu_agent.c | 148 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_agent.h | 12 ++++ 2 files changed, 160 insertions(+)
Again, extra bonus points if you create a qemuagenttest.c test case, copying the approach from qemumonitorjsontest.c Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On 06/07/13 15:10, Daniel P. Berrange wrote:
On Wed, Jun 05, 2013 at 03:43:55PM +0200, Peter Krempa wrote:
The qemu guest agent allows to online and offline CPUs from the perspective of the guest. This patch adds helpers that call 'guest-get-vcpus' and 'guest-set-vcpus' guest agent functions and convert the data for internal libvirt usage. --- src/qemu/qemu_agent.c | 148 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_agent.h | 12 ++++ 2 files changed, 160 insertions(+)
Again, extra bonus points if you create a qemuagenttest.c test case, copying the approach from qemumonitorjsontest.c
I will follow up with the tests in the posting of the rest of this series later. Pushed. Thanks.
Daniel
Peter

This flag will allow to use qemu guest agent commands to disable (offline) and enable (online) processors in a live guest that has the guest agent running. --- include/libvirt/libvirt.h.in | 1 + src/libvirt.c | 24 ++++++++++++++++++++++-- tools/virsh-domain.c | 25 +++++++++++++++++++++++-- tools/virsh.pod | 13 ++++++++++--- 4 files changed, 56 insertions(+), 7 deletions(-) diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 1804c93..e057be1 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -2120,6 +2120,7 @@ typedef enum { /* Additionally, these flags may be bitwise-OR'd in. */ VIR_DOMAIN_VCPU_MAXIMUM = (1 << 2), /* Max rather than current count */ + VIR_DOMAIN_VCPU_AGENT = (1 << 3), /* Use guest-agent based cpu hotplug */ } virDomainVcpuFlags; int virDomainSetVcpus (virDomainPtr domain, diff --git a/src/libvirt.c b/src/libvirt.c index 6967613..820519a 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -8906,6 +8906,12 @@ error: * equal to virConnectGetMaxVcpus(). Otherwise, this call affects the * current virtual CPU limit, which must be less than or equal to the * maximum limit. + * + * If @flags includes VIR_DOMAIN_VCPU_AGENT, then a guest agent is used to + * modify the number of processors used by a domain. This flag can only be used + * with live guests and is incompatible with VIR_DOMAIN_VCPU_MAXIMUM as the + * maximum limit can't be changed using the guest agent. + * * Not all hypervisors can support all flag combinations. * * Returns 0 in case of success, -1 in case of failure. 
@@ -8931,6 +8937,15 @@ virDomainSetVcpusFlags(virDomainPtr domain, unsigned int nvcpus, goto error; } + if (flags & VIR_DOMAIN_VCPU_AGENT && + flags & VIR_DOMAIN_VCPU_MAXIMUM) { + virReportInvalidArg(flags, + _("flags 'VIR_DOMAIN_VCPU_MAXIMUM' and " + "'VIR_DOMAIN_VCPU_AGENT' in '%s' are mutually " + "exclusive"), __FUNCTION__); + goto error; + } + virCheckNonZeroArgGoto(nvcpus, error); if ((unsigned short) nvcpus != nvcpus) { @@ -8974,7 +8989,11 @@ error: * * If @flags includes VIR_DOMAIN_VCPU_MAXIMUM, then the maximum * virtual CPU limit is queried. Otherwise, this call queries the - * current virtual CPU limit. + * current virtual CPU count. + * + * If @flags includes VIR_DOMAIN_VCPU_AGENT, then a guest agent is used to + * modify the number of processors used by a domain. This flag is only usable on + * live domains. * * Returns the number of vCPUs in case of success, -1 in case of failure. */ @@ -8998,7 +9017,8 @@ virDomainGetVcpusFlags(virDomainPtr domain, unsigned int flags) if ((flags & VIR_DOMAIN_AFFECT_LIVE) && (flags & VIR_DOMAIN_AFFECT_CONFIG)) { virReportInvalidArg(flags, - _("flags 'affect live' and 'affect config' in %s are mutually exclusive"), + _("flags 'affect live' and 'affect config' in %s " + "are mutually exclusive"), __FUNCTION__); goto error; } diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 767e288..0616487 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -5161,6 +5161,10 @@ static const vshCmdOptDef opts_vcpucount[] = { .type = VSH_OT_BOOL, .help = N_("get value according to current domain state") }, + {.name = "agent", + .type = VSH_OT_BOOL, + .help = N_("use guest agent based hotplug") + }, {.name = NULL} }; @@ -5203,6 +5207,11 @@ vshCPUCountCollect(vshControl *ctl, last_error->code == VIR_ERR_INVALID_ARG)) goto cleanup; + if (flags & VIR_DOMAIN_VCPU_AGENT) { + vshError(ctl, "%s", _("Failed to retrieve vCPU count via guest agent")); + goto cleanup; + } + if (!(flags & (VIR_DOMAIN_AFFECT_LIVE | 
VIR_DOMAIN_AFFECT_CONFIG)) && virDomainIsActive(dom) == 1) flags |= VIR_DOMAIN_AFFECT_LIVE; @@ -5258,7 +5267,8 @@ cmdVcpucount(vshControl *ctl, const vshCmd *cmd) bool config = vshCommandOptBool(cmd, "config"); bool live = vshCommandOptBool(cmd, "live"); bool current = vshCommandOptBool(cmd, "current"); - bool all = maximum + active + current + config + live == 0; + bool agent = vshCommandOptBool(cmd, "agent"); + bool all = maximum + active + current + config + live + agent == 0; unsigned int flags = VIR_DOMAIN_AFFECT_CURRENT; /* Backwards compatibility: prior to 0.9.4, @@ -5273,6 +5283,7 @@ cmdVcpucount(vshControl *ctl, const vshCmd *cmd) VSH_EXCLUSIVE_OPTIONS_VAR(current, live); VSH_EXCLUSIVE_OPTIONS_VAR(current, config); VSH_EXCLUSIVE_OPTIONS_VAR(active, maximum); + VSH_EXCLUSIVE_OPTIONS_VAR(agent, config); if (live) flags |= VIR_DOMAIN_AFFECT_LIVE; @@ -5280,6 +5291,8 @@ cmdVcpucount(vshControl *ctl, const vshCmd *cmd) flags |= VIR_DOMAIN_AFFECT_CONFIG; if (maximum) flags |= VIR_DOMAIN_VCPU_MAXIMUM; + if (agent) + flags |= VIR_DOMAIN_VCPU_AGENT; if (!(dom = vshCommandOptDomain(ctl, cmd, NULL))) return false; @@ -5749,6 +5762,10 @@ static const vshCmdOptDef opts_emulatorpin[] = { .type = VSH_OT_BOOL, .help = N_("affect current domain") }, + {.name = "agent", + .type = VSH_OT_BOOL, + .help = N_("use guest agent based hotplug") + }, {.name = NULL} }; @@ -5885,17 +5902,21 @@ cmdSetvcpus(vshControl *ctl, const vshCmd *cmd) bool config = vshCommandOptBool(cmd, "config"); bool live = vshCommandOptBool(cmd, "live"); bool current = vshCommandOptBool(cmd, "current"); + bool agent = vshCommandOptBool(cmd, "agent"); unsigned int flags = VIR_DOMAIN_AFFECT_CURRENT; VSH_EXCLUSIVE_OPTIONS_VAR(current, live); VSH_EXCLUSIVE_OPTIONS_VAR(current, config); + VSH_EXCLUSIVE_OPTIONS_VAR(agent, config); if (config) flags |= VIR_DOMAIN_AFFECT_CONFIG; if (live) flags |= VIR_DOMAIN_AFFECT_LIVE; + if (agent) + flags |= VIR_DOMAIN_VCPU_AGENT; /* none of the options were specified */ - if 
(!current && !live && !config && !maximum) + if (!current && flags == 0) flags = -1; if (!(dom = vshCommandOptDomain(ctl, cmd, NULL))) diff --git a/tools/virsh.pod b/tools/virsh.pod index 69c290f..d537653 100644 --- a/tools/virsh.pod +++ b/tools/virsh.pod @@ -1585,7 +1585,7 @@ exclusive. If no flag is specified, behavior is different depending on hypervisor. =item B<setvcpus> I<domain> I<count> [I<--maximum>] [[I<--config>] -[I<--live>] | [I<--current>]] +[I<--live>] | [I<--current>]] [I<--agent>] Change the number of virtual CPUs active in a guest domain. By default, this command works on active guest domains. To change the settings for an @@ -1611,6 +1611,10 @@ is up to the hypervisor whether the I<--config> flag is also assumed, and therefore whether the XML configuration is adjusted to make the change persistent. +If I<--agent> is specified, then guest agent commands are used to retrieve the +count of available vCPUs from the perspective of the guest. This flag is usable +only for live domains. + The I<--maximum> flag controls the maximum number of virtual cpus that can be hot-plugged the next time the domain is booted. As such, it must only be used with the I<--config> flag, and not with the I<--live> flag. @@ -1737,7 +1741,7 @@ NOTE: For an inactive domain, the domain name or UUID must be used as the I<domain>. =item B<vcpucount> I<domain> [{I<--maximum> | I<--active>} -{I<--config> | I<--live> | I<--current>}] +{I<--config> | I<--live> | I<--current>}] [I<--agent>] Print information about the virtual cpu counts of the given I<domain>. If no flags are specified, all possible counts are @@ -1754,7 +1758,10 @@ time the domain will be booted, I<--live> requires a running domain and lists current values, and I<--current> queries according to the current state of the domain (corresponding to I<--live> if running, or I<--config> if inactive); these three flags are mutually exclusive. -Thus, this command always takes exactly zero or two flags. 
+ +If I<--agent> is specified, then guest agent commands are used to retrieve the +count of available vCPUs from the perspective of the guest. This flag is usable +only for live domains. =item B<vcpuinfo> I<domain> -- 1.8.2.1

On Wed, Jun 05, 2013 at 03:43:56PM +0200, Peter Krempa wrote:
This flag will allow the use of qemu guest agent commands to disable (offline) and enable (online) processors in a live guest that has the guest agent running. --- include/libvirt/libvirt.h.in | 1 + src/libvirt.c | 24 ++++++++++++++++++++++-- tools/virsh-domain.c | 25 +++++++++++++++++++++++-- tools/virsh.pod | 13 ++++++++++--- 4 files changed, 56 insertions(+), 7 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 1804c93..e057be1 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -2120,6 +2120,7 @@ typedef enum {
/* Additionally, these flags may be bitwise-OR'd in. */ VIR_DOMAIN_VCPU_MAXIMUM = (1 << 2), /* Max rather than current count */ + VIR_DOMAIN_VCPU_AGENT = (1 << 3), /* Use guest-agent based cpu hotplug */ } virDomainVcpuFlags;
I'm thinking that perhaps a different name is better here. "guest agent" is an implementation detail. What we're doing here is providing a way to change the guest OS CPU online state. Whether this is via a guest agent or paravirt channel, or something else, is a minor detail. So how about: VIR_DOMAIN_VCPU_GUEST_USAGE
diff --git a/src/libvirt.c b/src/libvirt.c index 6967613..820519a 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -8906,6 +8906,12 @@ error: * equal to virConnectGetMaxVcpus(). Otherwise, this call affects the * current virtual CPU limit, which must be less than or equal to the * maximum limit. + * + * If @flags includes VIR_DOMAIN_VCPU_AGENT, then a guest agent is used to + * modify the number of processors used by a domain. This flag can only be used + * with live guests and is incompatible with VIR_DOMAIN_VCPU_MAXIMUM as the + * maximum limit can't be changed using the guest agent.
I think we could be a little bit more explicit about the difference between the two. I'd like to see some text that explicitly says that the default behaviour is to control the actual CPUs exposed to the guest, while this flag is just controlling the guest OS CPU usage mask. Regards, Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

This patch implements the VIR_DOMAIN_VCPU_AGENT flag for the qemuDomainGetVcpusFlags() libvirt API implementation. --- src/qemu/qemu_driver.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 4 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 3db21d4..2922fce 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -4406,17 +4406,24 @@ static int qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) { virQEMUDriverPtr driver = dom->conn->privateData; + qemuDomainObjPrivatePtr priv; virDomainObjPtr vm; virDomainDefPtr def; int ret = -1; virCapsPtr caps = NULL; + qemuAgentCPUInfoPtr cpuinfo = NULL; + int ncpuinfo; + int i; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG | - VIR_DOMAIN_VCPU_MAXIMUM, -1); + VIR_DOMAIN_VCPU_MAXIMUM | + VIR_DOMAIN_VCPU_AGENT, -1); if (!(vm = qemuDomObjFromDomain(dom))) - goto cleanup; + return -1; + + priv = vm->privateData; if (!(caps = virQEMUDriverGetCapabilities(driver, false))) goto cleanup; @@ -4425,16 +4432,61 @@ qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) vm, &flags, &def) < 0) goto cleanup; - if (flags & VIR_DOMAIN_AFFECT_LIVE) { + if (flags & VIR_DOMAIN_AFFECT_LIVE) def = vm->def; + + if (flags & VIR_DOMAIN_VCPU_AGENT) { + if (flags & VIR_DOMAIN_AFFECT_CONFIG) { + virReportError(VIR_ERR_INVALID_ARG, "%s", + _("vCPU count provided by the guest agent can only be " + " requested for live domains")); + goto cleanup; + } + + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + goto cleanup; + + if (!virDomainObjIsActive(vm)) { + virReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("domain is not running")); + goto endjob; + } + + qemuDomainObjEnterAgent(vm); + ncpuinfo = qemuAgentGetVCPUs(priv->agent, &cpuinfo); + qemuDomainObjExitAgent(vm); + +endjob: + if (qemuDomainObjEndJob(driver, vm) == 0) + vm = NULL; + + if (ncpuinfo < 0) + goto cleanup; + + if (flags & VIR_DOMAIN_VCPU_MAXIMUM) { + ret = 
ncpuinfo; + goto cleanup; + } + + /* count the online vcpus */ + ret = 0; + for (i = 0; i < ncpuinfo; i++) { + if (cpuinfo[i].online) + ret++; + } + } else { + if (flags & VIR_DOMAIN_VCPU_MAXIMUM) + ret = def->maxvcpus; + else + ret = def->vcpus; } - ret = (flags & VIR_DOMAIN_VCPU_MAXIMUM) ? def->maxvcpus : def->vcpus; cleanup: if (vm) virObjectUnlock(vm); virObjectUnref(caps); + VIR_FREE(cpuinfo); return ret; } -- 1.8.2.1

On Wed, Jun 05, 2013 at 03:43:57PM +0200, Peter Krempa wrote:
This patch implements the VIR_DOMAIN_VCPU_AGENT flag for the qemuDomainGetVcpusFlags() libvirt API implementation. --- src/qemu/qemu_driver.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 4 deletions(-)
ACK Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On Wed, Jun 05, 2013 at 03:43:57PM +0200, Peter Krempa wrote:
This patch implements the VIR_DOMAIN_VCPU_AGENT flag for the qemuDomainGetVcpusFlags() libvirt API implementation. --- src/qemu/qemu_driver.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 4 deletions(-)
ACK Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On Wed, Jun 05, 2013 at 03:43:57PM +0200, Peter Krempa wrote:
This patch implements the VIR_DOMAIN_VCPU_AGENT flag for the qemuDomainGetVcpusFlags() libvirt API implementation. --- src/qemu/qemu_driver.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 4 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 3db21d4..2922fce 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -4406,17 +4406,24 @@ static int qemuDomainGetVcpusFlags(virDomainPtr dom, unsigned int flags) { virQEMUDriverPtr driver = dom->conn->privateData; + qemuDomainObjPrivatePtr priv; virDomainObjPtr vm; virDomainDefPtr def; int ret = -1; virCapsPtr caps = NULL; + qemuAgentCPUInfoPtr cpuinfo = NULL; + int ncpuinfo;
This is not initialized here
+ + if (!virDomainObjIsActive(vm)) { + virReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("domain is not running")); + goto endjob; + }
This 'goto' jumps over the only initialization of 'ncpuinfo':
+ + qemuDomainObjEnterAgent(vm); + ncpuinfo = qemuAgentGetVCPUs(priv->agent, &cpuinfo); + qemuDomainObjExitAgent(vm); +
+endjob: + if (qemuDomainObjEndJob(driver, vm) == 0) + vm = NULL; + + if (ncpuinfo < 0) + goto cleanup;
So here you're accessing uninitialized memory. CC libvirt_driver_qemu_impl_la-qemu_driver.lo qemu/qemu_driver.c: In function 'qemuDomainGetVcpusFlags': qemu/qemu_driver.c:4573:9: error: 'ncpuinfo' may be used uninitialized in this function [-Werror=maybe-uninitialized] qemu/qemu_driver.c: At top level: Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

This patch adds support for agent-based cpu disabling and enabling to qemuDomainSetVcpusFlags() API. --- src/qemu/qemu_driver.c | 129 ++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 116 insertions(+), 13 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 2922fce..99daf90 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -3716,6 +3716,68 @@ unsupported: static int +qemuDomainPrepareAgentVCPUs(unsigned int nvcpus, + qemuAgentCPUInfoPtr cpuinfo, + int ncpuinfo) +{ + int i; + int nonline = 0; + int nofflinable = 0; + + /* count the active and offlinable cpus */ + for (i = 0; i < ncpuinfo; i++) { + if (cpuinfo[i].online) + nonline++; + + if (cpuinfo[i].offlinable && cpuinfo[i].online) + nofflinable++; + + /* This shouldn't happen, but we can't trust the guest agent */ + if (!cpuinfo[i].online && !cpuinfo[i].offlinable) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("Invalid data provided by guest agent")); + return -1; + } + } + + /* the guest agent reported less cpus than requested */ + if (nvcpus > ncpuinfo) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest agent reports less cpu than requested")); + return -1; + } + + /* not enough offlinable CPUs to support the request */ + if (nvcpus < nonline - nofflinable) { + virReportError(VIR_ERR_INVALID_ARG, "%s", + _("Cannot offline enough CPUs")); + return -1; + } + + for (i = 0; i < ncpuinfo; i++) { + if (nvcpus < nonline) { + /* unplug */ + if (cpuinfo[i].offlinable && cpuinfo[i].online) { + cpuinfo[i].online = false; + nonline--; + } + } else if (nvcpus > nonline) { + /* plug */ + if (!cpuinfo[i].online) { + cpuinfo[i].online = true; + nonline++; + } + } else { + /* done */ + break; + } + } + + return 0; +} + + +static int qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus, unsigned int flags) { @@ -3726,10 +3788,14 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus, bool maximum; virQEMUDriverConfigPtr cfg = NULL; 
virCapsPtr caps = NULL; + qemuAgentCPUInfoPtr cpuinfo = NULL; + int ncpuinfo; + qemuDomainObjPrivatePtr priv; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG | - VIR_DOMAIN_VCPU_MAXIMUM, -1); + VIR_DOMAIN_VCPU_MAXIMUM | + VIR_DOMAIN_VCPU_AGENT, -1); if (!nvcpus || (unsigned short) nvcpus != nvcpus) { virReportError(VIR_ERR_INVALID_ARG, @@ -3744,6 +3810,8 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus, if (!(vm = qemuDomObjFromDomain(dom))) goto cleanup; + priv = vm->privateData; + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) goto cleanup; @@ -3769,22 +3837,56 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus, goto endjob; } - if (flags & VIR_DOMAIN_AFFECT_LIVE) { - if (qemuDomainHotplugVcpus(driver, vm, nvcpus) < 0) + if (flags & VIR_DOMAIN_VCPU_AGENT) { + if (flags & VIR_DOMAIN_AFFECT_CONFIG) { + virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", + _("changing of maximum vCPU count isn't supported " + "via guest agent")); goto endjob; - } - - if (flags & VIR_DOMAIN_AFFECT_CONFIG) { - if (maximum) { - persistentDef->maxvcpus = nvcpus; - if (nvcpus < persistentDef->vcpus) - persistentDef->vcpus = nvcpus; - } else { - persistentDef->vcpus = nvcpus; } - if (virDomainSaveConfig(cfg->configDir, persistentDef) < 0) + qemuDomainObjEnterAgent(vm); + ncpuinfo = qemuAgentGetVCPUs(priv->agent, &cpuinfo); + qemuDomainObjExitAgent(vm); + + if (ncpuinfo < 0) + goto endjob; + + if (qemuDomainPrepareAgentVCPUs(nvcpus, cpuinfo, ncpuinfo) < 0) + goto endjob; + + qemuDomainObjEnterAgent(vm); + ret = qemuAgentSetVCPUs(priv->agent, cpuinfo, ncpuinfo); + qemuDomainObjExitAgent(vm); + + if (ret < 0) + goto endjob; + + if (ret < ncpuinfo) { + virReportError(VIR_ERR_INTERNAL_ERROR, + _("failed to set state of cpu %d via guest agent"), + cpuinfo[ret-1].id); + ret = -1; goto endjob; + } + } else { + if (flags & VIR_DOMAIN_AFFECT_LIVE) { + if (qemuDomainHotplugVcpus(driver, vm, nvcpus) < 0) + goto endjob; + } + + if (flags & 
VIR_DOMAIN_AFFECT_CONFIG) { + if (maximum) { + persistentDef->maxvcpus = nvcpus; + if (nvcpus < persistentDef->vcpus) + persistentDef->vcpus = nvcpus; + } else { + persistentDef->vcpus = nvcpus; + } + + if (virDomainSaveConfig(cfg->configDir, persistentDef) < 0) + goto endjob; + } } ret = 0; @@ -3797,6 +3899,7 @@ cleanup: if (vm) virObjectUnlock(vm); virObjectUnref(caps); + VIR_FREE(cpuinfo); virObjectUnref(cfg); return ret; } -- 1.8.2.1

On Wed, Jun 05, 2013 at 03:43:58PM +0200, Peter Krempa wrote:
This patch adds support for agent-based cpu disabling and enabling to qemuDomainSetVcpusFlags() API. --- src/qemu/qemu_driver.c | 129 ++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 116 insertions(+), 13 deletions(-)
ACK Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

QEMU recently added support for cpu hotplug upstream that will support plugging arbitrary cpus. Additionally guest-agent-based cpu state modification from the guest point of view was added recently. This API will help monitor the state of vCPUs using the two approaches as a support infrastructure for the modification APIs. --- daemon/remote.c | 54 ++++++++++++++++++++++++++++++ include/libvirt/libvirt.h.in | 21 ++++++++++++ python/generator.py | 1 + python/libvirt-override-api.xml | 7 ++++ python/libvirt-override.c | 66 ++++++++++++++++++++++++++++++++++++ src/driver.h | 6 ++++ src/libvirt.c | 74 +++++++++++++++++++++++++++++++++++++++++ src/libvirt_public.syms | 6 ++++ src/remote/remote_driver.c | 47 ++++++++++++++++++++++++++ src/remote/remote_protocol.x | 18 +++++++++- src/remote_protocol-structs | 13 ++++++++ 11 files changed, 312 insertions(+), 1 deletion(-) diff --git a/daemon/remote.c b/daemon/remote.c index 47267c2..e02ac7e 100644 --- a/daemon/remote.c +++ b/daemon/remote.c @@ -4583,6 +4583,60 @@ cleanup: return rv; } + +static int +remoteDispatchDomainGetVcpuMap(virNetServerPtr server ATTRIBUTE_UNUSED, + virNetServerClientPtr client ATTRIBUTE_UNUSED, + virNetMessagePtr msg ATTRIBUTE_UNUSED, + virNetMessageErrorPtr rerr, + remote_domain_get_vcpu_map_args *args, + remote_domain_get_vcpu_map_ret *ret) +{ + unsigned char *cpumap = NULL; + unsigned int flags; + int cpunum; + int rv = -1; + virDomainPtr dom = NULL; + struct daemonClientPrivate *priv = + virNetServerClientGetPrivateData(client); + + if (!priv->conn) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open")); + goto cleanup; + } + + if (!(dom = get_nonnull_domain(priv->conn, args->dom))) + goto cleanup; + + flags = args->flags; + + cpunum = virDomainGetVCPUMap(dom, + args->need_map ? 
&cpumap : NULL, + flags); + if (cpunum < 0) + goto cleanup; + + /* 'serialize' return cpumap */ + if (args->need_map) { + ret->cpumap.cpumap_len = VIR_CPU_MAPLEN(cpunum); + ret->cpumap.cpumap_val = (char *) cpumap; + cpumap = NULL; + } + + ret->ret = cpunum; + + rv = 0; + +cleanup: + if (rv < 0) + virNetMessageSaveError(rerr); + if (dom) + virDomainFree(dom); + VIR_FREE(cpumap); + return rv; +} + + static int lxcDispatchDomainOpenNamespace(virNetServerPtr server ATTRIBUTE_UNUSED, virNetServerClientPtr client ATTRIBUTE_UNUSED, diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index e057be1..c8f639a 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -4859,6 +4859,27 @@ int virNWFilterGetUUIDString (virNWFilterPtr nwfilter, char *buf); char * virNWFilterGetXMLDesc (virNWFilterPtr nwfilter, unsigned int flags); + +/** + * virDomainGetVCPUMapFlags + * + * Since 1.0.6 + */ +typedef enum { + VIR_DOMAIN_VCPU_MAP_HYPERVISOR = (1 << 0), /* request data from hypervisor */ + VIR_DOMAIN_VCPU_MAP_AGENT = (1 << 1), /* request data from guest agent */ + + VIR_DOMAIN_VCPU_MAP_POSSIBLE = (1 << 2), /* map all possible vcpus */ + VIR_DOMAIN_VCPU_MAP_ONLINE = (1 << 3), /* map all online vcpus */ + VIR_DOMAIN_VCPU_MAP_OFFLINE = (1 << 4), /* map all offline vcpus */ + VIR_DOMAIN_VCPU_MAP_OFFLINABLE = (1 << 5), /* map all vcpus that can be offlined */ + VIR_DOMAIN_VCPU_MAP_ACTIVE = (1 << 6), /* map cpus that are in use by the guest */ +} virDomainGetVCPUMapFlags; + +int virDomainGetVCPUMap(virDomainPtr dom, + unsigned char **cpumap, + unsigned int flags); + /** * virDomainConsoleFlags * diff --git a/python/generator.py b/python/generator.py index 8c380bb..4884c29 100755 --- a/python/generator.py +++ b/python/generator.py @@ -458,6 +458,7 @@ skip_impl = ( 'virNodeGetMemoryParameters', 'virNodeSetMemoryParameters', 'virNodeGetCPUMap', + 'virDomainGetVCPUMap', ) lxc_skip_impl = ( diff --git a/python/libvirt-override-api.xml 
b/python/libvirt-override-api.xml index 155ab36..7725800 100644 --- a/python/libvirt-override-api.xml +++ b/python/libvirt-override-api.xml @@ -584,5 +584,12 @@ <arg name='conn' type='virConnectPtr' info='pointer to the hypervisor connection'/> <arg name='flags' type='int' info='unused, pass 0'/> </function> + <function name='virDomainGetVCPUMap' file='python'> + <info>Get node CPU information</info> + <return type='char *' info='(cpunum, cpumap) on success, None on error'/> + <arg name='dom' type='virDomainPtr' info='pointer to the domain'/> + <arg name='flags' type='unsigned int' info='an OR'ed set of virDomainGetVCPUMapFlags'/> + </function> + </symbols> </api> diff --git a/python/libvirt-override.c b/python/libvirt-override.c index b6462c2..fb451c9 100644 --- a/python/libvirt-override.c +++ b/python/libvirt-override.c @@ -6759,6 +6759,71 @@ error: } +static PyObject * +libvirt_virDomainGetVCPUMap(PyObject *self ATTRIBUTE_UNUSED, + PyObject *args) +{ + virDomainPtr domain; + PyObject *pyobj_domain; + PyObject *ret = NULL; + PyObject *pycpumap = NULL; + PyObject *pyused = NULL; + PyObject *pycpunum = NULL; + PyObject *pyonline = NULL; + int i_retval; + unsigned char *cpumap = NULL; + unsigned int flags; + int i; + + if (!PyArg_ParseTuple(args, (char *)"Oi:virDomainGetVCPUMap", + &pyobj_domain, &flags)) + return NULL; + + domain = (virDomainPtr) PyvirDomain_Get(pyobj_domain); + + LIBVIRT_BEGIN_ALLOW_THREADS; + i_retval = virDomainGetVCPUMap(domain, &cpumap, flags); + LIBVIRT_END_ALLOW_THREADS; + + if (i_retval < 0) + return VIR_PY_NONE; + + if (!(ret = PyTuple_New(2))) + goto error; + + /* 0: number of CPUs */ + if ((pycpunum = PyLong_FromLong(i_retval)) == NULL || + PyTuple_SetItem(ret, 0, pycpunum) < 0) + goto error; + + /* 1: CPU map */ + if ((pycpumap = PyList_New(i_retval)) == NULL) + goto error; + + for (i = 0; i < i_retval; i++) { + if ((pyused = PyBool_FromLong(VIR_CPU_USED(cpumap, i))) == NULL) + goto error; + if (PyList_SetItem(pycpumap, i, pyused) < 0) 
+ goto error; + } + + if (PyTuple_SetItem(ret, 1, pycpumap) < 0) + goto error; + +cleanup: + VIR_FREE(cpumap); + return ret; +error: + Py_XDECREF(ret); + Py_XDECREF(pycpumap); + Py_XDECREF(pyused); + Py_XDECREF(pycpunum); + Py_XDECREF(pyonline); + ret = NULL; + goto cleanup; +} + + /************************************************************************ * * * The registration stuff * @@ -6882,6 +6947,7 @@ static PyMethodDef libvirtMethods[] = { {(char *) "virNodeGetMemoryParameters", libvirt_virNodeGetMemoryParameters, METH_VARARGS, NULL}, {(char *) "virNodeSetMemoryParameters", libvirt_virNodeSetMemoryParameters, METH_VARARGS, NULL}, {(char *) "virNodeGetCPUMap", libvirt_virNodeGetCPUMap, METH_VARARGS, NULL}, + {(char *) "virDomainGetVCPUMap", libvirt_virDomainGetVCPUMap, METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} }; diff --git a/src/driver.h b/src/driver.h index ec5fc53..cbafdce 100644 --- a/src/driver.h +++ b/src/driver.h @@ -1045,6 +1045,11 @@ typedef int int **fdlist, unsigned int flags); +typedef int +(*virDrvDomainGetVCPUMap)(virDomainPtr dom, + unsigned char **cpumap, + unsigned int flags); + typedef struct _virDriver virDriver; typedef virDriver *virDriverPtr; @@ -1246,6 +1251,7 @@ struct _virDriver { virDrvDomainFSTrim domainFSTrim; virDrvDomainSendProcessSignal domainSendProcessSignal; virDrvDomainLxcOpenNamespace domainLxcOpenNamespace; + virDrvDomainGetVCPUMap domainGetVCPUMap; }; diff --git a/src/libvirt.c b/src/libvirt.c index 820519a..59e02a1 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -9526,6 +9526,80 @@ error: return -1; } + +/** + * virDomainGetVCPUMap: + * @domain: pointer to domain object + * @cpumap: pointer to a pointer where the returned cpumap will be stored + * @flags: bitwise-OR of virDomainGetVCPUMapFlags + * + * Request a map of virtual processors of a domain. Use @flags to control + * the data contained in the map. 
+ * + * If @flags is 0 this function behaves as if flags + * VIR_DOMAIN_VCPU_MAP_HYPERVISOR and VIR_DOMAIN_VCPU_MAP_POSSIBLE were + * specified. + * + * When @flags contains VIR_DOMAIN_VCPU_MAP_AGENT the guest agent is used + * to query the requested data from the point of view of the guest. + * + * Returns the length of @cpumap or -1 in case of error. + */ +int +virDomainGetVCPUMap(virDomainPtr domain, + unsigned char **cpumap, + unsigned int flags) +{ + virConnectPtr conn; + + VIR_DOMAIN_DEBUG(domain); + + virResetLastError(); + + if (!VIR_IS_CONNECTED_DOMAIN(domain)) { + virLibDomainError(VIR_ERR_INVALID_DOMAIN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + + conn = domain->conn; + + if ((flags & (VIR_DOMAIN_VCPU_MAP_HYPERVISOR | + VIR_DOMAIN_VCPU_MAP_AGENT)) == 0) + flags |= VIR_DOMAIN_VCPU_MAP_HYPERVISOR; + + if ((flags & (VIR_DOMAIN_VCPU_MAP_POSSIBLE | + VIR_DOMAIN_VCPU_MAP_ONLINE | + VIR_DOMAIN_VCPU_MAP_OFFLINE | + VIR_DOMAIN_VCPU_MAP_OFFLINABLE)) == 0) + flags |= VIR_DOMAIN_VCPU_MAP_ONLINE; + + if (flags & VIR_DOMAIN_VCPU_MAP_HYPERVISOR && + flags & VIR_DOMAIN_VCPU_MAP_AGENT) { + virReportInvalidArg(flags, + _("flags VIR_DOMAIN_VCPU_MAP_HYPERVISOR and " + "VIR_DOMAIN_VCPU_MAP_AGENT in %s " + "are mutually exclusive"), + __FUNCTION__); + goto error; + } + + if (conn->driver->domainGetVCPUMap) { + int ret; + ret = conn->driver->domainGetVCPUMap(domain, cpumap, flags); + if (ret < 0) + goto error; + return ret; + } + + virLibConnError(VIR_ERR_NO_SUPPORT, __FUNCTION__); + +error: + virDispatchError(domain->conn); + return -1; + +} + /** * virDomainGetSecurityLabel: * @domain: a domain object diff --git a/src/libvirt_public.syms b/src/libvirt_public.syms index 4ee2d27..04465be 100644 --- a/src/libvirt_public.syms +++ b/src/libvirt_public.syms @@ -621,4 +621,10 @@ LIBVIRT_1.0.6 { virGetLastErrorMessage; } LIBVIRT_1.0.5; +LIBVIRT_1.0.7 { + global: + virDomainGetVCPUMap; +} LIBVIRT_1.0.6; + + # .... 
define new API here using predicted next version number .... diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c index 2cda559..99fa3c1 100644 --- a/src/remote/remote_driver.c +++ b/src/remote/remote_driver.c @@ -5951,6 +5951,52 @@ done: static int +remoteDomainGetVCPUMap(virDomainPtr dom, + unsigned char **cpumap, + unsigned int flags) +{ + int rv = -1; + remote_domain_get_vcpu_map_args args; + remote_domain_get_vcpu_map_ret ret; + struct private_data *priv = dom->conn->privateData; + + remoteDriverLock(priv); + + args.need_map = !!cpumap; + args.flags = flags; + make_nonnull_domain(&args.dom, dom); + + memset(&ret, 0, sizeof(ret)); + if (call(dom->conn, priv, 0, REMOTE_PROC_DOMAIN_GET_VCPU_MAP, + (xdrproc_t) xdr_remote_domain_get_vcpu_map_args, + (char *) &args, + (xdrproc_t) xdr_remote_domain_get_vcpu_map_ret, + (char *) &ret) == -1) + goto done; + + if (ret.ret < 0) + goto cleanup; + + if (cpumap) { + if (VIR_ALLOC_N(*cpumap, ret.cpumap.cpumap_len) < 0) { + virReportOOMError(); + goto cleanup; + } + memcpy(*cpumap, ret.cpumap.cpumap_val, ret.cpumap.cpumap_len); + } + + rv = ret.ret; + +cleanup: + xdr_free((xdrproc_t) xdr_remote_domain_get_vcpu_map_ret, (char *) &ret); +done: + remoteDriverUnlock(priv); + return rv; +} + + + +static int remoteDomainLxcOpenNamespace(virDomainPtr domain, int **fdlist, unsigned int flags) @@ -6348,6 +6394,7 @@ static virDriver remote_driver = { .nodeGetCPUMap = remoteNodeGetCPUMap, /* 1.0.0 */ .domainFSTrim = remoteDomainFSTrim, /* 1.0.1 */ .domainLxcOpenNamespace = remoteDomainLxcOpenNamespace, /* 1.0.2 */ + .domainGetVCPUMap = remoteDomainGetVCPUMap, /* 1.0.7 */ }; static virNetworkDriver network_driver = { diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x index 9723377..cec3541 100644 --- a/src/remote/remote_protocol.x +++ b/src/remote/remote_protocol.x @@ -2737,6 +2737,17 @@ struct remote_node_get_cpu_map_ret { int ret; }; +struct remote_domain_get_vcpu_map_args { + remote_nonnull_domain 
dom; + int need_map; + unsigned int flags; +}; + +struct remote_domain_get_vcpu_map_ret { + opaque cpumap<REMOTE_CPUMAP_MAX>; + int ret; +}; + struct remote_domain_fstrim_args { remote_nonnull_domain dom; remote_string mountPoint; @@ -4434,6 +4445,11 @@ enum remote_procedure { /** * @generate: server */ - REMOTE_PROC_NODE_DEVICE_DETACH_FLAGS = 301 + REMOTE_PROC_NODE_DEVICE_DETACH_FLAGS = 301, + + /** + * @generate: none + */ + REMOTE_PROC_DOMAIN_GET_VCPU_MAP = 302 }; diff --git a/src/remote_protocol-structs b/src/remote_protocol-structs index ea38ea2..e1ceabd 100644 --- a/src/remote_protocol-structs +++ b/src/remote_protocol-structs @@ -2186,6 +2186,18 @@ struct remote_node_get_cpu_map_ret { u_int online; int ret; }; +struct remote_domain_get_vcpu_map_args { + remote_nonnull_domain dom; + int need_map; + u_int flags; +}; +struct remote_domain_get_vcpu_map_ret { + struct { + u_int cpumap_len; + char * cpumap_val; + } cpumap; + int ret; +}; struct remote_domain_fstrim_args { remote_nonnull_domain dom; remote_string mountPoint; @@ -2494,4 +2506,5 @@ enum remote_procedure { REMOTE_PROC_DOMAIN_MIGRATE_GET_COMPRESSION_CACHE = 299, REMOTE_PROC_DOMAIN_MIGRATE_SET_COMPRESSION_CACHE = 300, REMOTE_PROC_NODE_DEVICE_DETACH_FLAGS = 301, + REMOTE_PROC_DOMAIN_GET_VCPU_MAP = 302, }; -- 1.8.2.1

Add support for the virDomainGetVCPUMap API. The code is based on node vcpu mapping. --- tools/virsh-domain-monitor.c | 110 +++++++++++++++++++++++++++++++++++++++++++ tools/virsh.pod | 23 +++++++++ 2 files changed, 133 insertions(+) diff --git a/tools/virsh-domain-monitor.c b/tools/virsh-domain-monitor.c index 3ba829c..ab3a537 100644 --- a/tools/virsh-domain-monitor.c +++ b/tools/virsh-domain-monitor.c @@ -1806,6 +1806,111 @@ cleanup: } #undef FILTER +static const vshCmdInfo info_VCPUMap[] = { + {.name = "help", + .data = N_("request map of virtual processors of a domain") + }, + {.name = "desc", + .data = N_("Returns the map of virtual processors of a domain") + }, + {.name = NULL} +}; + +static const vshCmdOptDef opts_VCPUMap[] = { + {.name = "domain", + .type = VSH_OT_DATA, + .flags = VSH_OFLAG_REQ, + .help = N_("domain name, id or uuid") + }, + {.name = "agent", + .type = VSH_OT_BOOL, + .help = N_("use guest agent") + }, + {.name = "hypervisor", + .type = VSH_OT_BOOL, + .help = N_("show hypervisor data") + }, + {.name = "online", + .type = VSH_OT_BOOL, + .help = N_("show online cpus") + }, + {.name = "offlinable", + .type = VSH_OT_BOOL, + .help = N_("show offlinable vCPUs") + }, + {.name = "offline", + .type = VSH_OT_BOOL, + .help = N_("show offline cpus") + }, + {.name = "possible", + .type = VSH_OT_BOOL, + .help = N_("show all possible vCPUs") + }, + {.name = "active", + .type = VSH_OT_BOOL, + .help = N_("show currently active vCPUs") + }, + {.name = NULL} +}; + +static bool +cmdVCPUMap(vshControl *ctl, const vshCmd *cmd) +{ + int cpu, cpunum; + unsigned char *cpumap = NULL; + bool ret = false; + virDomainPtr dom = NULL; + unsigned int flags = 0; + bool agent = vshCommandOptBool(cmd, "agent"); + bool hypervisor = vshCommandOptBool(cmd, "hypervisor"); + bool online = vshCommandOptBool(cmd, "online"); + bool offline = vshCommandOptBool(cmd, "offline"); + + VSH_EXCLUSIVE_OPTIONS_VAR(agent, hypervisor); + VSH_EXCLUSIVE_OPTIONS_VAR(online, offline); + + if (!(dom 
= vshCommandOptDomain(ctl, cmd, NULL))) + return false; + + if (agent) + flags |= VIR_DOMAIN_VCPU_MAP_AGENT; + + if (hypervisor) + flags |= VIR_DOMAIN_VCPU_MAP_HYPERVISOR; + + if (online) + flags |= VIR_DOMAIN_VCPU_MAP_ONLINE; + + if (offline) + flags |= VIR_DOMAIN_VCPU_MAP_OFFLINE; + + if (vshCommandOptBool(cmd, "offlinable")) + flags |= VIR_DOMAIN_VCPU_MAP_OFFLINABLE; + + if (vshCommandOptBool(cmd, "possible")) + flags |= VIR_DOMAIN_VCPU_MAP_POSSIBLE; + + if (vshCommandOptBool(cmd, "active")) + flags |= VIR_DOMAIN_VCPU_MAP_ACTIVE; + + if ((cpunum = virDomainGetVCPUMap(dom, &cpumap, flags)) < 0) { + vshError(ctl, "%s", _("Unable to get cpu map")); + goto cleanup; + } + + vshPrint(ctl, "%-15s ", _("CPU map:")); + for (cpu = 0; cpu < cpunum; cpu++) + vshPrint(ctl, "%c", VIR_CPU_USED(cpumap, cpu) ? 'y' : '-'); + vshPrint(ctl, "\n"); + + ret = true; + +cleanup: + virDomainFree(dom); + VIR_FREE(cpumap); + return ret; +} + const vshCmdDef domMonitoringCmds[] = { {.name = "domblkerror", .handler = cmdDomBlkError, @@ -1879,5 +1984,10 @@ const vshCmdDef domMonitoringCmds[] = { .info = info_list, .flags = 0 }, + {.name = "vcpumap", + .handler = cmdVCPUMap, + .opts = opts_VCPUMap, + .info = info_VCPUMap, + }, {.name = NULL} }; diff --git a/tools/virsh.pod b/tools/virsh.pod index d537653..42e39d3 100644 --- a/tools/virsh.pod +++ b/tools/virsh.pod @@ -1811,6 +1811,29 @@ If no flag is specified, behavior is different depending on hypervisor. Output the IP address and port number for the VNC display. If the information is not available the processes will provide an exit code of 1. +=item B<vcpumap> I<domain> [I<--hypervisor> | I<--agent>] +[I<--active> | I<--online> | I<--offline> | I<--offlinable> | I<--possible>] + +Query a map of vCPUs of a domain. + +When I<--agent> is specified the guest agent is queried for the state of the +vCPUs from the point of view of the guest. Otherwise I<--hypervisor> is +assumed. 
+ +If I<--possible> is specified, all cpu IDs referenced by the guest are listed. +The I<--offlinable> flag limits the view to vCPUs that can be disabled. +I<--online> and I<--offline> limit the output according to the state of the +vCPU. With I<--active> specified, only vCPUs that are active are listed. +The flags may be combined to filter the results further. + +With no flag specified, libvirt assumes that I<--online> and I<--hypervisor> +are requested. + +I<--agent> and I<--hypervisor> as well as I<--online> and I<--offline> are +mutually exclusive options. + +B<Note>: Some flags may not be supported with both query modes. + =back =head1 DEVICE COMMANDS -- 1.8.2.1

Use the agent cpu state code and the upgraded hypervisor vcpu state retrieval code to implement virDomainGetVCPUMap() api. --- src/qemu/qemu_driver.c | 179 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 179 insertions(+) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 99daf90..ba3a6e1 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -15198,6 +15198,184 @@ qemuNodeSuspendForDuration(virConnectPtr conn ATTRIBUTE_UNUSED, return nodeSuspendForDuration(target, duration, flags); } +#define MATCH(FLAG) (flags & (FLAG)) +static int +qemuDomainGetVCPUMap(virDomainPtr dom, + unsigned char **cpumap, + unsigned int flags) +{ + virQEMUDriverPtr driver = dom->conn->privateData; + virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; + + qemuAgentCPUInfoPtr agentinfo = NULL; + qemuMonitorCPUInfoPtr vcpuinfo = NULL; + int ninfo = -1; + + virBitmapPtr cpus = NULL; + int i; + int ret = -1; + int dummy; + + virCheckFlags(VIR_DOMAIN_VCPU_MAP_HYPERVISOR | + VIR_DOMAIN_VCPU_MAP_AGENT | + VIR_DOMAIN_VCPU_MAP_POSSIBLE | + VIR_DOMAIN_VCPU_MAP_ONLINE | + VIR_DOMAIN_VCPU_MAP_OFFLINE | + VIR_DOMAIN_VCPU_MAP_OFFLINABLE | + VIR_DOMAIN_VCPU_MAP_ACTIVE, -1); + + + if (!(vm = qemuDomObjFromDomain(dom))) + return -1; + + priv = vm->privateData; + + /* request data from the guest */ + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + goto cleanup; + + if (!virDomainObjIsActive(vm)) { + virReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("domain is not running")); + goto endjob; + } + + + if (MATCH(VIR_DOMAIN_VCPU_MAP_AGENT)) { + if (!priv->agent) { + virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", + _("guest agent is not configured")); + goto endjob; + } + qemuDomainObjEnterAgent(vm); + ninfo = qemuAgentGetVCPUs(priv->agent, &agentinfo); + qemuDomainObjExitAgent(vm); + } else { + qemuDomainObjEnterMonitor(driver, vm); + ninfo = qemuMonitorGetCPUInfo(priv->mon, &vcpuinfo); + qemuDomainObjExitMonitor(driver, vm); + } + +endjob: + if 
(qemuDomainObjEndJob(driver, vm) == 0) + vm = NULL; + + if (ninfo < 0) + goto cleanup; + + + if (MATCH(VIR_DOMAIN_VCPU_MAP_AGENT)) { + unsigned int maxcpu = 0; + + if (MATCH(VIR_DOMAIN_VCPU_MAP_ACTIVE)) { + virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", + _("qemu guest agent doesn't report active vCPUs ")); + goto cleanup; + } + + /* count cpus */ + for (i = 0; i < ninfo; i++) { + if (agentinfo[i].id > maxcpu) + maxcpu = agentinfo[i].id; + } + + /* allocate the returned array, vCPUs are indexed from 0 */ + if (!(cpus = virBitmapNew(maxcpu + 1))) { + virReportOOMError(); + goto cleanup; + } + + /* VIR_DOMAIN_VCPU_MAP_POSSIBLE */ + for (i = 0; i < ninfo; i++) + ignore_value(virBitmapSetBit(cpus, agentinfo[i].id)); + + if (MATCH(VIR_DOMAIN_VCPU_MAP_ONLINE)) { + for (i = 0; i < ninfo; i++) { + if (!agentinfo[i].online) + ignore_value(virBitmapClearBit(cpus, agentinfo[i].id)); + } + } + + if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINE)) { + for (i = 0; i < ninfo; i++) { + if (agentinfo[i].online) + ignore_value(virBitmapClearBit(cpus, agentinfo[i].id)); + } + } + + if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINABLE)) { + for (i = 0; i < ninfo; i++) { + if (!agentinfo[i].offlinable) + ignore_value(virBitmapClearBit(cpus, agentinfo[i].id)); + } + } + } else { + if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINABLE)) { + virReportError(VIR_ERR_INVALID_ARG, "%s", + _("qemu driver doesn't support reporting of " + "offlinable vCPUs of the hypervisor")); + goto cleanup; + } + + /* hypervisor cpu stats */ + if (!(cpus = virBitmapNew(vm->def->maxvcpus))) { + virReportOOMError(); + goto cleanup; + } + + /* map active cpus */ + if (MATCH(VIR_DOMAIN_VCPU_MAP_ACTIVE)) { + /* offline vcpus can't be active */ + if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINE)) + goto done; + + for (i = 0; i < ninfo; i++) { + if (vcpuinfo[i].active) + ignore_value(virBitmapSetBit(cpus, vcpuinfo[i].id)); + } + + goto done; + } + + /* for native hotplug, all configured vCPUs are possible for hotplug */ + if 
(MATCH(VIR_DOMAIN_VCPU_MAP_POSSIBLE)) { + virBitmapSetAll(cpus); + goto done; + } + + if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINE)) { + /* online and offline together produce an empty map */ + if (MATCH(VIR_DOMAIN_VCPU_MAP_ONLINE)) + goto done; + + /* set all bit's so we can subtract online cpus from it later */ + virBitmapSetAll(cpus); + } + + for (i = 0; i < ninfo; i++) { + if (MATCH(VIR_DOMAIN_VCPU_MAP_ONLINE)) + ignore_value(virBitmapSetBit(cpus, vcpuinfo[i].id)); + else + ignore_value(virBitmapClearBit(cpus, vcpuinfo[i].id)); + } + } + +done: + if (cpumap && virBitmapToData(cpus, cpumap, &dummy) < 0) + goto cleanup; + + ret = virBitmapSize(cpus); + +cleanup: + if (vm) + virObjectUnlock(vm); + virBitmapFree(cpus); + VIR_FREE(vcpuinfo); + VIR_FREE(agentinfo); + return ret; +} +#undef MATCH static virDriver qemuDriver = { .no = VIR_DRV_QEMU, @@ -15378,6 +15556,7 @@ static virDriver qemuDriver = { .nodeGetCPUMap = qemuNodeGetCPUMap, /* 1.0.0 */ .domainFSTrim = qemuDomainFSTrim, /* 1.0.1 */ .domainOpenChannel = qemuDomainOpenChannel, /* 1.0.2 */ + .domainGetVCPUMap = qemuDomainGetVCPUMap, /* 1.0.7 */ }; -- 1.8.2.1

This patch implements support for the "cpu-add" QMP command that plugs CPUs into a live guest. The "cpu-add" command was introduced in QEMU 1.5. For the hotplug to work machine type "pc-i440fx-1.5" is required. --- src/qemu/qemu_monitor_json.c | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index 4a69fec..a415732 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -2089,9 +2089,42 @@ cleanup: int qemuMonitorJSONSetCPU(qemuMonitorPtr mon, int cpu, bool online) { - /* XXX Update to use QMP, if QMP ever adds support for cpu hotplug */ + int ret = -1; + virJSONValuePtr cmd = NULL; + virJSONValuePtr reply = NULL; + + if (online) { + cmd = qemuMonitorJSONMakeCommand("cpu-add", + "i:id", cpu, + NULL); + } else { + /* offlining is not yet implemented in qmp */ + goto fallback; + } + if (!cmd) + goto cleanup; + + if ((ret = qemuMonitorJSONCommand(mon, cmd, &reply)) < 0) + goto cleanup; + + if (qemuMonitorJSONHasError(reply, "CommandNotFound")) + goto fallback; + else + ret = qemuMonitorJSONCheckError(cmd, reply); + + /* this function has non-standard return values, so adapt it */ + if (ret == 0) + ret = 1; + +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + return ret; + +fallback: VIR_DEBUG("no QMP support for cpu_set, trying HMP"); - return qemuMonitorTextSetCPU(mon, cpu, online); + ret = qemuMonitorTextSetCPU(mon, cpu, online); + goto cleanup; } -- 1.8.2.1

On 05.06.2013 15:44, Peter Krempa wrote:
This patch implements support for the "cpu-add" QMP command that plugs CPUs into a live guest. The "cpu-add" command was introduced in QEMU 1.5. For the hotplug to work machine type "pc-i440fx-1.5" is required. --- src/qemu/qemu_monitor_json.c | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-)
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index 4a69fec..a415732 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -2089,9 +2089,42 @@ cleanup: int qemuMonitorJSONSetCPU(qemuMonitorPtr mon, int cpu, bool online) { - /* XXX Update to use QMP, if QMP ever adds support for cpu hotplug */ + int ret = -1; + virJSONValuePtr cmd = NULL; + virJSONValuePtr reply = NULL; + + if (online) { + cmd = qemuMonitorJSONMakeCommand("cpu-add", + "i:id", cpu, + NULL); + } else { + /* offlining is not yet implemented in qmp */ + goto fallback; + } + if (!cmd) + goto cleanup; + + if ((ret = qemuMonitorJSONCommand(mon, cmd, &reply)) < 0) + goto cleanup; + + if (qemuMonitorJSONHasError(reply, "CommandNotFound")) + goto fallback; + else + ret = qemuMonitorJSONCheckError(cmd, reply); + + /* this function has non-standard return values, so adapt it */ + if (ret == 0) + ret = 1;
Yeah, this is strange, but it bubbles all the way down to qemuDomainHotplugVcpus in qemu_driver.c. Not worth refactoring for now.
+ +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + return ret; + +fallback: VIR_DEBUG("no QMP support for cpu_set, trying HMP"); - return qemuMonitorTextSetCPU(mon, cpu, online); + ret = qemuMonitorTextSetCPU(mon, cpu, online); + goto cleanup; }
ACK Michal

On 06/07/13 16:15, Michal Privoznik wrote:
On 05.06.2013 15:44, Peter Krempa wrote:
This patch implements support for the "cpu-add" QMP command that plugs CPUs into a live guest. The "cpu-add" command was introduced in QEMU 1.5. For the hotplug to work machine type "pc-i440fx-1.5" is required. --- src/qemu/qemu_monitor_json.c | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-)
ACK
Pushed; Thanks. Peter

This patch introduces API virDomainSetGuestVcpus that will be used to work with vCPU state from the point of view of the guest using the guest agent. --- include/libvirt/libvirt.h.in | 4 ++++ src/driver.h | 8 +++++++ src/libvirt.c | 55 ++++++++++++++++++++++++++++++++++++++++++++ src/libvirt_public.syms | 1 + src/remote/remote_driver.c | 1 + src/remote/remote_protocol.x | 15 +++++++++++- src/remote_protocol-structs | 7 ++++++ 7 files changed, 90 insertions(+), 1 deletion(-) diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index c8f639a..2dccc68 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -2128,6 +2128,10 @@ int virDomainSetVcpus (virDomainPtr domain, int virDomainSetVcpusFlags (virDomainPtr domain, unsigned int nvcpus, unsigned int flags); +int virDomainSetGuestVcpu (virDomainPtr domain, + unsigned int id, + unsigned int online, + unsigned int flags); int virDomainGetVcpusFlags (virDomainPtr domain, unsigned int flags); diff --git a/src/driver.h b/src/driver.h index cbafdce..301e490 100644 --- a/src/driver.h +++ b/src/driver.h @@ -1050,6 +1050,13 @@ typedef int unsigned char **cpumap, unsigned int flags); +typedef int +(*virDrvDomainSetGuestVcpu)(virDomainPtr dom, + unsigned int id, + unsigned int online, + unsigned int flags); + + typedef struct _virDriver virDriver; typedef virDriver *virDriverPtr; @@ -1252,6 +1259,7 @@ struct _virDriver { virDrvDomainSendProcessSignal domainSendProcessSignal; virDrvDomainLxcOpenNamespace domainLxcOpenNamespace; virDrvDomainGetVCPUMap domainGetVCPUMap; + virDrvDomainSetGuestVcpu domainSetGuestVcpu; }; diff --git a/src/libvirt.c b/src/libvirt.c index 59e02a1..9dd6b97 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -8969,6 +8969,61 @@ error: return -1; } + +/** + * virDomainSetGuestVcpu: + * @domain: pointer to domain object, or NULL for Domain0 + * @id: vcpu ID in the guest + * @online: desired state of the vcpu + * @flags: currently unused, callers should pass 0 + * + 
* Dynamically change the state of a virtual CPUs used by the domain by + * using the guest agent. The vCPU id used is from the point of view of + * the guest. + * + * Returns 0 in case of success, -1 in case of failure. + */ + +int +virDomainSetGuestVcpu(virDomainPtr domain, + unsigned int id, + unsigned int online, + unsigned int flags) +{ + virConnectPtr conn; + + VIR_DOMAIN_DEBUG(domain, "id=%u, online=%u, flags=%x", id, online, flags); + + virResetLastError(); + + if (!VIR_IS_CONNECTED_DOMAIN(domain)) { + virLibDomainError(VIR_ERR_INVALID_DOMAIN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + if (domain->conn->flags & VIR_CONNECT_RO) { + virLibDomainError(VIR_ERR_OPERATION_DENIED, __FUNCTION__); + goto error; + } + + conn = domain->conn; + + if (conn->driver->domainSetGuestVcpu) { + int ret; + ret = conn->driver->domainSetGuestVcpu(domain, id, online, flags); + if (ret < 0) + goto error; + return ret; + } + + virLibConnError(VIR_ERR_NO_SUPPORT, __FUNCTION__); + +error: + virDispatchError(domain->conn); + return -1; +} + + /** * virDomainGetVcpusFlags: * @domain: pointer to domain object, or NULL for Domain0 diff --git a/src/libvirt_public.syms b/src/libvirt_public.syms index 04465be..bbb7c77 100644 --- a/src/libvirt_public.syms +++ b/src/libvirt_public.syms @@ -624,6 +624,7 @@ LIBVIRT_1.0.6 { LIBVIRT_1.0.7 { global: virDomainGetVCPUMap; + virDomainSetGuestVcpu; } LIBVIRT_1.0.6; diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c index 99fa3c1..4dca3eb 100644 --- a/src/remote/remote_driver.c +++ b/src/remote/remote_driver.c @@ -6395,6 +6395,7 @@ static virDriver remote_driver = { .domainFSTrim = remoteDomainFSTrim, /* 1.0.1 */ .domainLxcOpenNamespace = remoteDomainLxcOpenNamespace, /* 1.0.2 */ .domainGetVCPUMap = remoteDomainGetVCPUMap, /* 1.0.7 */ + .domainSetGuestVcpu = remoteDomainSetGuestVcpu, /* 1.0.7 */ }; static virNetworkDriver network_driver = { diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x index 
cec3541..374df42 100644 --- a/src/remote/remote_protocol.x +++ b/src/remote/remote_protocol.x @@ -2748,6 +2748,13 @@ struct remote_domain_get_vcpu_map_ret { int ret; }; +struct remote_domain_set_guest_vcpu_args { + remote_nonnull_domain dom; + unsigned int id; + unsigned int online; + unsigned int flags; +}; + struct remote_domain_fstrim_args { remote_nonnull_domain dom; remote_string mountPoint; @@ -4450,6 +4457,12 @@ enum remote_procedure { /** * @generate: none */ - REMOTE_PROC_DOMAIN_GET_VCPU_MAP = 302 + REMOTE_PROC_DOMAIN_GET_VCPU_MAP = 302, + + /** + * @generate: both + */ + REMOTE_PROC_DOMAIN_SET_GUEST_VCPU = 303 + }; diff --git a/src/remote_protocol-structs b/src/remote_protocol-structs index e1ceabd..0ded7d8 100644 --- a/src/remote_protocol-structs +++ b/src/remote_protocol-structs @@ -2198,6 +2198,12 @@ struct remote_domain_get_vcpu_map_ret { } cpumap; int ret; }; +struct remote_domain_set_guest_vcpu_args { + remote_nonnull_domain dom; + u_int id; + u_int online; + u_int flags; +}; struct remote_domain_fstrim_args { remote_nonnull_domain dom; remote_string mountPoint; @@ -2507,4 +2513,5 @@ enum remote_procedure { REMOTE_PROC_DOMAIN_MIGRATE_SET_COMPRESSION_CACHE = 300, REMOTE_PROC_NODE_DEVICE_DETACH_FLAGS = 301, REMOTE_PROC_DOMAIN_GET_VCPU_MAP = 302, + REMOTE_PROC_DOMAIN_SET_GUEST_VCPU = 303, }; -- 1.8.2.1

Add a virsh command called "setguestvcpu" to excercise the virDomainSetGuestVcpu API. --- tools/virsh-domain.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++ tools/virsh.pod | 6 ++++ 2 files changed, 83 insertions(+) diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 0616487..e801b2d 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -5964,6 +5964,77 @@ cmdSetvcpus(vshControl *ctl, const vshCmd *cmd) } /* + * "setguestvcpu" command + */ +static const vshCmdInfo info_setguestvcpu[] = { + {.name = "help", + .data = N_("modify the state of a guest's virtual CPU") + }, + {.name = "desc", + .data = N_("Enable or disable virtual CPUs in a guest using the guest agent.") + }, + {.name = NULL} +}; + +static const vshCmdOptDef opts_setguestvcpu[] = { + {.name = "domain", + .type = VSH_OT_DATA, + .flags = VSH_OFLAG_REQ, + .help = N_("domain name, id or uuid") + }, + {.name = "id", + .type = VSH_OT_INT, + .flags = VSH_OFLAG_REQ, + .help = N_("id of the virtual CPU") + }, + {.name = "online", + .type = VSH_OT_BOOL, + .help = N_("enable the vCPU") + }, + {.name = "offline", + .type = VSH_OT_BOOL, + .help = N_("disable the vCPU") + }, + {.name = NULL} +}; + +static bool +cmdSetguestvcpu(vshControl *ctl, const vshCmd *cmd) +{ + virDomainPtr dom; + unsigned int id; + bool online = vshCommandOptBool(cmd, "online"); + bool offline = vshCommandOptBool(cmd, "offline"); + bool ret = false; + + VSH_EXCLUSIVE_OPTIONS_VAR(online, offline); + + if (!online && !offline) { + vshError(ctl, "%s", + _("need to specify either --online or --offline")); + return false; + } + + if (vshCommandOptUInt(cmd, "id", &id) < 0) { + vshError(ctl, "%s", _("Invalid or missing cpu ID")); + return false; + } + + if (!(dom = vshCommandOptDomain(ctl, cmd, NULL))) + return false; + + if (virDomainSetGuestVcpu(dom, id, online, 0) < 0) + goto cleanup; + + ret = true; + +cleanup: + virDomainFree(dom); + return ret; +} + + +/* * "cpu-compare" command */ static const vshCmdInfo 
info_cpu_compare[] = { @@ -10615,6 +10686,12 @@ const vshCmdDef domManagementCmds[] = { .info = info_setvcpus, .flags = 0 }, + {.name = "setguestvcpu", + .handler = cmdSetguestvcpu, + .opts = opts_setguestvcpu, + .info = info_setguestvcpu, + .flags = 0 + }, {.name = "shutdown", .handler = cmdShutdown, .opts = opts_shutdown, diff --git a/tools/virsh.pod b/tools/virsh.pod index 42e39d3..bde1bc6 100644 --- a/tools/virsh.pod +++ b/tools/virsh.pod @@ -1763,6 +1763,12 @@ If I<--agent> is specified, then guest agent commands are used to retrieve the count of available vCPUs from the perspective of the guest. This flag is usable only for live domains. +=item B<setguestvcpu> I<domain> I<cpu id> [{I<--online> | I<--offline>}] + +Set the online state of a vCPU in the guest. This command +requires the guest agent configured. The state may be either +I<--online> or I<--offline>. The flags are mutually exclusive. + =item B<vcpuinfo> I<domain> Returns basic information about the domain virtual CPUs, like the number of -- 1.8.2.1

Use the helper added earlier to implement this new API. --- src/qemu/qemu_driver.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index ba3a6e1..19a86bf 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -15377,6 +15377,70 @@ cleanup: } #undef MATCH + +static int +qemuDomainSetGuestVcpu(virDomainPtr dom, + unsigned int id, + unsigned int online, + unsigned int flags) +{ + virQEMUDriverPtr driver = dom->conn->privateData; + virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; + int ret = -1; + + qemuAgentCPUInfo agentinfo = { .id = id, + .online = !!online }; + + virCheckFlags(0, -1); + + if (!(vm = qemuDomObjFromDomain(dom))) + return -1; + + priv = vm->privateData; + + /* request data from the guest */ + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + goto cleanup; + + if (!virDomainObjIsActive(vm)) { + virReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("domain is not running")); + goto endjob; + } + + if (!priv->agent) { + virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", + _("guest agent is not configured")); + goto endjob; + } + + qemuDomainObjEnterAgent(vm); + ret = qemuAgentSetVCPUs(priv->agent, &agentinfo, 1); + qemuDomainObjExitAgent(vm); + +endjob: + if (qemuDomainObjEndJob(driver, vm) == 0) + vm = NULL; + + if (ret < 0) + goto cleanup; + + if (ret != 0) { + virReportError(VIR_ERR_OPERATION_FAILED, + _("failed to set state of vCPU '%u'"), id); + goto cleanup; + } + + ret = 0; + +cleanup: + if (vm) + virObjectUnlock(vm); + return ret; +} + + static virDriver qemuDriver = { .no = VIR_DRV_QEMU, .name = QEMU_DRIVER_NAME, @@ -15557,6 +15621,7 @@ static virDriver qemuDriver = { .domainFSTrim = qemuDomainFSTrim, /* 1.0.1 */ .domainOpenChannel = qemuDomainOpenChannel, /* 1.0.2 */ .domainGetVCPUMap = qemuDomainGetVCPUMap, /* 1.0.7 */ + .domainSetGuestVcpu = qemuDomainSetGuestVcpu, /* 1.0.7 */ }; -- 1.8.2.1
participants (3)
-
Daniel P. Berrange
-
Michal Privoznik
-
Peter Krempa