[libvirt] [PATCH 00/11] vCPU hotplug mega-series

This patch series consists of multiple parts: -patch 1,2: Two trivial cleanups -patch 3: Improve and refactor vCPU data parsing -patch 4: Add agent monitor helpers for cpu-hotplug stuff -patch 5,6,7: Implement universal guest vCPU mapping function -patch 8: Implement new qemu monitor command for cpu hotplug -patch 9,10,11: Implement agent based cpu state manipulation API/command Tip: The new "virsh vcpumap guest --active" command may be used to verify that the agent actually offlined the vCPU in the guest. Peter Krempa (11): virsh-domain-monitor: Remove ATTRIBUTE_UNUSED from an argument qemu: Use bool instead of int in qemuMonitorSetCPU APIs qemu: Extract more information about vCPUs and threads qemu_agent: Introduce helpers for agent based CPU hot(un)plug lib: Add API to map virtual cpus of a guest virsh-domain-monitor: Implement command to map guest vCPUs qemu: Implement virDomainGetVCPUMap for the qemu driver qemu: Implement new QMP command for cpu hotplug lib: Add API to modify vCPU state from the guest using the guest agent virsh-domain: Implement command for virDomainSetGuestVcpu qemu: Implement virDomainSetGuestVcpu in qemu driver daemon/remote.c | 54 ++++++++ include/libvirt/libvirt.h.in | 25 ++++ python/generator.py | 1 + python/libvirt-override-api.xml | 7 + python/libvirt-override.c | 66 ++++++++++ src/driver.h | 14 ++ src/libvirt.c | 129 +++++++++++++++++++ src/libvirt_public.syms | 7 + src/qemu/qemu_agent.c | 148 +++++++++++++++++++++ src/qemu/qemu_agent.h | 12 ++ src/qemu/qemu_driver.c | 279 +++++++++++++++++++++++++++++++++++++--- src/qemu/qemu_monitor.c | 11 +- src/qemu/qemu_monitor.h | 13 +- src/qemu/qemu_monitor_json.c | 86 ++++++++++--- src/qemu/qemu_monitor_json.h | 4 +- src/qemu/qemu_monitor_text.c | 94 ++++++++------ src/qemu/qemu_monitor_text.h | 4 +- src/qemu/qemu_process.c | 63 ++++++--- src/remote/remote_driver.c | 48 +++++++ src/remote/remote_protocol.x | 31 ++++- src/remote_protocol-structs | 20 +++ tools/virsh-domain-monitor.c | 112 
+++++++++++++++- tools/virsh-domain.c | 77 +++++++++++ tools/virsh.pod | 29 +++++ 24 files changed, 1224 insertions(+), 110 deletions(-) -- 1.8.2.1

The "cmd" argument in cmdList is now used. Unmark it as unused. --- tools/virsh-domain-monitor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/virsh-domain-monitor.c b/tools/virsh-domain-monitor.c index 5ed89d1..936fa8e 100644 --- a/tools/virsh-domain-monitor.c +++ b/tools/virsh-domain-monitor.c @@ -1689,7 +1689,7 @@ static const vshCmdOptDef opts_list[] = { if (vshCommandOptBool(cmd, NAME)) \ flags |= (FLAG) static bool -cmdList(vshControl *ctl, const vshCmd *cmd ATTRIBUTE_UNUSED) +cmdList(vshControl *ctl, const vshCmd *cmd) { bool managed = vshCommandOptBool(cmd, "managed-save"); bool optTitle = vshCommandOptBool(cmd, "title"); -- 1.8.2.1

The 'online' parameter has only two possible values. Use a bool for it. --- src/qemu/qemu_driver.c | 4 ++-- src/qemu/qemu_monitor.c | 2 +- src/qemu/qemu_monitor.h | 2 +- src/qemu/qemu_monitor_json.c | 2 +- src/qemu/qemu_monitor_json.h | 2 +- src/qemu/qemu_monitor_text.c | 2 +- src/qemu/qemu_monitor_text.h | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 4a76f14..4a4f8d3 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -3534,7 +3534,7 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, if (nvcpus > vcpus) { for (i = vcpus; i < nvcpus; i++) { /* Online new CPU */ - rc = qemuMonitorSetCPU(priv->mon, i, 1); + rc = qemuMonitorSetCPU(priv->mon, i, true); if (rc == 0) goto unsupported; if (rc < 0) @@ -3545,7 +3545,7 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, } else { for (i = vcpus - 1; i >= nvcpus; i--) { /* Offline old CPU */ - rc = qemuMonitorSetCPU(priv->mon, i, 0); + rc = qemuMonitorSetCPU(priv->mon, i, false); if (rc == 0) goto unsupported; if (rc < 0) diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c index 4e35f79..af85c29 100644 --- a/src/qemu/qemu_monitor.c +++ b/src/qemu/qemu_monitor.c @@ -1669,7 +1669,7 @@ int qemuMonitorSetBalloon(qemuMonitorPtr mon, } -int qemuMonitorSetCPU(qemuMonitorPtr mon, int cpu, int online) +int qemuMonitorSetCPU(qemuMonitorPtr mon, int cpu, bool online) { int ret; VIR_DEBUG("mon=%p cpu=%d online=%d", mon, cpu, online); diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index a607712..3d9afa3 100644 --- a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -299,7 +299,7 @@ int qemuMonitorExpirePassword(qemuMonitorPtr mon, const char *expire_time); int qemuMonitorSetBalloon(qemuMonitorPtr mon, unsigned long newmem); -int qemuMonitorSetCPU(qemuMonitorPtr mon, int cpu, int online); +int qemuMonitorSetCPU(qemuMonitorPtr mon, int cpu, bool online); /* XXX should we pass the 
virDomainDiskDefPtr instead diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index 2b73884..26bf09b 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -2076,7 +2076,7 @@ cleanup: * or -1 on failure */ int qemuMonitorJSONSetCPU(qemuMonitorPtr mon, - int cpu, int online) + int cpu, bool online) { /* XXX Update to use QMP, if QMP ever adds support for cpu hotplug */ VIR_DEBUG("no QMP support for cpu_set, trying HMP"); diff --git a/src/qemu/qemu_monitor_json.h b/src/qemu/qemu_monitor_json.h index 74e2476..d79b86b 100644 --- a/src/qemu/qemu_monitor_json.h +++ b/src/qemu/qemu_monitor_json.h @@ -94,7 +94,7 @@ int qemuMonitorJSONExpirePassword(qemuMonitorPtr mon, const char *expire_time); int qemuMonitorJSONSetBalloon(qemuMonitorPtr mon, unsigned long newmem); -int qemuMonitorJSONSetCPU(qemuMonitorPtr mon, int cpu, int online); +int qemuMonitorJSONSetCPU(qemuMonitorPtr mon, int cpu, bool online); int qemuMonitorJSONEjectMedia(qemuMonitorPtr mon, const char *dev_name, diff --git a/src/qemu/qemu_monitor_text.c b/src/qemu/qemu_monitor_text.c index d4ee93d..aa4145e 100644 --- a/src/qemu/qemu_monitor_text.c +++ b/src/qemu/qemu_monitor_text.c @@ -1231,7 +1231,7 @@ int qemuMonitorTextSetBalloon(qemuMonitorPtr mon, * Returns: 0 if CPU hotplug not supported, +1 if CPU hotplug worked * or -1 on failure */ -int qemuMonitorTextSetCPU(qemuMonitorPtr mon, int cpu, int online) +int qemuMonitorTextSetCPU(qemuMonitorPtr mon, int cpu, bool online) { char *cmd; char *reply = NULL; diff --git a/src/qemu/qemu_monitor_text.h b/src/qemu/qemu_monitor_text.h index fb8e904..5218a8b 100644 --- a/src/qemu/qemu_monitor_text.h +++ b/src/qemu/qemu_monitor_text.h @@ -90,7 +90,7 @@ int qemuMonitorTextExpirePassword(qemuMonitorPtr mon, const char *expire_time); int qemuMonitorTextSetBalloon(qemuMonitorPtr mon, unsigned long newmem); -int qemuMonitorTextSetCPU(qemuMonitorPtr mon, int cpu, int online); +int qemuMonitorTextSetCPU(qemuMonitorPtr mon, int 
cpu, bool online); int qemuMonitorTextEjectMedia(qemuMonitorPtr mon, const char *dev_name, -- 1.8.2.1

The qemu monitor provides more information about vCPUs of a guest than we needed currently. This patch upgrades the extraction function to easily extract new data about the vCPUs and fixes code to cope with the new structure. The information extracted here will be later used for mapping of vCPUs of a guest. This patch also refactors the function used to parse data from the text monitor. --- src/qemu/qemu_driver.c | 31 ++++++++------- src/qemu/qemu_monitor.c | 9 +++-- src/qemu/qemu_monitor.h | 11 +++++- src/qemu/qemu_monitor_json.c | 47 +++++++++++++--------- src/qemu/qemu_monitor_json.h | 2 +- src/qemu/qemu_monitor_text.c | 92 +++++++++++++++++++++++++------------------- src/qemu/qemu_monitor_text.h | 2 +- src/qemu/qemu_process.c | 63 ++++++++++++++++++++---------- 8 files changed, 159 insertions(+), 98 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 4a4f8d3..0041d8f 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -3522,8 +3522,8 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, int ret = -1; int oldvcpus = vm->def->vcpus; int vcpus = oldvcpus; - pid_t *cpupids = NULL; - int ncpupids; + qemuMonitorCPUInfoPtr cpuinfo = NULL; + int ncpuinfo; virCgroupPtr cgroup_vcpu = NULL; qemuDomainObjEnterMonitor(driver, vm); @@ -3564,13 +3564,13 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, * or don't have the "info cpus" command (and they don't support multiple * CPUs anyways), so errors in the re-detection will not be treated * fatal */ - if ((ncpupids = qemuMonitorGetCPUInfo(priv->mon, &cpupids)) <= 0) { + if ((ncpuinfo = qemuMonitorGetCPUInfo(priv->mon, &cpuinfo)) <= 0) { virResetLastError(); goto cleanup; } /* check if hotplug has failed */ - if (vcpus < oldvcpus && ncpupids == oldvcpus) { + if (vcpus < oldvcpus && ncpuinfo == oldvcpus) { virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", _("qemu didn't unplug the vCPUs properly")); vcpus = oldvcpus; @@ -3578,11 +3578,11 @@ static int 
qemuDomainHotplugVcpus(virQEMUDriverPtr driver, goto cleanup; } - if (ncpupids != vcpus) { + if (ncpuinfo != vcpus) { virReportError(VIR_ERR_INTERNAL_ERROR, _("got wrong number of vCPU pids from QEMU monitor. " "got %d, wanted %d"), - ncpupids, vcpus); + ncpuinfo, vcpus); ret = -1; goto cleanup; } @@ -3602,11 +3602,11 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, } /* Add vcpu thread to the cgroup */ - rv = virCgroupAddTask(cgroup_vcpu, cpupids[i]); + rv = virCgroupAddTask(cgroup_vcpu, cpuinfo[i].thread_id); if (rv < 0) { virReportSystemError(-rv, _("unable to add vcpu %d task %d to cgroup"), - i, cpupids[i]); + i, cpuinfo[i].thread_id); virCgroupRemove(cgroup_vcpu); goto cleanup; } @@ -3646,7 +3646,7 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, goto cleanup; } } else { - if (virProcessSetAffinity(cpupids[i], + if (virProcessSetAffinity(cpuinfo[i].thread_id, vcpupin->cpumask) < 0) { virReportError(VIR_ERR_SYSTEM_ERROR, _("failed to set cpu affinity for vcpu %d"), @@ -3687,15 +3687,18 @@ static int qemuDomainHotplugVcpus(virQEMUDriverPtr driver, } } - priv->nvcpupids = ncpupids; - VIR_FREE(priv->vcpupids); - priv->vcpupids = cpupids; - cpupids = NULL; + if (VIR_REALLOC_N(priv->vcpupids, ncpuinfo) < 0) { + virReportOOMError(); + goto cleanup; + } + + /* copy the thread id's */ + for (i = 0; i < ncpuinfo; i++) + priv->vcpupids[i] = cpuinfo[i].thread_id; cleanup: qemuDomainObjExitMonitor(driver, vm); vm->def->vcpus = vcpus; - VIR_FREE(cpupids); virDomainAuditVcpu(vm, oldvcpus, nvcpus, "update", rc == 1); if (cgroup_vcpu) virCgroupFree(&cgroup_vcpu); diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c index af85c29..783af2e 100644 --- a/src/qemu/qemu_monitor.c +++ b/src/qemu/qemu_monitor.c @@ -1281,8 +1281,9 @@ int qemuMonitorSystemReset(qemuMonitorPtr mon) } -int qemuMonitorGetCPUInfo(qemuMonitorPtr mon, - int **pids) +int +qemuMonitorGetCPUInfo(qemuMonitorPtr mon, + qemuMonitorCPUInfoPtr *info) { int ret; VIR_DEBUG("mon=%p", 
mon); @@ -1294,9 +1295,9 @@ int qemuMonitorGetCPUInfo(qemuMonitorPtr mon, } if (mon->json) - ret = qemuMonitorJSONGetCPUInfo(mon, pids); + ret = qemuMonitorJSONGetCPUInfo(mon, info); else - ret = qemuMonitorTextGetCPUInfo(mon, pids); + ret = qemuMonitorTextGetCPUInfo(mon, info); return ret; } diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index 3d9afa3..c51fee3 100644 --- a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -252,8 +252,17 @@ int qemuMonitorGetStatus(qemuMonitorPtr mon, int qemuMonitorSystemReset(qemuMonitorPtr mon); int qemuMonitorSystemPowerdown(qemuMonitorPtr mon); +typedef struct _qemuMonitorCPUInfo qemuMonitorCPUInfo; +typedef qemuMonitorCPUInfo *qemuMonitorCPUInfoPtr; +struct _qemuMonitorCPUInfo { + unsigned int id; + bool active; + pid_t thread_id; +}; + int qemuMonitorGetCPUInfo(qemuMonitorPtr mon, - int **pids); + qemuMonitorCPUInfoPtr *info); + int qemuMonitorGetVirtType(qemuMonitorPtr mon, int *virtType); int qemuMonitorGetBalloonInfo(qemuMonitorPtr mon, diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index 26bf09b..4a69fec 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -1164,17 +1164,17 @@ int qemuMonitorJSONSystemReset(qemuMonitorPtr mon) /* - * [ { "CPU": 0, "current": true, "halted": false, "pc": 3227107138 }, - * { "CPU": 1, "current": false, "halted": true, "pc": 7108165 } ] + * [ { "CPU": 0, "current": true, "halted": false, "pc": 3227107138, "thread_id": 1234}, + * { "CPU": 1, "current": false, "halted": true, "pc": 7108165, "thread_id": 5678 } ] */ static int qemuMonitorJSONExtractCPUInfo(virJSONValuePtr reply, - int **pids) + qemuMonitorCPUInfoPtr *info) { virJSONValuePtr data; int ret = -1; int i; - int *threads = NULL; + qemuMonitorCPUInfoPtr cpus = NULL; int ncpus; if (!(data = virJSONValueObjectGet(reply, "return"))) { @@ -1195,49 +1195,60 @@ qemuMonitorJSONExtractCPUInfo(virJSONValuePtr reply, goto cleanup; } - if (VIR_REALLOC_N(threads, ncpus) 
< 0) { + if (VIR_ALLOC_N(cpus, ncpus) < 0) { virReportOOMError(); goto cleanup; } for (i = 0; i < ncpus; i++) { virJSONValuePtr entry = virJSONValueArrayGet(data, i); - int thread; + bool halted; + if (!entry) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", - _("character device information was missing array element")); + _("cpu information was missing array element")); goto cleanup; } - if (virJSONValueObjectGetNumberInt(entry, "thread_id", &thread) < 0) { - /* Only qemu-kvm tree includs thread_id, so treat this as - non-fatal, simply returning no data */ - ret = 0; + if (virJSONValueObjectGetNumberUint(entry, "CPU", &cpus[i].id) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("CPU id missing in cpu information")); goto cleanup; } - threads[i] = thread; + if (virJSONValueObjectGetNumberInt(entry, "thread_id", + &cpus[i].thread_id) < 0) { + /* Some versions of qemu don't provide this information */ + cpus[i].thread_id = 0; + } + + if (virJSONValueObjectGetBoolean(entry, "halted", &halted) < 0) { + /* Not that important that we should fail */ + halted = false; + } + + cpus[i].active = !halted; } - *pids = threads; - threads = NULL; + *info = cpus; + cpus = NULL; ret = ncpus; cleanup: - VIR_FREE(threads); + VIR_FREE(cpus); return ret; } int qemuMonitorJSONGetCPUInfo(qemuMonitorPtr mon, - int **pids) + qemuMonitorCPUInfoPtr *info) { int ret; virJSONValuePtr cmd = qemuMonitorJSONMakeCommand("query-cpus", NULL); virJSONValuePtr reply = NULL; - *pids = NULL; + *info = NULL; if (!cmd) return -1; @@ -1248,7 +1259,7 @@ int qemuMonitorJSONGetCPUInfo(qemuMonitorPtr mon, ret = qemuMonitorJSONCheckError(cmd, reply); if (ret == 0) - ret = qemuMonitorJSONExtractCPUInfo(reply, pids); + ret = qemuMonitorJSONExtractCPUInfo(reply, info); virJSONValueFree(cmd); virJSONValueFree(reply); diff --git a/src/qemu/qemu_monitor_json.h b/src/qemu/qemu_monitor_json.h index d79b86b..ac077b1 100644 --- a/src/qemu/qemu_monitor_json.h +++ b/src/qemu/qemu_monitor_json.h @@ -53,7 +53,7 @@ 
int qemuMonitorJSONSystemPowerdown(qemuMonitorPtr mon); int qemuMonitorJSONSystemReset(qemuMonitorPtr mon); int qemuMonitorJSONGetCPUInfo(qemuMonitorPtr mon, - int **pids); + qemuMonitorCPUInfoPtr *info); int qemuMonitorJSONGetVirtType(qemuMonitorPtr mon, int *virtType); int qemuMonitorJSONGetBalloonInfo(qemuMonitorPtr mon, diff --git a/src/qemu/qemu_monitor_text.c b/src/qemu/qemu_monitor_text.c index aa4145e..a967ae6 100644 --- a/src/qemu/qemu_monitor_text.c +++ b/src/qemu/qemu_monitor_text.c @@ -501,13 +501,16 @@ int qemuMonitorTextSystemReset(qemuMonitorPtr mon) { } -int qemuMonitorTextGetCPUInfo(qemuMonitorPtr mon, - int **pids) +int +qemuMonitorTextGetCPUInfo(qemuMonitorPtr mon, + qemuMonitorCPUInfoPtr *info) { char *qemucpus = NULL; - char *line; - pid_t *cpupids = NULL; - size_t ncpupids = 0; + char **cpulines = NULL; + char **cpuline; + qemuMonitorCPUInfoPtr cpus = NULL; + size_t ncpus = 0; + int ret = -1; if (qemuMonitorHMPCommand(mon, "info cpus", &qemucpus) < 0) return -1; @@ -516,52 +519,63 @@ int qemuMonitorTextGetCPUInfo(qemuMonitorPtr mon, * This is the gross format we're about to parse :-{ * * (qemu) info cpus - * * CPU #0: pc=0x00000000000f0c4a thread_id=30019 - * CPU #1: pc=0x00000000fffffff0 thread_id=30020 - * CPU #2: pc=0x00000000fffffff0 thread_id=30021 + * * CPU #0: pc=0xffffffff8103b90b (halted) thread_id=874519 + * CPU #1: pc=0xffffffff8102e191 (halted) thread_id=874520 + * CPU #2: pc=0xffffffff812c12ae thread_id=874521 + * CPU #3: pc=0xffffffff810521cf thread_id=874522 * */ - line = qemucpus; - do { - char *offset = NULL; - char *end = NULL; + if (!(cpulines = virStringSplit(qemucpus, "\n", 0))) + goto cleanup; + + cpuline = cpulines; + + while (*cpuline++) { + char *offset; + char *end; + int id; int tid = 0; + bool halted = false; - /* Extract host Thread ID */ - if ((offset = strstr(line, "thread_id=")) == NULL) - goto error; + /* Extract vCPU id */ + if (!(offset = strchr(*cpuline, '#')) || + virStrToLong_i(offset + 1, &end, 10, &id) < 
0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("failed to extract vCPU id")); + goto cleanup; + } - if (virStrToLong_i(offset + strlen("thread_id="), &end, 10, &tid) < 0) - goto error; - if (end == NULL || !c_isspace(*end)) - goto error; + /* extract cpu state */ + if (strstr(*cpuline, "(halted)")) + halted = true; - if (VIR_REALLOC_N(cpupids, ncpupids+1) < 0) - goto error; + /* Extract host Thread ID, some versions of qemu may lack this info */ + if ((offset = strstr(*cpuline, "thread_id="))) { + if (virStrToLong_i(offset + strlen("thread_id="), &end, 10, &tid) < 0) + tid = 0; + } - VIR_DEBUG("tid=%d", tid); - cpupids[ncpupids++] = tid; + ncpus++; - /* Skip to next data line */ - line = strchr(offset, '\r'); - if (line == NULL) - line = strchr(offset, '\n'); - } while (line != NULL); + if (VIR_REALLOC_N(cpus, ncpus) < 0) { + virReportOOMError(); + goto cleanup; + } - /* Validate we got data for all VCPUs we expected */ - VIR_FREE(qemucpus); - *pids = cpupids; - return ncpupids; + cpus[ncpus].id = id; + cpus[ncpus].thread_id = tid; + cpus[ncpus].active = !halted; + } -error: + *info = cpus; + ret = ncpus; + +cleanup: VIR_FREE(qemucpus); - VIR_FREE(cpupids); + VIR_FREE(cpus); + virStringFreeList(cpulines); - /* Returning 0 to indicate non-fatal failure, since - * older QEMU does not have VCPU<->PID mapping and - * we don't want to fail on that - */ - return 0; + return ret; } diff --git a/src/qemu/qemu_monitor_text.h b/src/qemu/qemu_monitor_text.h index 5218a8b..08f4280 100644 --- a/src/qemu/qemu_monitor_text.h +++ b/src/qemu/qemu_monitor_text.h @@ -50,7 +50,7 @@ int qemuMonitorTextSystemPowerdown(qemuMonitorPtr mon); int qemuMonitorTextSystemReset(qemuMonitorPtr mon); int qemuMonitorTextGetCPUInfo(qemuMonitorPtr mon, - int **pids); + qemuMonitorCPUInfoPtr *info); int qemuMonitorTextGetVirtType(qemuMonitorPtr mon, int *virtType); int qemuMonitorTextGetBalloonInfo(qemuMonitorPtr mon, diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 
d4fd4fb..8de14c3 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -1755,40 +1755,63 @@ static int qemuProcessDetectVcpuPIDs(virQEMUDriverPtr driver, virDomainObjPtr vm) { - pid_t *cpupids = NULL; - int ncpupids; qemuDomainObjPrivatePtr priv = vm->privateData; + qemuMonitorCPUInfoPtr info = NULL; + int ninfo; + int i; + int ret = -1; - qemuDomainObjEnterMonitor(driver, vm); /* failure to get the VCPU<-> PID mapping or to execute the query * command will not be treated fatal as some versions of qemu don't * support this command */ - if ((ncpupids = qemuMonitorGetCPUInfo(priv->mon, &cpupids)) <= 0) { - qemuDomainObjExitMonitor(driver, vm); - virResetLastError(); + qemuDomainObjEnterMonitor(driver, vm); + ninfo = qemuMonitorGetCPUInfo(priv->mon, &info); + qemuDomainObjExitMonitor(driver, vm); - priv->nvcpupids = 1; - if (VIR_ALLOC_N(priv->vcpupids, priv->nvcpupids) < 0) { - virReportOOMError(); - return -1; - } - priv->vcpupids[0] = vm->pid; - return 0; + if (ninfo < 0) + goto fallback; + + if (VIR_ALLOC_N(priv->vcpupids, ninfo) < 0) { + virReportOOMError(); + goto cleanup; } - qemuDomainObjExitMonitor(driver, vm); - if (ncpupids != vm->def->vcpus) { + for (i = 0; i < ninfo; i++) { + /* some versions of qemu don't expose thread IDs. Fall back gracefully */ + if (info[i].thread_id == 0) + goto fallback; + + priv->vcpupids[i] = info[i].thread_id; + } + + if (ninfo != vm->def->vcpus) { virReportError(VIR_ERR_INTERNAL_ERROR, _("got wrong number of vCPU pids from QEMU monitor. 
" "got %d, wanted %d"), - ncpupids, vm->def->vcpus); - VIR_FREE(cpupids); - return -1; + ninfo, vm->def->vcpus); + VIR_FREE(priv->vcpupids); + goto cleanup; } - priv->nvcpupids = ncpupids; - priv->vcpupids = cpupids; + priv->nvcpupids = ninfo; + ret = 0; + +cleanup: + VIR_FREE(info); + return ret; + +fallback: + VIR_FREE(info); + VIR_FREE(priv->vcpupids); + virResetLastError(); + priv->nvcpupids = 1; + if (VIR_ALLOC_N(priv->vcpupids, priv->nvcpupids) < 0) { + virReportOOMError(); + return -1; + } + priv->vcpupids[0] = vm->pid; return 0; + } -- 1.8.2.1

The qemu guest agent allows to online and offline CPUs from the perspective of the guest. This patch adds helpers that call 'guest-get-vcpus' and 'guest-set-vcpus' guest agent functions and convert the data for internal libvirt usage. --- src/qemu/qemu_agent.c | 148 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/qemu/qemu_agent.h | 12 ++++ 2 files changed, 160 insertions(+) diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index c7a9681..b094763 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -1456,3 +1456,151 @@ qemuAgentFSTrim(qemuAgentPtr mon, virJSONValueFree(reply); return ret; } + +int +qemuAgentGetVCPUs(qemuAgentPtr mon, + qemuAgentCPUInfoPtr *info) +{ + int ret = -1; + int i; + virJSONValuePtr cmd; + virJSONValuePtr reply = NULL; + virJSONValuePtr data = NULL; + int ndata; + + if (!(cmd = qemuAgentMakeCommand("guest-get-vcpus", NULL))) + return -1; + + if (qemuAgentCommand(mon, cmd, &reply, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) < 0 || + qemuAgentCheckError(cmd, reply) < 0) + goto cleanup; + + if (!(data = virJSONValueObjectGet(reply, "return"))) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest-get-vcpus reply was missing return data")); + goto cleanup; + } + + if (data->type != VIR_JSON_TYPE_ARRAY) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("guest-get-vcpus return information was not an array")); + goto cleanup; + } + + ndata = virJSONValueArraySize(data); + + if (VIR_ALLOC_N(*info, ndata) < 0) { + virReportOOMError(); + goto cleanup; + } + + for (i = 0; i < ndata; i++) { + virJSONValuePtr entry = virJSONValueArrayGet(data, i); + qemuAgentCPUInfoPtr in = *info + i; + + if (!entry) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("array element missing in guest-get-vcpus return " + "value")); + goto cleanup; + } + + if (virJSONValueObjectGetNumberUint(entry, "logical-id", &in->id) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'logical-id' missing in reply of guest-get-vcpus")); + goto 
cleanup; + } + + if (virJSONValueObjectGetBoolean(entry, "online", &in->online) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'online' missing in reply of guest-get-vcpus")); + goto cleanup; + } + + if (virJSONValueObjectGetBoolean(entry, "can-offline", + &in->offlinable) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("'can-offline' missing in reply of guest-get-vcpus")); + goto cleanup; + } + } + + ret = ndata; + +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + virJSONValueFree(data); + return ret; +} + +/** + * Set the VCPU state using guest agent. + * + * Returns -1 on error, ninfo in case everything was successful and less than + * ninfo on a partial failure. + */ +int +qemuAgentSetVCPUs(qemuAgentPtr mon, + qemuAgentCPUInfoPtr info, + size_t ninfo) +{ + int ret = -1; + virJSONValuePtr cmd = NULL; + virJSONValuePtr reply = NULL; + virJSONValuePtr cpus = NULL; + virJSONValuePtr cpu = NULL; + size_t i; + + /* create the key data array */ + if (!(cpus = virJSONValueNewArray())) + goto no_memory; + + for (i = 0; i < ninfo; i++) { + qemuAgentCPUInfoPtr in = &info[i]; + + /* create single cpu object */ + if (!(cpu = virJSONValueNewObject())) + goto no_memory; + + if (virJSONValueObjectAppendNumberInt(cpu, "logical-id", in->id) < 0) + goto no_memory; + + if (virJSONValueObjectAppendBoolean(cpu, "online", in->online) < 0) + goto no_memory; + + if (virJSONValueArrayAppend(cpus, cpu) < 0) + goto no_memory; + + cpu = NULL; + } + + if (!(cmd = qemuAgentMakeCommand("guest-set-vcpus", + "a:vcpus", cpus, + NULL))) + goto cleanup; + + cpus = NULL; + + if (qemuAgentCommand(mon, cmd, &reply, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) < 0 || + qemuAgentCheckError(cmd, reply) < 0) + goto cleanup; + + if (virJSONValueObjectGetNumberInt(reply, "return", &ret) < 0) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", + _("malformed return value")); + } + +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + virJSONValueFree(cpu); + 
virJSONValueFree(cpus); + return ret; + +no_memory: + virReportOOMError(); + goto cleanup; +} diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h index ba04e61..cf70653 100644 --- a/src/qemu/qemu_agent.h +++ b/src/qemu/qemu_agent.h @@ -82,4 +82,16 @@ int qemuAgentArbitraryCommand(qemuAgentPtr mon, int timeout); int qemuAgentFSTrim(qemuAgentPtr mon, unsigned long long minimum); + + +typedef struct _qemuAgentCPUInfo qemuAgentCPUInfo; +typedef qemuAgentCPUInfo *qemuAgentCPUInfoPtr; +struct _qemuAgentCPUInfo { + unsigned int id; /* logical cpu ID */ + bool online; /* true if the CPU is activated */ + bool offlinable; /* true if the CPU can be offlined */ +}; + +int qemuAgentGetVCPUs(qemuAgentPtr mon, qemuAgentCPUInfoPtr *info); +int qemuAgentSetVCPUs(qemuAgentPtr mon, qemuAgentCPUInfoPtr cpus, size_t ncpus); #endif /* __QEMU_AGENT_H__ */ -- 1.8.2.1

QEMU recently added support for cpu hotplug upstream that will support plugging arbitrary cpus. Additionally guest-agent-based cpu state modification from the guest point of view was added recently. This API will help monitoring the state of vCPUs using the two apporoaches as a support infrastructure for the modification APIs. --- daemon/remote.c | 54 ++++++++++++++++++++++++++++++ include/libvirt/libvirt.h.in | 21 ++++++++++++ python/generator.py | 1 + python/libvirt-override-api.xml | 7 ++++ python/libvirt-override.c | 66 ++++++++++++++++++++++++++++++++++++ src/driver.h | 6 ++++ src/libvirt.c | 74 +++++++++++++++++++++++++++++++++++++++++ src/libvirt_public.syms | 6 ++++ src/remote/remote_driver.c | 47 ++++++++++++++++++++++++++ src/remote/remote_protocol.x | 18 +++++++++- src/remote_protocol-structs | 13 ++++++++ 11 files changed, 312 insertions(+), 1 deletion(-) diff --git a/daemon/remote.c b/daemon/remote.c index 0e253bf..bb0640a 100644 --- a/daemon/remote.c +++ b/daemon/remote.c @@ -4583,6 +4583,60 @@ cleanup: return rv; } + +static int +remoteDispatchDomainGetVcpuMap(virNetServerPtr server ATTRIBUTE_UNUSED, + virNetServerClientPtr client ATTRIBUTE_UNUSED, + virNetMessagePtr msg ATTRIBUTE_UNUSED, + virNetMessageErrorPtr rerr, + remote_domain_get_vcpu_map_args *args, + remote_domain_get_vcpu_map_ret *ret) +{ + unsigned char *cpumap = NULL; + unsigned int flags; + int cpunum; + int rv = -1; + virDomainPtr dom = NULL; + struct daemonClientPrivate *priv = + virNetServerClientGetPrivateData(client); + + if (!priv->conn) { + virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("connection not open")); + goto cleanup; + } + + if (!(dom = get_nonnull_domain(priv->conn, args->dom))) + goto cleanup; + + flags = args->flags; + + cpunum = virDomainGetVCPUMap(dom, + args->need_map ? 
&cpumap : NULL, + flags); + if (cpunum < 0) + goto cleanup; + + /* 'serialize' return cpumap */ + if (args->need_map) { + ret->cpumap.cpumap_len = VIR_CPU_MAPLEN(cpunum); + ret->cpumap.cpumap_val = (char *) cpumap; + cpumap = NULL; + } + + ret->ret = cpunum; + + rv = 0; + +cleanup: + if (rv < 0) + virNetMessageSaveError(rerr); + if (dom) + virDomainFree(dom); + VIR_FREE(cpumap); + return rv; +} + + static int lxcDispatchDomainOpenNamespace(virNetServerPtr server ATTRIBUTE_UNUSED, virNetServerClientPtr client ATTRIBUTE_UNUSED, diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 1804c93..46d499c 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -4858,6 +4858,27 @@ int virNWFilterGetUUIDString (virNWFilterPtr nwfilter, char *buf); char * virNWFilterGetXMLDesc (virNWFilterPtr nwfilter, unsigned int flags); + +/** + * virDomainGetVCPUMapFlags + * + * Since 1.0.6 + */ +typedef enum { + VIR_DOMAIN_VCPU_MAP_HYPERVISOR = (1 << 0), /* request data from hypervisor */ + VIR_DOMAIN_VCPU_MAP_AGENT = (1 << 1), /* request data from guest agent */ + + VIR_DOMAIN_VCPU_MAP_POSSIBLE = (1 << 2), /* map all possible vcpus */ + VIR_DOMAIN_VCPU_MAP_ONLINE = (1 << 3), /* map all online vcpus */ + VIR_DOMAIN_VCPU_MAP_OFFLINE = (1 << 4), /* map all offline vcpus */ + VIR_DOMAIN_VCPU_MAP_OFFLINABLE = (1 << 5), /* map all vcpus that can be offlined */ + VIR_DOMAIN_VCPU_MAP_ACTIVE = (1 << 6), /* map cpus that are in use by the guest */ +} virDomainGetVCPUMapFlags; + +int virDomainGetVCPUMap(virDomainPtr dom, + unsigned char **cpumap, + unsigned int flags); + /** * virDomainConsoleFlags * diff --git a/python/generator.py b/python/generator.py index 8c380bb..4884c29 100755 --- a/python/generator.py +++ b/python/generator.py @@ -458,6 +458,7 @@ skip_impl = ( 'virNodeGetMemoryParameters', 'virNodeSetMemoryParameters', 'virNodeGetCPUMap', + 'virDomainGetVCPUMap', ) lxc_skip_impl = ( diff --git a/python/libvirt-override-api.xml 
b/python/libvirt-override-api.xml index 155ab36..7725800 100644 --- a/python/libvirt-override-api.xml +++ b/python/libvirt-override-api.xml @@ -584,5 +584,12 @@ <arg name='conn' type='virConnectPtr' info='pointer to the hypervisor connection'/> <arg name='flags' type='int' info='unused, pass 0'/> </function> + <function name='virDomainGetVCPUMap' file='python'> + <info>Get node CPU information</info> + <return type='char *' info='(cpunum, cpumap) on success, None on error'/> + <arg name='dom' type='virDomainPtr' info='pointer to the domain'/> + <arg name='flags' type='unsigned int' info='an OR'ed set of virDomainGetVCPUMapFlags'/> + </function> + </symbols> </api> diff --git a/python/libvirt-override.c b/python/libvirt-override.c index 6ed2624..016b192 100644 --- a/python/libvirt-override.c +++ b/python/libvirt-override.c @@ -6759,6 +6759,71 @@ error: } +static PyObject * +libvirt_virDomainGetVCPUMap(PyObject *self ATTRIBUTE_UNUSED, + PyObject *args) +{ + virDomainPtr domain; + PyObject *pyobj_domain; + PyObject *ret = NULL; + PyObject *pycpumap = NULL; + PyObject *pyused = NULL; + PyObject *pycpunum = NULL; + PyObject *pyonline = NULL; + int i_retval; + unsigned char *cpumap = NULL; + unsigned int flags; + int i; + + if (!PyArg_ParseTuple(args, (char *)"Oi:virDomainGetVCPUMap", + &pyobj_domain, &flags)) + return NULL; + + domain = (virDomainPtr) PyvirDomain_Get(pyobj_domain); + + LIBVIRT_BEGIN_ALLOW_THREADS; + i_retval = virDomainGetVCPUMap(domain, &cpumap, flags); + LIBVIRT_END_ALLOW_THREADS; + + if (i_retval < 0) + return VIR_PY_NONE; + + if (!(ret = PyTuple_New(2))) + goto error; + + /* 0: number of CPUs */ + if ((pycpunum = PyLong_FromLong(i_retval)) == NULL || + PyTuple_SetItem(ret, 0, pycpunum) < 0) + goto error; + + /* 1: CPU map */ + if ((pycpumap = PyList_New(i_retval)) == NULL) + goto error; + + for (i = 0; i < i_retval; i++) { + if ((pyused = PyBool_FromLong(VIR_CPU_USED(cpumap, i))) == NULL) + goto error; + if (PyList_SetItem(pycpumap, i, pyused) < 0) 
+ goto error; + } + + if (PyTuple_SetItem(ret, 1, pycpumap) < 0) + goto error; + +cleanup: + VIR_FREE(cpumap); + return ret; +error: + Py_XDECREF(ret); + Py_XDECREF(pycpumap); + Py_XDECREF(pyused); + Py_XDECREF(pycpunum); + Py_XDECREF(pyonline); + ret = NULL; + goto cleanup; +} + + /************************************************************************ * * * The registration stuff * @@ -6882,6 +6947,7 @@ static PyMethodDef libvirtMethods[] = { {(char *) "virNodeGetMemoryParameters", libvirt_virNodeGetMemoryParameters, METH_VARARGS, NULL}, {(char *) "virNodeSetMemoryParameters", libvirt_virNodeSetMemoryParameters, METH_VARARGS, NULL}, {(char *) "virNodeGetCPUMap", libvirt_virNodeGetCPUMap, METH_VARARGS, NULL}, + {(char *) "virDomainGetVCPUMap", libvirt_virDomainGetVCPUMap, METH_VARARGS, NULL}, {NULL, NULL, 0, NULL} }; diff --git a/src/driver.h b/src/driver.h index e998adf..0caa2d6 100644 --- a/src/driver.h +++ b/src/driver.h @@ -1047,6 +1047,11 @@ typedef int int **fdlist, unsigned int flags); +typedef int +(*virDrvDomainGetVCPUMap)(virDomainPtr dom, + unsigned char **cpumap, + unsigned int flags); + typedef struct _virDriver virDriver; typedef virDriver *virDriverPtr; @@ -1248,6 +1253,7 @@ struct _virDriver { virDrvDomainFSTrim domainFSTrim; virDrvDomainSendProcessSignal domainSendProcessSignal; virDrvDomainLxcOpenNamespace domainLxcOpenNamespace; + virDrvDomainGetVCPUMap domainGetVCPUMap; }; diff --git a/src/libvirt.c b/src/libvirt.c index b129611..3f51e83 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -9499,6 +9499,80 @@ error: return -1; } + +/** + * virDomainGetVCPUMap: + * @domain: pointer to domain object + * @cpumap: pointer to a pointer where the returned cpumap will be stored + * @flags: bitwise-OR of virDomainGetVCPUMapFlags + * + * Request a map of virtual processors of a domain. Use @flags to control + * the data contained in the map. 
+ * + * If @flags is 0 this function behaves as if flags + * VIR_DOMAIN_VCPU_MAP_HYPERVISOR and VIR_DOMAIN_VCPU_MAP_POSSIBLE were + * specified. + * + * When @flags contains VIR_DOMAIN_VCPU_MAP_AGENT the guest agent is used + * to query the requested data from the point of view of the guest. + * + * Returns the length of @cpumap or -1 in case of error. + */ +int +virDomainGetVCPUMap(virDomainPtr domain, + unsigned char **cpumap, + unsigned int flags) +{ + virConnectPtr conn; + + VIR_DOMAIN_DEBUG(domain); + + virResetLastError(); + + if (!VIR_IS_CONNECTED_DOMAIN(domain)) { + virLibDomainError(VIR_ERR_INVALID_DOMAIN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + + conn = domain->conn; + + if ((flags & (VIR_DOMAIN_VCPU_MAP_HYPERVISOR | + VIR_DOMAIN_VCPU_MAP_AGENT)) == 0) + flags |= VIR_DOMAIN_VCPU_MAP_HYPERVISOR; + + if ((flags & (VIR_DOMAIN_VCPU_MAP_POSSIBLE | + VIR_DOMAIN_VCPU_MAP_ONLINE | + VIR_DOMAIN_VCPU_MAP_OFFLINE | + VIR_DOMAIN_VCPU_MAP_OFFLINABLE)) == 0) + flags |= VIR_DOMAIN_VCPU_MAP_ONLINE; + + if (flags & VIR_DOMAIN_VCPU_MAP_HYPERVISOR && + flags & VIR_DOMAIN_VCPU_MAP_AGENT) { + virReportInvalidArg(flags, + _("flags VIR_DOMAIN_VCPU_MAP_HYPERVISOR and " + "VIR_DOMAIN_VCPU_MAP_AGENT in %s " + "are mutually exclusive"), + __FUNCTION__); + goto error; + } + + if (conn->driver->domainGetVCPUMap) { + int ret; + ret = conn->driver->domainGetVCPUMap(domain, cpumap, flags); + if (ret < 0) + goto error; + return ret; + } + + virLibConnError(VIR_ERR_NO_SUPPORT, __FUNCTION__); + +error: + virDispatchError(domain->conn); + return -1; + +} + /** * virDomainGetSecurityLabel: * @domain: a domain object diff --git a/src/libvirt_public.syms b/src/libvirt_public.syms index 4ee2d27..04465be 100644 --- a/src/libvirt_public.syms +++ b/src/libvirt_public.syms @@ -621,4 +621,10 @@ LIBVIRT_1.0.6 { virGetLastErrorMessage; } LIBVIRT_1.0.5; +LIBVIRT_1.0.7 { + global: + virDomainGetVCPUMap; +} LIBVIRT_1.0.6; + + # .... 
define new API here using predicted next version number .... diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c index 2cda559..99fa3c1 100644 --- a/src/remote/remote_driver.c +++ b/src/remote/remote_driver.c @@ -5951,6 +5951,52 @@ done: static int +remoteDomainGetVCPUMap(virDomainPtr dom, + unsigned char **cpumap, + unsigned int flags) +{ + int rv = -1; + remote_domain_get_vcpu_map_args args; + remote_domain_get_vcpu_map_ret ret; + struct private_data *priv = dom->conn->privateData; + + remoteDriverLock(priv); + + args.need_map = !!cpumap; + args.flags = flags; + make_nonnull_domain(&args.dom, dom); + + memset(&ret, 0, sizeof(ret)); + if (call(dom->conn, priv, 0, REMOTE_PROC_DOMAIN_GET_VCPU_MAP, + (xdrproc_t) xdr_remote_domain_get_vcpu_map_args, + (char *) &args, + (xdrproc_t) xdr_remote_domain_get_vcpu_map_ret, + (char *) &ret) == -1) + goto done; + + if (ret.ret < 0) + goto cleanup; + + if (cpumap) { + if (VIR_ALLOC_N(*cpumap, ret.cpumap.cpumap_len) < 0) { + virReportOOMError(); + goto cleanup; + } + memcpy(*cpumap, ret.cpumap.cpumap_val, ret.cpumap.cpumap_len); + } + + rv = ret.ret; + +cleanup: + xdr_free((xdrproc_t) xdr_remote_domain_get_vcpu_map_ret, (char *) &ret); +done: + remoteDriverUnlock(priv); + return rv; +} + + + +static int remoteDomainLxcOpenNamespace(virDomainPtr domain, int **fdlist, unsigned int flags) @@ -6348,6 +6394,7 @@ static virDriver remote_driver = { .nodeGetCPUMap = remoteNodeGetCPUMap, /* 1.0.0 */ .domainFSTrim = remoteDomainFSTrim, /* 1.0.1 */ .domainLxcOpenNamespace = remoteDomainLxcOpenNamespace, /* 1.0.2 */ + .domainGetVCPUMap = remoteDomainGetVCPUMap, /* 1.0.7 */ }; static virNetworkDriver network_driver = { diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x index 1ebbce7..7db6be3 100644 --- a/src/remote/remote_protocol.x +++ b/src/remote/remote_protocol.x @@ -2737,6 +2737,17 @@ struct remote_node_get_cpu_map_ret { int ret; }; +struct remote_domain_get_vcpu_map_args { + remote_nonnull_domain 
dom; + int need_map; + unsigned int flags; +}; + +struct remote_domain_get_vcpu_map_ret { + opaque cpumap<REMOTE_CPUMAP_MAX>; + int ret; +}; + struct remote_domain_fstrim_args { remote_nonnull_domain dom; remote_string mountPoint; @@ -4434,6 +4445,11 @@ enum remote_procedure { /** * @generate: server */ - REMOTE_PROC_NODE_DEVICE_DETACH_FLAGS = 301 + REMOTE_PROC_NODE_DEVICE_DETACH_FLAGS = 301, + + /** + * @generate: none + */ + REMOTE_PROC_DOMAIN_GET_VCPU_MAP = 302 }; diff --git a/src/remote_protocol-structs b/src/remote_protocol-structs index ea38ea2..e1ceabd 100644 --- a/src/remote_protocol-structs +++ b/src/remote_protocol-structs @@ -2186,6 +2186,18 @@ struct remote_node_get_cpu_map_ret { u_int online; int ret; }; +struct remote_domain_get_vcpu_map_args { + remote_nonnull_domain dom; + int need_map; + u_int flags; +}; +struct remote_domain_get_vcpu_map_ret { + struct { + u_int cpumap_len; + char * cpumap_val; + } cpumap; + int ret; +}; struct remote_domain_fstrim_args { remote_nonnull_domain dom; remote_string mountPoint; @@ -2494,4 +2506,5 @@ enum remote_procedure { REMOTE_PROC_DOMAIN_MIGRATE_GET_COMPRESSION_CACHE = 299, REMOTE_PROC_DOMAIN_MIGRATE_SET_COMPRESSION_CACHE = 300, REMOTE_PROC_NODE_DEVICE_DETACH_FLAGS = 301, + REMOTE_PROC_DOMAIN_GET_VCPU_MAP = 302, }; -- 1.8.2.1

Add support for the virDomainGetVCPUMap API. The code is based on node vcpu mapping. --- tools/virsh-domain-monitor.c | 110 +++++++++++++++++++++++++++++++++++++++++++ tools/virsh.pod | 23 +++++++++ 2 files changed, 133 insertions(+) diff --git a/tools/virsh-domain-monitor.c b/tools/virsh-domain-monitor.c index 936fa8e..8814705 100644 --- a/tools/virsh-domain-monitor.c +++ b/tools/virsh-domain-monitor.c @@ -1806,6 +1806,111 @@ cleanup: } #undef FILTER +static const vshCmdInfo info_VCPUMap[] = { + {.name = "help", + .data = N_("request map of virtual processors of a domain") + }, + {.name = "desc", + .data = N_("Returns the map of virtual processors of a domain") + }, + {.name = NULL} +}; + +static const vshCmdOptDef opts_VCPUMap[] = { + {.name = "domain", + .type = VSH_OT_DATA, + .flags = VSH_OFLAG_REQ, + .help = N_("domain name, id or uuid") + }, + {.name = "agent", + .type = VSH_OT_BOOL, + .help = N_("use guest agent") + }, + {.name = "hypervisor", + .type = VSH_OT_BOOL, + .help = N_("show hypervisor data") + }, + {.name = "online", + .type = VSH_OT_BOOL, + .help = N_("show online cpus") + }, + {.name = "offlinable", + .type = VSH_OT_BOOL, + .help = N_("show offlinable vCPUs") + }, + {.name = "offline", + .type = VSH_OT_BOOL, + .help = N_("show offline cpus") + }, + {.name = "possible", + .type = VSH_OT_BOOL, + .help = N_("show all possible vCPUs") + }, + {.name = "active", + .type = VSH_OT_BOOL, + .help = N_("show currently active vCPUs") + }, + {.name = NULL} +}; + +static bool +cmdVCPUMap(vshControl *ctl, const vshCmd *cmd) +{ + int cpu, cpunum; + unsigned char *cpumap = NULL; + bool ret = false; + virDomainPtr dom = NULL; + unsigned int flags = 0; + bool agent = vshCommandOptBool(cmd, "agent"); + bool hypervisor = vshCommandOptBool(cmd, "hypervisor"); + bool online = vshCommandOptBool(cmd, "online"); + bool offline = vshCommandOptBool(cmd, "offline"); + + VSH_EXCLUSIVE_OPTIONS_VAR(agent, hypervisor); + VSH_EXCLUSIVE_OPTIONS_VAR(online, offline); + + if (!(dom 
= vshCommandOptDomain(ctl, cmd, NULL))) + return false; + + if (agent) + flags |= VIR_DOMAIN_VCPU_MAP_AGENT; + + if (hypervisor) + flags |= VIR_DOMAIN_VCPU_MAP_HYPERVISOR; + + if (online) + flags |= VIR_DOMAIN_VCPU_MAP_ONLINE; + + if (offline) + flags |= VIR_DOMAIN_VCPU_MAP_OFFLINE; + + if (vshCommandOptBool(cmd, "offlinable")) + flags |= VIR_DOMAIN_VCPU_MAP_OFFLINABLE; + + if (vshCommandOptBool(cmd, "possible")) + flags |= VIR_DOMAIN_VCPU_MAP_POSSIBLE; + + if (vshCommandOptBool(cmd, "active")) + flags |= VIR_DOMAIN_VCPU_MAP_ACTIVE; + + if ((cpunum = virDomainGetVCPUMap(dom, &cpumap, flags)) < 0) { + vshError(ctl, "%s", _("Unable to get cpu map")); + goto cleanup; + } + + vshPrint(ctl, "%-15s ", _("CPU map:")); + for (cpu = 0; cpu < cpunum; cpu++) + vshPrint(ctl, "%c", VIR_CPU_USED(cpumap, cpu) ? 'y' : '-'); + vshPrint(ctl, "\n"); + + ret = true; + +cleanup: + virDomainFree(dom); + VIR_FREE(cpumap); + return ret; +} + const vshCmdDef domMonitoringCmds[] = { {.name = "domblkerror", .handler = cmdDomBlkError, @@ -1879,5 +1984,10 @@ const vshCmdDef domMonitoringCmds[] = { .info = info_list, .flags = 0 }, + {.name = "vcpumap", + .handler = cmdVCPUMap, + .opts = opts_VCPUMap, + .info = info_VCPUMap, + }, {.name = NULL} }; diff --git a/tools/virsh.pod b/tools/virsh.pod index 11984bc..8037a0d 100644 --- a/tools/virsh.pod +++ b/tools/virsh.pod @@ -1797,6 +1797,29 @@ If no flag is specified, behavior is different depending on hypervisor. Output the IP address and port number for the VNC display. If the information is not available the processes will provide an exit code of 1. +=item B<vcpumap> I<domain> [I<--hypervisor> | I<--agent>] +[I<--active> | I<--online> | I<--offline> | I<--offlinable> | I<--possible>] + +Query a map of vCPUs of a domain. + +When I<--agent> is specified the guest agent is queried for the state of the +vCPUs from the point of view of the guest. Otherwise I<--hypervisor> is +assumed. 
+ +If I<--possible> is specified all cpu ID's referenced by the guest are listed. +The I<--offlinable> flag limits the view on vCPUs that can be disabled. +I<--online> and I<--offline> limit the output according to the state of the +vCPU. With I<--active> specified only vCPUs that are active are listed. +The flags may be combined to filter the results further. + +With no flag specified, libvirt assumes that I<--online> and I<--hypervisor> +is requested. + +I<--agent> and I<--hypervisor> as well as I<--online> and I<--offline> are +mutually exclusive options. + +B<Note>: Some flags may not be supported with both query modes. + =back =head1 DEVICE COMMANDS -- 1.8.2.1

Use the agent cpu state code and the upgraded hypervisor vcpu state retrieval code to implement virDomainGetVCPUMap() api. --- src/qemu/qemu_driver.c | 179 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 179 insertions(+) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 0041d8f..70acdbb 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -15052,6 +15052,184 @@ qemuNodeSuspendForDuration(virConnectPtr conn ATTRIBUTE_UNUSED, return nodeSuspendForDuration(target, duration, flags); } +#define MATCH(FLAG) (flags & (FLAG)) +static int +qemuDomainGetVCPUMap(virDomainPtr dom, + unsigned char **cpumap, + unsigned int flags) +{ + virQEMUDriverPtr driver = dom->conn->privateData; + virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; + + qemuAgentCPUInfoPtr agentinfo = NULL; + qemuMonitorCPUInfoPtr vcpuinfo = NULL; + int ninfo = -1; + + virBitmapPtr cpus = NULL; + int i; + int ret = -1; + int dummy; + + virCheckFlags(VIR_DOMAIN_VCPU_MAP_HYPERVISOR | + VIR_DOMAIN_VCPU_MAP_AGENT | + VIR_DOMAIN_VCPU_MAP_POSSIBLE | + VIR_DOMAIN_VCPU_MAP_ONLINE | + VIR_DOMAIN_VCPU_MAP_OFFLINE | + VIR_DOMAIN_VCPU_MAP_OFFLINABLE | + VIR_DOMAIN_VCPU_MAP_ACTIVE, -1); + + + if (!(vm = qemuDomObjFromDomain(dom))) + return -1; + + priv = vm->privateData; + + /* request data from the guest */ + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0) + goto cleanup; + + if (!virDomainObjIsActive(vm)) { + virReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("domain is not running")); + goto endjob; + } + + + if (MATCH(VIR_DOMAIN_VCPU_MAP_AGENT)) { + if (!priv->agent) { + virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", + _("guest agent is not configured")); + goto endjob; + } + qemuDomainObjEnterAgent(vm); + ninfo = qemuAgentGetVCPUs(priv->agent, &agentinfo); + qemuDomainObjExitAgent(vm); + } else { + qemuDomainObjEnterMonitor(driver, vm); + ninfo = qemuMonitorGetCPUInfo(priv->mon, &vcpuinfo); + qemuDomainObjExitMonitor(driver, vm); + } + +endjob: + if 
(qemuDomainObjEndJob(driver, vm) == 0) + vm = NULL; + + if (ninfo < 0) + goto cleanup; + + + if (MATCH(VIR_DOMAIN_VCPU_MAP_AGENT)) { + unsigned int maxcpu = 0; + + if (MATCH(VIR_DOMAIN_VCPU_MAP_ACTIVE)) { + virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", + _("qemu guest agent doesn't report active vCPUs ")); + goto cleanup; + } + + /* count cpus */ + for (i = 0; i < ninfo; i++) { + if (agentinfo[i].id > maxcpu) + maxcpu = agentinfo[i].id; + } + + /* allocate the returned array, vCPUs are indexed from 0 */ + if (!(cpus = virBitmapNew(maxcpu + 1))) { + virReportOOMError(); + goto cleanup; + } + + /* VIR_DOMAIN_VCPU_MAP_POSSIBLE */ + for (i = 0; i < ninfo; i++) + ignore_value(virBitmapSetBit(cpus, agentinfo[i].id)); + + if (MATCH(VIR_DOMAIN_VCPU_MAP_ONLINE)) { + for (i = 0; i < ninfo; i++) { + if (!agentinfo[i].online) + ignore_value(virBitmapClearBit(cpus, agentinfo[i].id)); + } + } + + if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINE)) { + for (i = 0; i < ninfo; i++) { + if (agentinfo[i].online) + ignore_value(virBitmapClearBit(cpus, agentinfo[i].id)); + } + } + + if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINABLE)) { + for (i = 0; i < ninfo; i++) { + if (!agentinfo[i].offlinable) + ignore_value(virBitmapClearBit(cpus, agentinfo[i].id)); + } + } + } else { + if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINABLE)) { + virReportError(VIR_ERR_INVALID_ARG, "%s", + _("qemu driver doesn't support reporting of " + "offlinable vCPUs of the hypervisor")); + goto cleanup; + } + + /* hypervisor cpu stats */ + if (!(cpus = virBitmapNew(vm->def->maxvcpus))) { + virReportOOMError(); + goto cleanup; + } + + /* map active cpus */ + if (MATCH(VIR_DOMAIN_VCPU_MAP_ACTIVE)) { + /* offline vcpus can't be active */ + if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINE)) + goto done; + + for (i = 0; i < ninfo; i++) { + if (vcpuinfo[i].active) + ignore_value(virBitmapSetBit(cpus, vcpuinfo[i].id)); + } + + goto done; + } + + /* for native hotplug, all configured vCPUs are possible for hotplug */ + if 
(MATCH(VIR_DOMAIN_VCPU_MAP_POSSIBLE)) { + virBitmapSetAll(cpus); + goto done; + } + + if (MATCH(VIR_DOMAIN_VCPU_MAP_OFFLINE)) { + /* online and offline together produce an empty map */ + if (MATCH(VIR_DOMAIN_VCPU_MAP_ONLINE)) + goto done; + + /* set all bit's so we can subtract online cpus from it later */ + virBitmapSetAll(cpus); + } + + for (i = 0; i < ninfo; i++) { + if (MATCH(VIR_DOMAIN_VCPU_MAP_ONLINE)) + ignore_value(virBitmapSetBit(cpus, vcpuinfo[i].id)); + else + ignore_value(virBitmapClearBit(cpus, vcpuinfo[i].id)); + } + } + +done: + if (cpumap && virBitmapToData(cpus, cpumap, &dummy) < 0) + goto cleanup; + + ret = virBitmapSize(cpus); + +cleanup: + if (vm) + virObjectUnlock(vm); + virBitmapFree(cpus); + VIR_FREE(vcpuinfo); + VIR_FREE(agentinfo); + return ret; +} +#undef MATCH static virDriver qemuDriver = { .no = VIR_DRV_QEMU, @@ -15232,6 +15410,7 @@ static virDriver qemuDriver = { .nodeGetCPUMap = qemuNodeGetCPUMap, /* 1.0.0 */ .domainFSTrim = qemuDomainFSTrim, /* 1.0.1 */ .domainOpenChannel = qemuDomainOpenChannel, /* 1.0.2 */ + .domainGetVCPUMap = qemuDomainGetVCPUMap, /* 1.0.7 */ }; -- 1.8.2.1

This patch implements support for the "cpu-add" QMP command that plugs CPUs into a live guest. The "cpu-add" command was introduced in QEMU 1.5. For the hotplug to work machine type "pc-i440fx-1.5" is required. --- src/qemu/qemu_monitor_json.c | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index 4a69fec..a415732 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -2089,9 +2089,42 @@ cleanup: int qemuMonitorJSONSetCPU(qemuMonitorPtr mon, int cpu, bool online) { - /* XXX Update to use QMP, if QMP ever adds support for cpu hotplug */ + int ret = -1; + virJSONValuePtr cmd = NULL; + virJSONValuePtr reply = NULL; + + if (online) { + cmd = qemuMonitorJSONMakeCommand("cpu-add", + "i:id", cpu, + NULL); + } else { + /* offlining is not yet implemented in qmp */ + goto fallback; + } + if (!cmd) + goto cleanup; + + if ((ret = qemuMonitorJSONCommand(mon, cmd, &reply)) < 0) + goto cleanup; + + if (qemuMonitorJSONHasError(reply, "CommandNotFound")) + goto fallback; + else + ret = qemuMonitorJSONCheckError(cmd, reply); + + /* this function has non-standard return values, so adapt it */ + if (ret == 0) + ret = 1; + +cleanup: + virJSONValueFree(cmd); + virJSONValueFree(reply); + return ret; + +fallback: VIR_DEBUG("no QMP support for cpu_set, trying HMP"); - return qemuMonitorTextSetCPU(mon, cpu, online); + ret = qemuMonitorTextSetCPU(mon, cpu, online); + goto cleanup; } -- 1.8.2.1

This patch introduces API virDomainSetGuestVcpus that will be used to work with vCPU state from the point of view of the guest using the guest agent. --- include/libvirt/libvirt.h.in | 4 ++++ src/driver.h | 8 +++++++ src/libvirt.c | 55 ++++++++++++++++++++++++++++++++++++++++++++ src/libvirt_public.syms | 1 + src/remote/remote_driver.c | 1 + src/remote/remote_protocol.x | 15 +++++++++++- src/remote_protocol-structs | 7 ++++++ 7 files changed, 90 insertions(+), 1 deletion(-) diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 46d499c..c4c8224 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -2127,6 +2127,10 @@ int virDomainSetVcpus (virDomainPtr domain, int virDomainSetVcpusFlags (virDomainPtr domain, unsigned int nvcpus, unsigned int flags); +int virDomainSetGuestVcpu (virDomainPtr domain, + unsigned int id, + unsigned int online, + unsigned int flags); int virDomainGetVcpusFlags (virDomainPtr domain, unsigned int flags); diff --git a/src/driver.h b/src/driver.h index 0caa2d6..eca5f1d 100644 --- a/src/driver.h +++ b/src/driver.h @@ -1052,6 +1052,13 @@ typedef int unsigned char **cpumap, unsigned int flags); +typedef int +(*virDrvDomainSetGuestVcpu)(virDomainPtr dom, + unsigned int id, + unsigned int online, + unsigned int flags); + + typedef struct _virDriver virDriver; typedef virDriver *virDriverPtr; @@ -1254,6 +1261,7 @@ struct _virDriver { virDrvDomainSendProcessSignal domainSendProcessSignal; virDrvDomainLxcOpenNamespace domainLxcOpenNamespace; virDrvDomainGetVCPUMap domainGetVCPUMap; + virDrvDomainSetGuestVcpu domainSetGuestVcpu; }; diff --git a/src/libvirt.c b/src/libvirt.c index 3f51e83..9c3bbe6 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -8947,6 +8947,61 @@ error: return -1; } + +/** + * virDomainSetGuestVcpu: + * @domain: pointer to domain object, or NULL for Domain0 + * @id: vcpu ID in the guest + * @online: desired state of the vcpu + * @flags: currently unused, callers should pass 0 + * + 
* Dynamically change the state of a virtual CPUs used by the domain by + * using the guest agent. The vCPU id used is from the point of view of + * the guest. + * + * Returns 0 in case of success, -1 in case of failure. + */ + +int +virDomainSetGuestVcpu(virDomainPtr domain, + unsigned int id, + unsigned int online, + unsigned int flags) +{ + virConnectPtr conn; + + VIR_DOMAIN_DEBUG(domain, "id=%u, online=%u, flags=%x", id, online, flags); + + virResetLastError(); + + if (!VIR_IS_CONNECTED_DOMAIN(domain)) { + virLibDomainError(VIR_ERR_INVALID_DOMAIN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + if (domain->conn->flags & VIR_CONNECT_RO) { + virLibDomainError(VIR_ERR_OPERATION_DENIED, __FUNCTION__); + goto error; + } + + conn = domain->conn; + + if (conn->driver->domainSetGuestVcpu) { + int ret; + ret = conn->driver->domainSetGuestVcpu(domain, id, online, flags); + if (ret < 0) + goto error; + return ret; + } + + virLibConnError(VIR_ERR_NO_SUPPORT, __FUNCTION__); + +error: + virDispatchError(domain->conn); + return -1; +} + + /** * virDomainGetVcpusFlags: * @domain: pointer to domain object, or NULL for Domain0 diff --git a/src/libvirt_public.syms b/src/libvirt_public.syms index 04465be..bbb7c77 100644 --- a/src/libvirt_public.syms +++ b/src/libvirt_public.syms @@ -624,6 +624,7 @@ LIBVIRT_1.0.6 { LIBVIRT_1.0.7 { global: virDomainGetVCPUMap; + virDomainSetGuestVcpu; } LIBVIRT_1.0.6; diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c index 99fa3c1..4dca3eb 100644 --- a/src/remote/remote_driver.c +++ b/src/remote/remote_driver.c @@ -6395,6 +6395,7 @@ static virDriver remote_driver = { .domainFSTrim = remoteDomainFSTrim, /* 1.0.1 */ .domainLxcOpenNamespace = remoteDomainLxcOpenNamespace, /* 1.0.2 */ .domainGetVCPUMap = remoteDomainGetVCPUMap, /* 1.0.7 */ + .domainSetGuestVcpu = remoteDomainSetGuestVcpu, /* 1.0.7 */ }; static virNetworkDriver network_driver = { diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x index 
7db6be3..bcc578f 100644 --- a/src/remote/remote_protocol.x +++ b/src/remote/remote_protocol.x @@ -2748,6 +2748,13 @@ struct remote_domain_get_vcpu_map_ret { int ret; }; +struct remote_domain_set_guest_vcpu_args { + remote_nonnull_domain dom; + unsigned int id; + unsigned int online; + unsigned int flags; +}; + struct remote_domain_fstrim_args { remote_nonnull_domain dom; remote_string mountPoint; @@ -4450,6 +4457,12 @@ enum remote_procedure { /** * @generate: none */ - REMOTE_PROC_DOMAIN_GET_VCPU_MAP = 302 + REMOTE_PROC_DOMAIN_GET_VCPU_MAP = 302, + + /** + * @generate: both + */ + REMOTE_PROC_DOMAIN_SET_GUEST_VCPU = 303 + }; diff --git a/src/remote_protocol-structs b/src/remote_protocol-structs index e1ceabd..0ded7d8 100644 --- a/src/remote_protocol-structs +++ b/src/remote_protocol-structs @@ -2198,6 +2198,12 @@ struct remote_domain_get_vcpu_map_ret { } cpumap; int ret; }; +struct remote_domain_set_guest_vcpu_args { + remote_nonnull_domain dom; + u_int id; + u_int online; + u_int flags; +}; struct remote_domain_fstrim_args { remote_nonnull_domain dom; remote_string mountPoint; @@ -2507,4 +2513,5 @@ enum remote_procedure { REMOTE_PROC_DOMAIN_MIGRATE_SET_COMPRESSION_CACHE = 300, REMOTE_PROC_NODE_DEVICE_DETACH_FLAGS = 301, REMOTE_PROC_DOMAIN_GET_VCPU_MAP = 302, + REMOTE_PROC_DOMAIN_SET_GUEST_VCPU = 303, }; -- 1.8.2.1

Add a virsh command called "setguestvcpu" to excercise the virDomainSetGuestVcpu API. --- tools/virsh-domain.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++ tools/virsh.pod | 6 ++++ 2 files changed, 83 insertions(+) diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index 0402aef..6853613 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -5866,6 +5866,77 @@ cmdSetvcpus(vshControl *ctl, const vshCmd *cmd) } /* + * "setguestvcpu" command + */ +static const vshCmdInfo info_setguestvcpu[] = { + {.name = "help", + .data = N_("modify the state of a guest's virtual CPU") + }, + {.name = "desc", + .data = N_("Enable or disable virtual CPUs in a guest using the guest agent.") + }, + {.name = NULL} +}; + +static const vshCmdOptDef opts_setguestvcpu[] = { + {.name = "domain", + .type = VSH_OT_DATA, + .flags = VSH_OFLAG_REQ, + .help = N_("domain name, id or uuid") + }, + {.name = "id", + .type = VSH_OT_INT, + .flags = VSH_OFLAG_REQ, + .help = N_("id of the virtual CPU") + }, + {.name = "online", + .type = VSH_OT_BOOL, + .help = N_("enable the vCPU") + }, + {.name = "offline", + .type = VSH_OT_BOOL, + .help = N_("disable the vCPU") + }, + {.name = NULL} +}; + +static bool +cmdSetguestvcpu(vshControl *ctl, const vshCmd *cmd) +{ + virDomainPtr dom; + unsigned int id; + bool online = vshCommandOptBool(cmd, "online"); + bool offline = vshCommandOptBool(cmd, "offline"); + bool ret = false; + + VSH_EXCLUSIVE_OPTIONS_VAR(online, offline); + + if (!online && !offline) { + vshError(ctl, "%s", + _("need to specify either --online or --offline")); + return false; + } + + if (vshCommandOptUInt(cmd, "id", &id) < 0) { + vshError(ctl, "%s", _("Invalid or missing cpu ID")); + return false; + } + + if (!(dom = vshCommandOptDomain(ctl, cmd, NULL))) + return false; + + if (virDomainSetGuestVcpu(dom, id, online, 0) < 0) + goto cleanup; + + ret = true; + +cleanup: + virDomainFree(dom); + return ret; +} + + +/* * "cpu-compare" command */ static const vshCmdInfo 
info_cpu_compare[] = { @@ -10514,6 +10585,12 @@ const vshCmdDef domManagementCmds[] = { .info = info_setvcpus, .flags = 0 }, + {.name = "setguestvcpu", + .handler = cmdSetguestvcpu, + .opts = opts_setguestvcpu, + .info = info_setguestvcpu, + .flags = 0 + }, {.name = "shutdown", .handler = cmdShutdown, .opts = opts_shutdown, diff --git a/tools/virsh.pod b/tools/virsh.pod index 8037a0d..b34a47d 100644 --- a/tools/virsh.pod +++ b/tools/virsh.pod @@ -1749,6 +1749,12 @@ state of the domain (corresponding to I<--live> if running, or I<--config> if inactive); these three flags are mutually exclusive. Thus, this command always takes exactly zero or two flags. +=item B<setguestvcpu> I<domain> I<cpu id> [{I<--online> | I<--offline>}] + +Set the online state of a vCPU in the guest. This command +requires the guest agent configured. The state may be either +I<--online> or I<--offline>. The flags are mutually exclusive. + =item B<vcpuinfo> I<domain> Returns basic information about the domain virtual CPUs, like the number of -- 1.8.2.1

Use the helper added earlier to implement this new API. --- src/qemu/qemu_driver.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 70acdbb..809807f 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -15231,6 +15231,70 @@ cleanup: } #undef MATCH + +static int +qemuDomainSetGuestVcpu(virDomainPtr dom, + unsigned int id, + unsigned int online, + unsigned int flags) +{ + virQEMUDriverPtr driver = dom->conn->privateData; + virDomainObjPtr vm; + qemuDomainObjPrivatePtr priv; + int ret = -1; + + qemuAgentCPUInfo agentinfo = { .id = id, + .online = !!online }; + + virCheckFlags(0, -1); + + if (!(vm = qemuDomObjFromDomain(dom))) + return -1; + + priv = vm->privateData; + + /* request data from the guest */ + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0) + goto cleanup; + + if (!virDomainObjIsActive(vm)) { + virReportError(VIR_ERR_OPERATION_INVALID, "%s", + _("domain is not running")); + goto endjob; + } + + if (!priv->agent) { + virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", + _("guest agent is not configured")); + goto endjob; + } + + qemuDomainObjEnterAgent(vm); + ret = qemuAgentSetVCPUs(priv->agent, &agentinfo, 1); + qemuDomainObjExitAgent(vm); + +endjob: + if (qemuDomainObjEndJob(driver, vm) == 0) + vm = NULL; + + if (ret < 0) + goto cleanup; + + if (ret != 0) { + virReportError(VIR_ERR_OPERATION_FAILED, + _("failed to set state of vCPU '%u'"), id); + goto cleanup; + } + + ret = 0; + +cleanup: + if (vm) + virObjectUnlock(vm); + return ret; +} + + static virDriver qemuDriver = { .no = VIR_DRV_QEMU, .name = QEMU_DRIVER_NAME, @@ -15411,6 +15475,7 @@ static virDriver qemuDriver = { .domainFSTrim = qemuDomainFSTrim, /* 1.0.1 */ .domainOpenChannel = qemuDomainOpenChannel, /* 1.0.2 */ .domainGetVCPUMap = qemuDomainGetVCPUMap, /* 1.0.7 */ + .domainSetGuestVcpu = qemuDomainSetGuestVcpu, /* 1.0.7 */ }; -- 1.8.2.1

On 05/27/13 19:59, Peter Krempa wrote:
This patch series consists of multiple parts: -patch 1,2: Two trivial cleanups -patch 3: Improve and refactor vCPU data parsing -patch 4: Add agent monitor helpers for cpu-hotplug stuff -patch 5,6,7: Implement universal guest vCPU mapping function -patch 8: Implement new qemu monitor command for cpu hotplug -patch 9,10,11: Implement agent based cpu state manipulation API/command
Tip: The new "virsh vcpumap guest --active" command may be used to verify that the agent actually offlined the vCPU in the guest.
Peter Krempa (11): virsh-domain-monitor: Remove ATTRIBUTE_UNUSED from an argument qemu: Use bool instead of int in qemuMonitorSetCPU APIs qemu: Extract more information about vCPUs and threads qemu_agent: Introduce helpers for agent based CPU hot(un)plug lib: Add API to map virtual cpus of a guest virsh-domain-monitor: Implement command to map guest vCPUs qemu: Implement virDomainGetVCPUMap for the qemu driver qemu: Implement new QMP command for cpu hotplug lib: Add API to modify vCPU state from the guest using the guest agent virsh-domain: Implement command for virDomainSetGuestVcpu qemu: Implement virDomainSetGuestVcpu in qemu driver
Ping. Now that 1.0.6 is out of the door this series could get some reviews ;). Thanks. Peter
participants (1)
-
Peter Krempa