On 04/21/2015 09:45 AM, Peter Krempa wrote:
On Tue, Apr 14, 2015 at 21:18:25 -0400, John Ferlan wrote:
> Add qemuDomainAddIOThread and qemuDomainDelIOThread in order to add or
> remove an IOThread to/from the host either for live or config options
>
> The implementation for the 'live' option will use the iothreadpids list
> in order to make decisions, while the 'config' option will use the
> iothreadids list. Additionally, for deletion each may have to adjust
> the iothreadpin list.
>
> IOThreads are implemented by QMP objects; the code makes use of the existing
> qemuMonitorAddObject or qemuMonitorDelObject APIs.
>
> Signed-off-by: John Ferlan <jferlan(a)redhat.com>
> ---
> src/conf/domain_audit.c | 9 +
> src/conf/domain_audit.h | 6 +
> src/libvirt_private.syms | 1 +
> src/qemu/qemu_driver.c | 431 +++++++++++++++++++++++++++++++++++++++++++++++
> 4 files changed, 447 insertions(+)
>
...
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index 008258f..f42d4fb 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -6179,6 +6179,435 @@ qemuDomainPinIOThread(virDomainPtr dom,
> return ret;
> }
>
> +static int
> +qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver,
> + virDomainObjPtr vm,
> + unsigned int iothread_id)
> +{
> + qemuDomainObjPrivatePtr priv = vm->privateData;
> + char *alias = NULL;
> + size_t idx;
> + int rc = -1;
> + int ret = -1;
> + unsigned int orig_niothreads = vm->def->iothreads;
> + unsigned int exp_niothreads = vm->def->iothreads;
> + int new_niothreads = 0;
> + qemuMonitorIOThreadInfoPtr *new_iothreads = NULL;
> + unsigned int idval = 0;
> + virCgroupPtr cgroup_iothread = NULL;
> + char *mem_mask = NULL;
> + virDomainIOThreadIDDefPtr iothrid;
> +
> + if (virDomainIOThreadIDFind(vm->def, iothread_id)) {
> + virReportError(VIR_ERR_INVALID_ARG,
> + _("an IOThread is already using iothread_id
'%u'"),
> + iothread_id);
> + goto cleanup;
> + }
> +
> + if (virAsprintf(&alias, "iothread%u", iothread_id) < 0)
> + return -1;
> +
> + qemuDomainObjEnterMonitor(driver, vm);
> +
> + rc = qemuMonitorAddObject(priv->mon, "iothread", alias, NULL);
> + exp_niothreads++;
> + if (rc < 0)
> + goto exit_monitor;
> +
> + /* After hotplugging the IOThreads we need to re-detect the
> + * IOThreads thread_id's, adjust the cgroups, thread affinity,
> + * and add the thread_id to the vm->def->iothreadids list.
> + */
> + if ((new_niothreads = qemuMonitorGetIOThreads(priv->mon,
> + &new_iothreads)) < 0)
Since we are not doing any fancy iothread naming, this function can
parse the iothread IDs from the alias right away ... [1]
> + goto exit_monitor;
> +
> + if (qemuDomainObjExitMonitor(driver, vm) < 0)
> + goto cleanup;
> +
> + if (new_niothreads != exp_niothreads) {
> + virReportError(VIR_ERR_INTERNAL_ERROR,
> + _("got wrong number of IOThread ids from QEMU monitor.
"
> + "got %d, wanted %d"),
> + new_niothreads, exp_niothreads);
> + vm->def->iothreads = new_niothreads;
> + goto cleanup;
> + }
> + vm->def->iothreads = exp_niothreads;
> +
> + if (virDomainNumatuneGetMode(vm->def->numa, -1) ==
> + VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
> + virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
> + priv->autoNodeset,
> + &mem_mask, -1) < 0)
> + goto cleanup;
> +
> +
> + /*
> + * If we've successfully added an IOThread, find out where we added it
> + * in the QEMU IOThread list, so we can add it to our iothreadids list
> + */
The message seems obvious when looking at the code.
> + for (idx = 0; idx < new_niothreads; idx++) {
> + if (qemuDomainParseIOThreadAlias(new_iothreads[idx]->name, &idval) < 0)
... [1] so that you don't have to do it manually.
IOW:
if (STREQ(alias, new_iothreads[idx]->name))
break
> + goto cleanup;
> + if (iothread_id == idval)
> + break;
> + }
> +
> + if (idval != iothread_id) {
And of course the not-found check then becomes idx == new_niothreads, plus removing 'idval'
> + virReportError(VIR_ERR_INTERNAL_ERROR,
> + _("cannot find new IOThread '%u' in QEMU
monitor."),
> + iothread_id);
> + goto cleanup;
> + }
> +
> + if (virDomainIOThreadIDAdd(vm->def, iothread_id) < 0)
virDomainIOThreadIDAdd could return the pointer to the created item ...
OK, now we have
if (!(iothrid = virDomainIOThreadIDAdd(vm->def, iothread_id)))
goto cleanup;
iothrid->thread_id = new_iothreads[idx]->thread_id;
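(That implies changing the helper to return the new element rather than an int - roughly this prototype, just a sketch, not the final signature:)

    virDomainIOThreadIDDefPtr
    virDomainIOThreadIDAdd(virDomainDefPtr def,
                           unsigned int iothread_id);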
> + goto cleanup;
> +
> + if (!(iothrid = virDomainIOThreadIDFind(vm->def, iothread_id))) {
> + virReportError(VIR_ERR_INTERNAL_ERROR,
> + _("cannot find just added IOThread '%u'"),
> + iothread_id);
So that you don't have to look it up right after adding it.
> + goto cleanup;
> + }
> +
> + iothrid->thread_id = new_iothreads[idx]->thread_id;
> +
> + /* Add IOThread to cgroup if present */
> + if (priv->cgroup) {
> + cgroup_iothread =
> + qemuDomainAddCgroupForThread(priv->cgroup,
> + VIR_CGROUP_THREAD_IOTHREAD,
> + iothread_id, mem_mask,
> + iothrid->thread_id);
> + if (!cgroup_iothread)
> + goto cleanup;
> + }
> +
> + /* Inherit def->cpuset */
> + if (vm->def->cpumask) {
Automatic NUMA placement (priv->autoCpuset) needs to be taken into account too.
Meaning? There's a reference for that in qemuDomainAddCgroupForThread.
Obviously I'm following the model of qemuDomainHotplugVcpus when
"if (nvcpus > oldvcpus) {"
> + if (qemuDomainHotplugAddPin(vm->def->cpumask, iothread_id,
> + &vm->def->cputune.iothreadspin,
> + &vm->def->cputune.niothreadspin) < 0)
> +
> + goto cleanup;
> +
> + if (qemuDomainHotplugPinThread(vm->def->cpumask, iothread_id,
> + iothrid->thread_id, cgroup_iothread) < 0)
> + goto cleanup;
> +
> + if (qemuProcessSetSchedParams(iothread_id, iothrid->thread_id,
> + vm->def->cputune.niothreadsched,
> + vm->def->cputune.iothreadsched) < 0)
qemuProcessSetSchedParams won't do anything since the new thread doesn't
have any scheduler assigned.
So it's wrong in qemuDomainHotplugVcpus? Is someone else fixing that?
Is the expectation that I just remove this?
> + goto cleanup;
> + }
> +
> + ret = 0;
> +
> + cleanup:
> + if (new_iothreads) {
> + for (idx = 0; idx < new_niothreads; idx++)
> + qemuMonitorIOThreadInfoFree(new_iothreads[idx]);
> + VIR_FREE(new_iothreads);
> + }
> + VIR_FREE(mem_mask);
> + virDomainAuditIOThread(vm, orig_niothreads, new_niothreads,
> + "update", rc == 0);
> + if (cgroup_iothread)
> + virCgroupFree(&cgroup_iothread);
virCgroupFree() handles NULL just fine.
OK - again copied from qemuDomainHotplugVcpus
> + VIR_FREE(alias);
> + return ret;
> +
> + exit_monitor:
> + ignore_value(qemuDomainObjExitMonitor(driver, vm));
> + goto cleanup;
> +}
> +
> +static int
> +qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver,
> + virDomainObjPtr vm,
> + unsigned int iothread_id)
> +{
> + qemuDomainObjPrivatePtr priv = vm->privateData;
> + size_t idx;
> + char *alias = NULL;
> + int rc = -1;
> + int ret = -1;
> + unsigned int orig_niothreads = vm->def->iothreads;
> + unsigned int exp_niothreads = vm->def->iothreads;
> + int new_niothreads = 0;
> + qemuMonitorIOThreadInfoPtr *new_iothreads = NULL;
> + char *mem_mask = NULL;
> +
> + /* Normally would use virDomainIOThreadIDFind, but we need the index
> + * from whence to delete for later...
> + */
> + for (idx = 0; idx < vm->def->niothreadids; idx++) {
> + if (iothread_id == vm->def->iothreadids[idx]->iothread_id)
> + break;
> + }
> +
> + if (idx == vm->def->niothreadids) {
> + virReportError(VIR_ERR_INVALID_ARG,
> + _("cannot find IOThread '%u' in iothreadids
list"),
> + iothread_id);
> + return -1;
> + }
> +
> + if (virAsprintf(&alias, "iothread%u", iothread_id) < 0)
> + return -1;
> +
> + qemuDomainObjEnterMonitor(driver, vm);
> +
> + rc = qemuMonitorDelObject(priv->mon, alias);
> + exp_niothreads--;
> + if (rc < 0)
> + goto exit_monitor;
> +
> + /* After hotplugging the IOThreads we need to re-detect the
> + * IOThreads thread_id's, adjust the cgroups, thread affinity,
> + * and the vm->def->iothreadids list.
> + */
You've removed the thread here, so thread affinity was destroyed by the thread exiting.
Relic of my cut-n-paste from the formerly combined code; removed the comment completely, no replacement.
> + if ((new_niothreads = qemuMonitorGetIOThreads(priv->mon,
> + &new_iothreads)) < 0)
> + goto exit_monitor;
> +
> + if (qemuDomainObjExitMonitor(driver, vm) < 0)
> + goto cleanup;
> +
> + if (new_niothreads != exp_niothreads) {
> + virReportError(VIR_ERR_INTERNAL_ERROR,
> + _("got wrong number of IOThread ids from QEMU monitor.
"
> + "got %d, wanted %d"),
> + new_niothreads, exp_niothreads);
> + vm->def->iothreads = new_niothreads;
> + goto cleanup;
> + }
> + vm->def->iothreads = exp_niothreads;
> +
> + if (virDomainNumatuneGetMode(vm->def->numa, -1) ==
> + VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
> + virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
> + priv->autoNodeset,
> + &mem_mask, -1) < 0)
Why do you need the memory node mask when you are deleting the cgroup?
Oh right - relic of the copy... gone.
> + goto cleanup;
> +
> + if (VIR_DELETE_ELEMENT(vm->def->iothreadids, idx,
> + vm->def->niothreadids) < 0)
> + goto cleanup;
You've added virDomainIOThreadIDDel
yep, change to:
virDomainIOThreadIDDel(vm->def, iothread_id);
> +
> + if (qemuDomainDelCgroupForThread(priv->cgroup,
> + VIR_CGROUP_THREAD_IOTHREAD,
> + iothread_id) < 0)
> + goto cleanup;
> +
> + virDomainPinDel(&vm->def->cputune.iothreadspin,
> + &vm->def->cputune.niothreadspin,
> + iothread_id);
> +
> + ret = 0;
> +
> + cleanup:
> + if (new_iothreads) {
> + for (idx = 0; idx < new_niothreads; idx++)
> + qemuMonitorIOThreadInfoFree(new_iothreads[idx]);
> + VIR_FREE(new_iothreads);
> + }
> + VIR_FREE(mem_mask);
> + virDomainAuditIOThread(vm, orig_niothreads, new_niothreads,
> + "update", rc == 0);
> + VIR_FREE(alias);
> + return ret;
> +
> + exit_monitor:
> + ignore_value(qemuDomainObjExitMonitor(driver, vm));
> + goto cleanup;
> +}
> +
> +static int
> +qemuDomainChgIOThread(virQEMUDriverPtr driver,
> + virDomainObjPtr vm,
> + unsigned int iothread_id,
> + bool add,
> + unsigned int flags)
> +{
> + virQEMUDriverConfigPtr cfg = NULL;
> + virCapsPtr caps = NULL;
> + qemuDomainObjPrivatePtr priv;
> + virCgroupPtr cgroup_temp = NULL;
> + virBitmapPtr all_nodes = NULL;
> + char *all_nodes_str = NULL;
> + char *mem_mask = NULL;
> + virDomainDefPtr persistentDef;
> + int ret = -1;
> +
> + cfg = virQEMUDriverGetConfig(driver);
> +
> + if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
> + goto cleanup;
> +
> + priv = vm->privateData;
> +
> + if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
> + goto cleanup;
> +
> + if (virDomainLiveConfigHelperMethod(caps, driver->xmlopt, vm, &flags,
> + &persistentDef) < 0)
> + goto endjob;
> +
> + if (flags & VIR_DOMAIN_AFFECT_LIVE) {
> + if (!virDomainObjIsActive(vm)) {
> + virReportError(VIR_ERR_OPERATION_INVALID, "%s",
> + _("cannot change IOThreads for an inactive
domain"));
> + goto endjob;
> + }
> +
> + if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) {
> + virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
> + _("IOThreads not supported with this
binary"));
> + goto endjob;
> + }
> +
> + if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_EMULATOR, 0,
Wrong cgroup type. Additionally qemuDomainHotplugAddIOThread() will add
the thread so adding it here doesn't make sense.
See qemuDomainSetVcpusFlags; it too uses VIR_CGROUP_THREAD_EMULATOR, so this is no different... In fact I suppose I could have created some sort of common API, but didn't.
One thing I just noticed - commit id '6cf1e11' added a "&& virNumaIsAvailable()" check in qemuDomainSetVcpusFlags, so to follow that I can add the extra check.
Beyond that I'm not sure what you want.
> + false, &cgroup_temp) < 0)
> + goto endjob;
> +
> + if (!(all_nodes = virNumaGetHostNodeset()))
> + goto endjob;
> +
> + if (!(all_nodes_str = virBitmapFormat(all_nodes)))
> + goto endjob;
> +
> + if (virCgroupGetCpusetMems(cgroup_temp, &mem_mask) < 0 ||
> + virCgroupSetCpusetMems(cgroup_temp, all_nodes_str) < 0)
> + goto endjob;
> +
> + if (add) {
> + if (qemuDomainHotplugAddIOThread(driver, vm, iothread_id) < 0)
> + goto endjob;
> + } else {
> + if (qemuDomainHotplugDelIOThread(driver, vm, iothread_id) < 0)
> + goto endjob;
> + }
> +
> + if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)
> + goto endjob;
> + }
> +
> + if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
> + if (add) {
> + if (virDomainIOThreadIDAdd(persistentDef, iothread_id) < 0)
> + goto endjob;
> +
> + /* Nothing to do in iothreadspin list (that's a separate command) */
> +
> + persistentDef->iothreads++;
> + } else {
> + if (!virDomainIOThreadIDFind(persistentDef, iothread_id)) {
> + virReportError(VIR_ERR_INVALID_ARG,
> + _("cannot find IOThread '%u' in
persistent "
> + "iothreadids"),
> + iothread_id);
> + goto cleanup;
> + }
> +
> + virDomainIOThreadIDDel(persistentDef, iothread_id);
> + virDomainPinDel(&persistentDef->cputune.iothreadspin,
> + &persistentDef->cputune.niothreadspin,
> + iothread_id);
This is the reason why I requested in the previous review that the pinning information be merged into the iothread data structure.
You then would not have to synchronise two data structures.
Understood ... I started down that path, but then got bogged down in the cputune information inside iothreadids and the differences with the vCPU code. So I kept it this way to reduce the number of changes.
I think it's worthy of being done, but I hope a follow-up patch will be acceptable.
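(As a rough sketch of where that would head - purely illustrative, the cpumask field is assumed and not part of this series - the pinning would live directly in the per-IOThread structure:)

    struct _virDomainIOThreadIDDef {
        unsigned int iothread_id;
        int thread_id;          /* runtime thread id reported by QEMU */
        virBitmapPtr cpumask;   /* pinning merged in, replacing iothreadspin */
    };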
> + persistentDef->iothreads--;
> + }
> +
> + if (virDomainSaveConfig(cfg->configDir, persistentDef) < 0)
> + goto endjob;
> + }
> +
> + ret = 0;
> +
> + endjob:
> + if (mem_mask) {
> + virErrorPtr err = virSaveLastError();
> + virCgroupSetCpusetMems(cgroup_temp, mem_mask);
> + virSetError(err);
> + virFreeError(err);
> + }
> + qemuDomainObjEndJob(driver, vm);
> +
> + cleanup:
> + VIR_FREE(mem_mask);
> + VIR_FREE(all_nodes_str);
> + virBitmapFree(all_nodes);
> + virCgroupFree(&cgroup_temp);
> + virObjectUnref(caps);
> + virObjectUnref(cfg);
> + return ret;
> +}
> +
> +static int
> +qemuDomainAddIOThread(virDomainPtr dom,
> + unsigned int iothread_id,
> + unsigned int flags)
> +{
> + virQEMUDriverPtr driver = dom->conn->privateData;
> + virDomainObjPtr vm = NULL;
> + int ret = -1;
> +
> + virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
> + VIR_DOMAIN_AFFECT_CONFIG, -1);
> +
> + if (!(vm = qemuDomObjFromDomain(dom)))
> + goto cleanup;
> +
> + if (virDomainAddIOThreadEnsureACL(dom->conn, vm->def, flags) < 0)
> + goto cleanup;
> +
> + ret = qemuDomainChgIOThread(driver, vm, iothread_id, true, flags);
> +
> + cleanup:
> + qemuDomObjEndAPI(&vm);
> + return ret;
> +}
> +
> +
> +static int
> +qemuDomainDelIOThread(virDomainPtr dom,
> + unsigned int iothread_id,
> + unsigned int flags)
> +{
> + virQEMUDriverPtr driver = dom->conn->privateData;
> + virDomainObjPtr vm = NULL;
> + int ret = -1;
> + size_t i;
> +
> + virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
> + VIR_DOMAIN_AFFECT_CONFIG, -1);
> +
> + if (!(vm = qemuDomObjFromDomain(dom)))
> + goto cleanup;
> +
> + if (virDomainDelIOThreadEnsureACL(dom->conn, vm->def, flags) < 0)
> + goto cleanup;
> +
> + /* If there is a disk using the IOThread to be removed, then fail. */
> + for (i = 0; i < vm->def->ndisks; i++) {
> + if (vm->def->disks[i]->iothread == iothread_id) {
> + virReportError(VIR_ERR_INVALID_ARG,
> + _("cannot remove IOThread %u since it "
> + "is being used by disk path '%s'"),
> + iothread_id,
> + NULLSTR(vm->def->disks[i]->src->path));
Alternatively you can use vm->def->disks[i]->dst, which should always be set.
OK
> + goto cleanup;
> + }
> + }
> +
> + ret = qemuDomainChgIOThread(driver, vm, iothread_id, false, flags);
> +
> + cleanup:
> + qemuDomObjEndAPI(&vm);
> + return ret;
> +}
> +
> static int qemuDomainGetSecurityLabel(virDomainPtr dom, virSecurityLabelPtr seclabel)
> {
> virQEMUDriverPtr driver = dom->conn->privateData;
So with all that, the changes that would be squashed in:
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 5d3c7fc..e1fdeb5 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -6188,7 +6188,6 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver,
unsigned int exp_niothreads = vm->def->iothreads;
int new_niothreads = 0;
qemuMonitorIOThreadInfoPtr *new_iothreads = NULL;
- unsigned int idval = 0;
virCgroupPtr cgroup_iothread = NULL;
char *mem_mask = NULL;
virDomainIOThreadIDDefPtr iothrid;
@@ -6244,29 +6243,20 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver,
* in the QEMU IOThread list, so we can add it to our iothreadids list
*/
for (idx = 0; idx < new_niothreads; idx++) {
- if (qemuDomainParseIOThreadAlias(new_iothreads[idx]->name, &idval) < 0)
- goto cleanup;
- if (iothread_id == idval)
+ if (STREQ(new_iothreads[idx]->name, alias))
break;
}
- if (idval != iothread_id) {
+ if (idx == new_niothreads) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("cannot find new IOThread '%u' in QEMU
monitor."),
iothread_id);
goto cleanup;
}
- if (virDomainIOThreadIDAdd(vm->def, iothread_id) < 0)
+ if (!(iothrid = virDomainIOThreadIDAdd(vm->def, iothread_id)))
goto cleanup;
- if (!(iothrid = virDomainIOThreadIDFind(vm->def, iothread_id))) {
- virReportError(VIR_ERR_INTERNAL_ERROR,
- _("cannot find just added IOThread '%u'"),
- iothread_id);
- goto cleanup;
- }
-
iothrid->thread_id = new_iothreads[idx]->thread_id;
/* Add IOThread to cgroup if present */
@@ -6309,8 +6299,7 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver,
VIR_FREE(mem_mask);
virDomainAuditIOThread(vm, orig_niothreads, new_niothreads,
"update", rc == 0);
- if (cgroup_iothread)
- virCgroupFree(&cgroup_iothread);
+ virCgroupFree(&cgroup_iothread);
VIR_FREE(alias);
return ret;
@@ -6333,7 +6322,6 @@ qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver,
unsigned int exp_niothreads = vm->def->iothreads;
int new_niothreads = 0;
qemuMonitorIOThreadInfoPtr *new_iothreads = NULL;
- char *mem_mask = NULL;
/* Normally would use virDomainIOThreadIDFind, but we need the index
* from whence to delete for later...
@@ -6360,10 +6348,6 @@ qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver,
if (rc < 0)
goto exit_monitor;
- /* After hotplugging the IOThreads we need to re-detect the
- * IOThreads thread_id's, adjust the cgroups, thread affinity,
- * and the vm->def->iothreadids list.
- */
if ((new_niothreads = qemuMonitorGetIOThreads(priv->mon,
&new_iothreads)) < 0)
goto exit_monitor;
@@ -6381,16 +6365,7 @@ qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver,
}
vm->def->iothreads = exp_niothreads;
- if (virDomainNumatuneGetMode(vm->def->numa, -1) ==
- VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
- virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
- priv->autoNodeset,
- &mem_mask, -1) < 0)
- goto cleanup;
-
- if (VIR_DELETE_ELEMENT(vm->def->iothreadids, idx,
- vm->def->niothreadids) < 0)
- goto cleanup;
+ virDomainIOThreadIDDel(vm->def, iothread_id);
if (qemuDomainDelCgroupForThread(priv->cgroup,
VIR_CGROUP_THREAD_IOTHREAD,
@@ -6409,7 +6384,6 @@ qemuDomainHotplugDelIOThread(virQEMUDriverPtr driver,
qemuMonitorIOThreadInfoFree(new_iothreads[idx]);
VIR_FREE(new_iothreads);
}
- VIR_FREE(mem_mask);
virDomainAuditIOThread(vm, orig_niothreads, new_niothreads,
"update", rc == 0);
VIR_FREE(alias);
@@ -6464,19 +6438,21 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver,
goto endjob;
}
- if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_EMULATOR, 0,
- false, &cgroup_temp) < 0)
- goto endjob;
+ if (virNumaIsAvailable()) {
+ if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_EMULATOR, 0,
+ false, &cgroup_temp) < 0)
+ goto endjob;
- if (!(all_nodes = virNumaGetHostNodeset()))
- goto endjob;
+ if (!(all_nodes = virNumaGetHostNodeset()))
+ goto endjob;
- if (!(all_nodes_str = virBitmapFormat(all_nodes)))
- goto endjob;
+ if (!(all_nodes_str = virBitmapFormat(all_nodes)))
+ goto endjob;
- if (virCgroupGetCpusetMems(cgroup_temp, &mem_mask) < 0 ||
- virCgroupSetCpusetMems(cgroup_temp, all_nodes_str) < 0)
- goto endjob;
+ if (virCgroupGetCpusetMems(cgroup_temp, &mem_mask) < 0 ||
+ virCgroupSetCpusetMems(cgroup_temp, all_nodes_str) < 0)
+ goto endjob;
+ }
if (add) {
if (qemuDomainHotplugAddIOThread(driver, vm, iothread_id) < 0)
@@ -6492,7 +6468,7 @@ qemuDomainChgIOThread(virQEMUDriverPtr driver,
if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
if (add) {
- if (virDomainIOThreadIDAdd(persistentDef, iothread_id) < 0)
+ if (!virDomainIOThreadIDAdd(persistentDef, iothread_id))
goto endjob;
/* Nothing to do in iothreadspin list (that's a separate command) */
@@ -6590,8 +6566,7 @@ qemuDomainDelIOThread(virDomainPtr dom,
virReportError(VIR_ERR_INVALID_ARG,
_("cannot remove IOThread %u since it "
"is being used by disk path '%s'"),
- iothread_id,
- NULLSTR(vm->def->disks[i]->src->path));
+ iothread_id, vm->def->disks[i]->dst);
goto cleanup;
}
}