[libvirt] [PATCH 0/2] Warn users with weird thread/memory alignment

People are doing weird things. Let's let them do what they want but gently warn them in case they did not want that exact behaviour and are looking for some possible problems. Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1254402 Martin Kletzander (2): qemu: Add qemuProcessSetupPid() qemu: Check for thread <=> memory alignment src/qemu/qemu_driver.c | 4 +- src/qemu/qemu_process.c | 375 ++++++++++++++++++++++-------------------------- src/qemu/qemu_process.h | 6 +- 3 files changed, 181 insertions(+), 204 deletions(-) -- 2.9.0

Setting up cgroups and other things for all kinds of threads (the emulator thread, vCPU threads, I/O threads) was copy-pasted every time new thing was added. Over time each one of those functions changed a bit differently. So create one function that does all that setup and start using it. That will shave some duplicated code and maybe fix some bugs as well. Signed-off-by: Martin Kletzander <mkletzan@redhat.com> --- src/qemu/qemu_process.c | 278 +++++++++++++++--------------------------------- 1 file changed, 87 insertions(+), 191 deletions(-) diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 215fe5f2f210..d1247d2fd0f9 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -2306,70 +2306,108 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver, } +/** + * qemuProcessSetupPid: + * + * This function sets resource properities (affinity, cgroups, + * scheduler) for any PID associated with a domain. It should be used + * to set up emulator PIDs as well as vCPU and I/O thread pids to + * ensure they are all handled the same way. + * + * Returns 0 on success, -1 on error. 
+ */ static int -qemuProcessSetupEmulator(virDomainObjPtr vm) +qemuProcessSetupPid(virDomainObjPtr vm, + pid_t pid, + virCgroupThreadName nameval, + int id, + virBitmapPtr cpumask, + int sched_policy, + int sched_priority) { - virBitmapPtr cpumask = NULL; - virCgroupPtr cgroup_emulator = NULL; qemuDomainObjPrivatePtr priv = vm->privateData; - unsigned long long period = vm->def->cputune.emulator_period; - long long quota = vm->def->cputune.emulator_quota; + unsigned long long period = vm->def->cputune.period; + long long quota = vm->def->cputune.quota; + virDomainNumatuneMemMode mem_mode; + virCgroupPtr cgroup = NULL; + virBitmapPtr use_cpumask; + char *mem_mask = NULL; int ret = -1; if ((period || quota) && !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", _("cgroup cpu is required for scheduler tuning")); - return -1; + goto cleanup; } - if (vm->def->cputune.emulatorpin) - cpumask = vm->def->cputune.emulatorpin; - else if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO && - priv->autoCpuset) - cpumask = priv->autoCpuset; + /* Infer which cpumask shall be used. */ + if (cpumask) + use_cpumask = cpumask; + else if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) + use_cpumask = priv->autoCpuset; else - cpumask = vm->def->cpumask; + use_cpumask = vm->def->cpumask; - /* If CPU cgroup controller is not initialized here, then we need + /* + * If CPU cgroup controller is not initialized here, then we need * neither period nor quota settings. And if CPUSET controller is - * not initialized either, then there's nothing to do anyway. */ + * not initialized either, then there's nothing to do anyway. 
+ */ if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) || virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) { - if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_EMULATOR, 0, - true, &cgroup_emulator) < 0) + if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 && + mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT && + virDomainNumatuneMaybeFormatNodeset(vm->def->numa, + priv->autoNodeset, + &mem_mask, -1) < 0) goto cleanup; - if (virCgroupAddTask(cgroup_emulator, vm->pid) < 0) + if (virCgroupNewThread(priv->cgroup, nameval, id, true, &cgroup) < 0) goto cleanup; + /* move the thread for vcpu to sub dir */ + if (virCgroupAddTask(cgroup, pid) < 0) + goto cleanup; - if (cpumask) { - if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET) && - qemuSetupCgroupCpusetCpus(cgroup_emulator, cpumask) < 0) + if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) { + if (use_cpumask && + qemuSetupCgroupCpusetCpus(cgroup, use_cpumask) < 0) goto cleanup; - } - if (period || quota) { - if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) && - qemuSetupCgroupVcpuBW(cgroup_emulator, period, - quota) < 0) + /* + * Don't setup cpuset.mems for the emulator, they need to + * be set up after initialization in order for kvm + * allocations to succeed. + */ + if (nameval != VIR_CGROUP_THREAD_EMULATOR && + mem_mask && virCgroupSetCpusetMems(cgroup, mem_mask) < 0) goto cleanup; + } + + if ((period || quota) && + qemuSetupCgroupVcpuBW(cgroup, period, quota) < 0) + goto cleanup; } - if (cpumask && - virProcessSetAffinity(vm->pid, cpumask) < 0) + /* Setup legacy affinity. */ + if (use_cpumask && virProcessSetAffinity(pid, use_cpumask) < 0) goto cleanup; - ret = 0; + /* Set scheduler type and priority. 
*/ + if (sched_policy != VIR_PROC_POLICY_NONE && + virProcessSetScheduler(pid, sched_policy, sched_priority) < 0) + goto cleanup; + ret = 0; cleanup: - if (cgroup_emulator) { + VIR_FREE(mem_mask); + if (cgroup) { if (ret < 0) - virCgroupRemove(cgroup_emulator); - virCgroupFree(&cgroup_emulator); + virCgroupRemove(cgroup); + virCgroupFree(&cgroup); } return ret; @@ -2377,6 +2415,15 @@ qemuProcessSetupEmulator(virDomainObjPtr vm) static int +qemuProcessSetupEmulator(virDomainObjPtr vm) +{ + return qemuProcessSetupPid(vm, vm->pid, VIR_CGROUP_THREAD_EMULATOR, + 0, vm->def->cputune.emulatorpin, + VIR_PROC_POLICY_NONE, 0); +} + + +static int qemuProcessInitPasswords(virConnectPtr conn, virQEMUDriverPtr driver, virDomainObjPtr vm, @@ -4574,80 +4621,10 @@ qemuProcessSetupVcpu(virDomainObjPtr vm, { pid_t vcpupid = qemuDomainGetVcpuPid(vm, vcpuid); virDomainVcpuInfoPtr vcpu = virDomainDefGetVcpu(vm->def, vcpuid); - qemuDomainObjPrivatePtr priv = vm->privateData; - char *mem_mask = NULL; - virDomainNumatuneMemMode mem_mode; - unsigned long long period = vm->def->cputune.period; - long long quota = vm->def->cputune.quota; - virCgroupPtr cgroup_vcpu = NULL; - virBitmapPtr cpumask; - int ret = -1; - - if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) || - virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) { - - if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 && - mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT && - virDomainNumatuneMaybeFormatNodeset(vm->def->numa, - priv->autoNodeset, - &mem_mask, -1) < 0) - goto cleanup; - - if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_VCPU, vcpuid, - true, &cgroup_vcpu) < 0) - goto cleanup; - - if (period || quota) { - if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0) - goto cleanup; - } - } - - /* infer which cpumask shall be used */ - if (vcpu->cpumask) - cpumask = vcpu->cpumask; - else if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) - cpumask = 
priv->autoCpuset; - else - cpumask = vm->def->cpumask; - - /* setup cgroups */ - if (cgroup_vcpu) { - if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) { - if (mem_mask && virCgroupSetCpusetMems(cgroup_vcpu, mem_mask) < 0) - goto cleanup; - - if (cpumask && qemuSetupCgroupCpusetCpus(cgroup_vcpu, cpumask) < 0) - goto cleanup; - } - - /* move the thread for vcpu to sub dir */ - if (virCgroupAddTask(cgroup_vcpu, vcpupid) < 0) - goto cleanup; - } - - /* setup legacy affinty */ - if (cpumask && virProcessSetAffinity(vcpupid, cpumask) < 0) - goto cleanup; - - /* set scheduler type and priority */ - if (vcpu->sched.policy != VIR_PROC_POLICY_NONE) { - if (virProcessSetScheduler(vcpupid, vcpu->sched.policy, - vcpu->sched.priority) < 0) - goto cleanup; - } - - ret = 0; - - cleanup: - VIR_FREE(mem_mask); - if (cgroup_vcpu) { - if (ret < 0) - virCgroupRemove(cgroup_vcpu); - virCgroupFree(&cgroup_vcpu); - } - return ret; + return qemuProcessSetupPid(vm, vcpupid, VIR_CGROUP_THREAD_VCPU, + vcpuid, vcpu->cpumask, + vcpu->sched.policy, vcpu->sched.priority); } @@ -4700,98 +4677,17 @@ qemuProcessSetupVcpus(virDomainObjPtr vm) } -/** - * qemuProcessSetupIOThread: - * @vm: domain object - * @iothread: iothread data structure to set the data for - * - * This function sets resource properities (affinity, cgroups, scheduler) for a - * IOThread. This function expects that the IOThread is online and the IOThread - * pids were correctly detected at the point when it's called. - * - * Returns 0 on success, -1 on error. 
- */ int qemuProcessSetupIOThread(virDomainObjPtr vm, virDomainIOThreadIDDefPtr iothread) { - qemuDomainObjPrivatePtr priv = vm->privateData; - unsigned long long period = vm->def->cputune.period; - long long quota = vm->def->cputune.quota; - virDomainNumatuneMemMode mem_mode; - char *mem_mask = NULL; - virCgroupPtr cgroup_iothread = NULL; - virBitmapPtr cpumask = NULL; - int ret = -1; - - if ((period || quota) && - !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) { - virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", - _("cgroup cpu is required for scheduler tuning")); - return -1; - } - - if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU) || - virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) { - if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) == 0 && - mem_mode == VIR_DOMAIN_NUMATUNE_MEM_STRICT && - virDomainNumatuneMaybeFormatNodeset(vm->def->numa, - priv->autoNodeset, - &mem_mask, -1) < 0) - goto cleanup; - if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_IOTHREAD, + return qemuProcessSetupPid(vm, iothread->thread_id, + VIR_CGROUP_THREAD_IOTHREAD, iothread->iothread_id, - true, &cgroup_iothread) < 0) - goto cleanup; - } - - if (iothread->cpumask) - cpumask = iothread->cpumask; - else if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO) - cpumask = priv->autoCpuset; - else - cpumask = vm->def->cpumask; - - if (period || quota) { - if (qemuSetupCgroupVcpuBW(cgroup_iothread, period, quota) < 0) - goto cleanup; - } - - if (cgroup_iothread) { - if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) { - if (mem_mask && - virCgroupSetCpusetMems(cgroup_iothread, mem_mask) < 0) - goto cleanup; - - if (cpumask && - qemuSetupCgroupCpusetCpus(cgroup_iothread, cpumask) < 0) - goto cleanup; - } - - if (virCgroupAddTask(cgroup_iothread, iothread->thread_id) < 0) - goto cleanup; - } - - if (cpumask && virProcessSetAffinity(iothread->thread_id, cpumask) < 0) - goto cleanup; - - if 
(iothread->sched.policy != VIR_PROC_POLICY_NONE && - virProcessSetScheduler(iothread->thread_id, iothread->sched.policy, - iothread->sched.priority) < 0) - goto cleanup; - - ret = 0; - - cleanup: - if (cgroup_iothread) { - if (ret < 0) - virCgroupRemove(cgroup_iothread); - virCgroupFree(&cgroup_iothread); - } - - VIR_FREE(mem_mask); - return ret; + iothread->cpumask, + iothread->sched.policy, + iothread->sched.priority); } @@ -5292,7 +5188,7 @@ qemuProcessLaunch(virConnectPtr conn, if (!qemuProcessVerifyGuestCPU(driver, vm, asyncJob)) goto cleanup; - VIR_DEBUG("Setting up post-init cgroup restrictions"); + VIR_DEBUG("Setting Post-init cgroup resctrictions"); if (qemuSetupCpusetMems(vm) < 0) goto cleanup; -- 2.9.0

On 06/22/2016 12:37 PM, Martin Kletzander wrote:
Setting up cgroups and other things for all kinds of threads (the emulator thread, vCPU threads, I/O threads) was copy-pasted every time new thing was added. Over time each one of those functions changed a bit differently. So create one function that does all that setup and start using it. That will shave some duplicated code and maybe fix some bugs as well.
Signed-off-by: Martin Kletzander <mkletzan@redhat.com> --- src/qemu/qemu_process.c | 278 +++++++++++++++--------------------------------- 1 file changed, 87 insertions(+), 191 deletions(-)
Would have been so much easier one at a time... The scroll wheel on my mouse needs a break. I think this looks so much more logical. The only other comment I have is regarding the virCgroupAddTask call ordering... If I'm reading right, prior to this set of changes the emulator code would call that right after virCgroupNewThread and then make the qemuSetupCgroupCpusetCpus and virCgroupHasController calls while vcpus and iothreads would call virCgroupAddTask after all cgroup related calls. I mention this mainly because I have a feint (or faint) recollection of there being issues regarding the ordering of calls and what gets copied when (details I really tried to forget). Since emulator is the pid and vcpu/iothread are tid's I'm just being overly cautious that there's a difference in ordering that isn't readily apparent. This looks reasonable to me, but hopefully someone else will chime in regarding the order of virCgroupNewThread, virCgroupAddTask, qemuSetupCgroupCpusetCpus, virCgroupSetCpusetMems, and qemuSetupCgroupVcpuBW prior to calling virProcessSetAffinity. Consider it a fragile ACK - John

On Thu, Jun 23, 2016 at 03:06:50PM -0400, John Ferlan wrote:
On 06/22/2016 12:37 PM, Martin Kletzander wrote:
Setting up cgroups and other things for all kinds of threads (the emulator thread, vCPU threads, I/O threads) was copy-pasted every time new thing was added. Over time each one of those functions changed a bit differently. So create one function that does all that setup and start using it. That will shave some duplicated code and maybe fix some bugs as well.
Signed-off-by: Martin Kletzander <mkletzan@redhat.com> --- src/qemu/qemu_process.c | 278 +++++++++++++++--------------------------------- 1 file changed, 87 insertions(+), 191 deletions(-)
Would have been so much easier one at a time... The scroll wheel on my mouse needs a break.
Yeah, I tried several different splits and none of them worked from my POV, maybe splitting them one by one could look better.
I think this looks so much more logical. The only other comment I have is regarding the virCgroupAddTask call ordering...
If I'm reading right, prior to this set of changes the emulator code would call that right after virCgroupNewThread and then make the qemuSetupCgroupCpusetCpus and virCgroupHasController calls while vcpus and iothreads would call virCgroupAddTask after all cgroup related calls. I mention this mainly because I have a feint (or faint) recollection of there being issues regarding the ordering of calls and what gets copied when (details I really tried to forget).
Since emulator is the pid and vcpu/iothread are tid's I'm just being overly cautious that there's a difference in ordering that isn't readily apparent.
This looks reasonable to me, but hopefully someone else will chime in regarding the order of virCgroupNewThread, virCgroupAddTask, qemuSetupCgroupCpusetCpus, virCgroupSetCpusetMems, and qemuSetupCgroupVcpuBW prior to calling virProcessSetAffinity
The only difference here is whether you set up the cgroup and add the task to it after that or the other way around. And that's it, there's no more to it. There is no difference between PID/TID handling, nothing will have access to more resources because the domain is not running and adding a task to a group that was not set up will be the same as keeping it where it was before the call to that function. However, looking at it I see that there is a "bug" that could be made "cleaner". If we do it this way (adding the task before setting up the cgroup) and some of the settings fail, then the virCgroupRemove() will fail to remove that cgroup because there are tasks in it. At the end it won't boil down to anything bigger because we remove all the cgroups after qemu ends (or we kill it or whatever else). So there is error handling that could be done more nicely... So I'll change that.
Consider it a fragile ACK -
Should I wait for others, send a v2 or are you OK with me just moving the virCgroupAddTask() call after the setup?
John

On 06/24/2016 05:02 AM, Martin Kletzander wrote:
On Thu, Jun 23, 2016 at 03:06:50PM -0400, John Ferlan wrote:
On 06/22/2016 12:37 PM, Martin Kletzander wrote:
Setting up cgroups and other things for all kinds of threads (the emulator thread, vCPU threads, I/O threads) was copy-pasted every time new thing was added. Over time each one of those functions changed a bit differently. So create one function that does all that setup and start using it. That will shave some duplicated code and maybe fix some bugs as well.
Signed-off-by: Martin Kletzander <mkletzan@redhat.com> --- src/qemu/qemu_process.c | 278 +++++++++++++++--------------------------------- 1 file changed, 87 insertions(+), 191 deletions(-)
Would have been so much easier one at a time... The scroll wheel on my mouse needs a break.
Yeah, I tried several different splits and none of them worked from my POV, maybe splitting them one by one could look better.
I think this looks so much more logical. The only other comment I have is regarding the virCgroupAddTask call ordering...
If I'm reading right, prior to this set of changes the emulator code would call that right after virCgroupNewThread and then make the qemuSetupCgroupCpusetCpus and virCgroupHasController calls while vcpus and iothreads would call virCgroupAddTask after all cgroup related calls. I mention this mainly because I have a feint (or faint) recollection of there being issues regarding the ordering of calls and what gets copied when (details I really tried to forget).
Since emulator is the pid and vcpu/iothread are tid's I'm just being overly cautious that there's a difference in ordering that isn't readily apparent.
This looks reasonable to me, but hopefully someone else will chime in regarding the order of virCgroupNewThread, virCgroupAddTask, qemuSetupCgroupCpusetCpus, virCgroupSetCpusetMems, and qemuSetupCgroupVcpuBW prior to calling virProcessSetAffinity
The only difference here is whether you set up the cgroup and add the task to it after that or the other way around. And that's it, there's no more to it. There is no difference between PID/TID handling, nothing will have access to more resources because the domain is not running and adding a task to a group that was not set up will be the same as keeping it where it was before the call to that function.
However, looking at it I see that there is a "bug" that could be made "cleaner". If we do it this way (adding the task before setting up the cgroup) and some of the settings fail, then the virCgroupRemove() will fail to remove that cgroup because there are tasks in it. At the end it won't boil down to anything bigger because we remove all the cgroups after qemu ends (or we kill it or whatever else). So there is error handling that could be done more nicely... So I'll change that.
Consider it a fragile ACK -
Should I wait for others, send a v2 or are you OK with me just moving the virCgroupAddTask() call after the setup?
Moving the AddTask is fine by me, I don't need to see another version. I just know from the last time I waded into the Cgroup patches water that one has to be very careful to avoid being bitten by something that lurks there. I think though your explanation and adjustment makes me feel better about the placement. If no one else has the time to look, then this is better than before... John FWIW: I think for patch 2 the ATTRIBUTE_UNUSED would have only been necessary on the qemuProcessSetupPid() - every other caller would have been using driver to call qemuProcessSetupPid

On Wed, Jun 22, 2016 at 06:37:21PM +0200, Martin Kletzander wrote:
Setting up cgroups and other things for all kinds of threads (the emulator thread, vCPU threads, I/O threads) was copy-pasted every time new thing was added. Over time each one of those functions changed a bit differently. So create one function that does all that setup and start using it. That will shave some duplicated code and maybe fix some bugs as well.
Signed-off-by: Martin Kletzander <mkletzan@redhat.com> --- src/qemu/qemu_process.c | 278 +++++++++++++++--------------------------------- 1 file changed, 87 insertions(+), 191 deletions(-)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 215fe5f2f210..d1247d2fd0f9 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -2306,70 +2306,108 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver, }
+/** + * qemuProcessSetupPid: + * + * This function sets resource properities (affinity, cgroups, + * scheduler) for any PID associated with a domain. It should be used + * to set up emulator PIDs as well as vCPU and I/O thread pids to + * ensure they are all handled the same way. + * + * Returns 0 on success, -1 on error. + */ static int -qemuProcessSetupEmulator(virDomainObjPtr vm) +qemuProcessSetupPid(virDomainObjPtr vm, + pid_t pid, + virCgroupThreadName nameval, + int id, + virBitmapPtr cpumask,
+ int sched_policy, + int sched_priority)
You can just pass a pointer to the virDomainThreadSchedParam structure.
{ - virBitmapPtr cpumask = NULL; - virCgroupPtr cgroup_emulator = NULL; qemuDomainObjPrivatePtr priv = vm->privateData;
- unsigned long long period = vm->def->cputune.emulator_period; - long long quota = vm->def->cputune.emulator_quota; + unsigned long long period = vm->def->cputune.period; + long long quota = vm->def->cputune.quota;
These two are not equivalent, we even have separate XML elements for them: <cputune> <period>1000000</period> <quota>-1</quota> <emulator_period>1000000</emulator_period> <emulator_quota>-1</emulator_quota> </cputune> Also, renaming the variables and reordering the code in the respective function first would make this fragile code easier to review. Jan

On Fri, Jun 24, 2016 at 12:51:40PM +0200, Ján Tomko wrote:
On Wed, Jun 22, 2016 at 06:37:21PM +0200, Martin Kletzander wrote:
Setting up cgroups and other things for all kinds of threads (the emulator thread, vCPU threads, I/O threads) was copy-pasted every time new thing was added. Over time each one of those functions changed a bit differently. So create one function that does all that setup and start using it. That will shave some duplicated code and maybe fix some bugs as well.
Signed-off-by: Martin Kletzander <mkletzan@redhat.com> --- src/qemu/qemu_process.c | 278 +++++++++++++++--------------------------------- 1 file changed, 87 insertions(+), 191 deletions(-)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index 215fe5f2f210..d1247d2fd0f9 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -2306,70 +2306,108 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver, }
+/** + * qemuProcessSetupPid: + * + * This function sets resource properities (affinity, cgroups, + * scheduler) for any PID associated with a domain. It should be used + * to set up emulator PIDs as well as vCPU and I/O thread pids to + * ensure they are all handled the same way. + * + * Returns 0 on success, -1 on error. + */ static int -qemuProcessSetupEmulator(virDomainObjPtr vm) +qemuProcessSetupPid(virDomainObjPtr vm, + pid_t pid, + virCgroupThreadName nameval, + int id, + virBitmapPtr cpumask,
+ int sched_policy, + int sched_priority)
You can just pass a pointer to the virDomainThreadSchedParam structure.
{ - virBitmapPtr cpumask = NULL; - virCgroupPtr cgroup_emulator = NULL; qemuDomainObjPrivatePtr priv = vm->privateData;
- unsigned long long period = vm->def->cputune.emulator_period; - long long quota = vm->def->cputune.emulator_quota; + unsigned long long period = vm->def->cputune.period; + long long quota = vm->def->cputune.quota;
These two are not equivalent, we even have separate XML elements for them: <cputune> <period>1000000</period> <quota>-1</quota> <emulator_period>1000000</emulator_period> <emulator_quota>-1</emulator_quota> </cputune>
Also, renaming the variables and reordering the code in the respective function first would make this fragile code easier to review.
After the fiasco with _some_ other patch, I'll rather send a v2...
Jan

Some settings may be confusing and in case users use numad placement in combination with static placement we could warn them as it might not be wanted (but it's not forbidden). Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1254402 Signed-off-by: Martin Kletzander <mkletzan@redhat.com> --- src/qemu/qemu_driver.c | 4 +- src/qemu/qemu_process.c | 107 +++++++++++++++++++++++++++++++++++++++++------- src/qemu/qemu_process.h | 6 ++- 3 files changed, 99 insertions(+), 18 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 931efae27dee..4cf9f0560092 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -4702,7 +4702,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver, goto cleanup; } - if (qemuProcessSetupVcpu(vm, vcpu) < 0) + if (qemuProcessSetupVcpu(driver, vm, vcpu) < 0) goto cleanup; ret = 0; @@ -5828,7 +5828,7 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver, iothrid->thread_id = new_iothreads[idx]->thread_id; - if (qemuProcessSetupIOThread(vm, iothrid) < 0) + if (qemuProcessSetupIOThread(driver, vm, iothrid) < 0) goto cleanup; ret = 0; diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index d1247d2fd0f9..51709f8c9d58 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -2306,6 +2306,75 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver, } +static int +qemuProcessCheckCpusMemsAlignment(virQEMUDriverPtr driver, + virDomainObjPtr vm, + virBitmapPtr cpumask, + const char *mem_mask) +{ + int ret = -1; + int hostnodes = 0; + char *cpumask_str = NULL; + char *tmpmask_str = NULL; + char *mem_cpus_str = NULL; + virCapsPtr caps = NULL; + virBitmapPtr tmpmask = NULL; + virBitmapPtr mem_cpus = NULL; + virBitmapPtr mem_nodes = NULL; + virDomainNumatuneMemMode mem_mode; + + if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) != 0) + return 0; + + if (mem_mode != VIR_DOMAIN_NUMATUNE_MEM_STRICT) + return 0; + + if (!mem_mask || !cpumask) + return 0; + + if (!(caps = 
virQEMUDriverGetCapabilities(driver, false))) + goto cleanup; + + if (!(tmpmask = virBitmapNewCopy(cpumask))) + goto cleanup; + + if ((hostnodes = virNumaGetMaxNode()) < 0) + goto cleanup; + + if (virBitmapParse(mem_mask, &mem_nodes, hostnodes) < 0) + goto cleanup; + + if (!(mem_cpus = virCapabilitiesGetCpusForNodemask(caps, mem_nodes))) + goto cleanup; + + virBitmapSubtract(tmpmask, mem_cpus); + if (!virBitmapIsAllClear(tmpmask)) { + if (!(cpumask_str = virBitmapFormat(cpumask))) + goto cleanup; + + if (!(tmpmask_str = virBitmapFormat(tmpmask))) + goto cleanup; + + if (!(mem_cpus_str = virBitmapFormat(mem_cpus))) + goto cleanup; + + VIR_WARN("CPUs '%s' in cpumask '%s' might not have access to any NUMA " + "node in memory's nodeset '%s' which consists of CPUs: '%s'.", + tmpmask_str, cpumask_str, mem_mask, mem_cpus_str); + } + + ret = 0; + cleanup: + VIR_FREE(mem_cpus_str); + VIR_FREE(tmpmask_str); + VIR_FREE(cpumask_str); + virBitmapFree(mem_cpus); + virBitmapFree(tmpmask); + virObjectUnref(caps); + return ret; +} + + /** * qemuProcessSetupPid: * @@ -2317,7 +2386,8 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver, * Returns 0 on success, -1 on error. */ static int -qemuProcessSetupPid(virDomainObjPtr vm, +qemuProcessSetupPid(virQEMUDriverPtr driver, + virDomainObjPtr vm, pid_t pid, virCgroupThreadName nameval, int id, @@ -2390,6 +2460,10 @@ qemuProcessSetupPid(virDomainObjPtr vm, if ((period || quota) && qemuSetupCgroupVcpuBW(cgroup, period, quota) < 0) goto cleanup; + + if (qemuProcessCheckCpusMemsAlignment(driver, vm, + use_cpumask, mem_mask) < 0) + goto cleanup; } /* Setup legacy affinity. 
*/ @@ -2415,9 +2489,10 @@ qemuProcessSetupPid(virDomainObjPtr vm, static int -qemuProcessSetupEmulator(virDomainObjPtr vm) +qemuProcessSetupEmulator(virQEMUDriverPtr driver, + virDomainObjPtr vm) { - return qemuProcessSetupPid(vm, vm->pid, VIR_CGROUP_THREAD_EMULATOR, + return qemuProcessSetupPid(driver, vm, vm->pid, VIR_CGROUP_THREAD_EMULATOR, 0, vm->def->cputune.emulatorpin, VIR_PROC_POLICY_NONE, 0); } @@ -4616,20 +4691,22 @@ qemuProcessNetworkPrepareDevices(virDomainDefPtr def) * Returns 0 on success, -1 on error. */ int -qemuProcessSetupVcpu(virDomainObjPtr vm, +qemuProcessSetupVcpu(virQEMUDriverPtr driver, + virDomainObjPtr vm, unsigned int vcpuid) { pid_t vcpupid = qemuDomainGetVcpuPid(vm, vcpuid); virDomainVcpuInfoPtr vcpu = virDomainDefGetVcpu(vm->def, vcpuid); - return qemuProcessSetupPid(vm, vcpupid, VIR_CGROUP_THREAD_VCPU, + return qemuProcessSetupPid(driver, vm, vcpupid, VIR_CGROUP_THREAD_VCPU, vcpuid, vcpu->cpumask, vcpu->sched.policy, vcpu->sched.priority); } static int -qemuProcessSetupVcpus(virDomainObjPtr vm) +qemuProcessSetupVcpus(virQEMUDriverPtr driver, + virDomainObjPtr vm) { virDomainVcpuInfoPtr vcpu; unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def); @@ -4669,7 +4746,7 @@ qemuProcessSetupVcpus(virDomainObjPtr vm) if (!vcpu->online) continue; - if (qemuProcessSetupVcpu(vm, i) < 0) + if (qemuProcessSetupVcpu(driver, vm, i) < 0) return -1; } @@ -4678,11 +4755,12 @@ qemuProcessSetupVcpus(virDomainObjPtr vm) int -qemuProcessSetupIOThread(virDomainObjPtr vm, +qemuProcessSetupIOThread(virQEMUDriverPtr driver, + virDomainObjPtr vm, virDomainIOThreadIDDefPtr iothread) { - return qemuProcessSetupPid(vm, iothread->thread_id, + return qemuProcessSetupPid(driver, vm, iothread->thread_id, VIR_CGROUP_THREAD_IOTHREAD, iothread->iothread_id, iothread->cpumask, @@ -4692,14 +4770,15 @@ qemuProcessSetupIOThread(virDomainObjPtr vm, static int -qemuProcessSetupIOThreads(virDomainObjPtr vm) +qemuProcessSetupIOThreads(virQEMUDriverPtr driver, + virDomainObjPtr 
vm) { size_t i; for (i = 0; i < vm->def->niothreadids; i++) { virDomainIOThreadIDDefPtr info = vm->def->iothreadids[i]; - if (qemuProcessSetupIOThread(vm, info) < 0) + if (qemuProcessSetupIOThread(driver, vm, info) < 0) return -1; } @@ -5124,7 +5203,7 @@ qemuProcessLaunch(virConnectPtr conn, goto cleanup; VIR_DEBUG("Setting emulator tuning/settings"); - if (qemuProcessSetupEmulator(vm) < 0) + if (qemuProcessSetupEmulator(driver, vm) < 0) goto cleanup; VIR_DEBUG("Setting domain security labels"); @@ -5205,11 +5284,11 @@ qemuProcessLaunch(virConnectPtr conn, goto cleanup; VIR_DEBUG("Setting vCPU tuning/settings"); - if (qemuProcessSetupVcpus(vm) < 0) + if (qemuProcessSetupVcpus(driver, vm) < 0) goto cleanup; VIR_DEBUG("Setting IOThread tuning/settings"); - if (qemuProcessSetupIOThreads(vm) < 0) + if (qemuProcessSetupIOThreads(driver, vm) < 0) goto cleanup; VIR_DEBUG("Setting any required VM passwords"); diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h index 37081ad4c177..ea830f628f40 100644 --- a/src/qemu/qemu_process.h +++ b/src/qemu/qemu_process.h @@ -174,9 +174,11 @@ virDomainDiskDefPtr qemuProcessFindDomainDiskByAlias(virDomainObjPtr vm, int qemuConnectAgent(virQEMUDriverPtr driver, virDomainObjPtr vm); -int qemuProcessSetupVcpu(virDomainObjPtr vm, +int qemuProcessSetupVcpu(virQEMUDriverPtr driver, + virDomainObjPtr vm, unsigned int vcpuid); -int qemuProcessSetupIOThread(virDomainObjPtr vm, +int qemuProcessSetupIOThread(virQEMUDriverPtr driver, + virDomainObjPtr vm, virDomainIOThreadIDDefPtr iothread); int qemuRefreshVirtioChannelState(virQEMUDriverPtr driver, -- 2.9.0

On 06/22/2016 12:37 PM, Martin Kletzander wrote:
Some settings may be confusing and in case users use numad placement in combination with static placement we could warn them as it might not be wanted (but it's not forbidden).
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1254402
Signed-off-by: Martin Kletzander <mkletzan@redhat.com> --- src/qemu/qemu_driver.c | 4 +- src/qemu/qemu_process.c | 107 +++++++++++++++++++++++++++++++++++++++++------- src/qemu/qemu_process.h | 6 ++- 3 files changed, 99 insertions(+), 18 deletions(-)
Perhaps could have been two patches ... one to add the driver argument and the second to use it...
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 931efae27dee..4cf9f0560092 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -4702,7 +4702,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver, goto cleanup; }
- if (qemuProcessSetupVcpu(vm, vcpu) < 0) + if (qemuProcessSetupVcpu(driver, vm, vcpu) < 0) goto cleanup;
ret = 0; @@ -5828,7 +5828,7 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver,
iothrid->thread_id = new_iothreads[idx]->thread_id;
- if (qemuProcessSetupIOThread(vm, iothrid) < 0) + if (qemuProcessSetupIOThread(driver, vm, iothrid) < 0) goto cleanup;
ret = 0; diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index d1247d2fd0f9..51709f8c9d58 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -2306,6 +2306,75 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver, }
+static int +qemuProcessCheckCpusMemsAlignment(virQEMUDriverPtr driver, + virDomainObjPtr vm, + virBitmapPtr cpumask, + const char *mem_mask) +{ + int ret = -1; + int hostnodes = 0; + char *cpumask_str = NULL; + char *tmpmask_str = NULL; + char *mem_cpus_str = NULL; + virCapsPtr caps = NULL; + virBitmapPtr tmpmask = NULL; + virBitmapPtr mem_cpus = NULL; + virBitmapPtr mem_nodes = NULL; + virDomainNumatuneMemMode mem_mode; + + if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) != 0) + return 0; + + if (mem_mode != VIR_DOMAIN_NUMATUNE_MEM_STRICT) + return 0; + + if (!mem_mask || !cpumask) + return 0; + + if (!(caps = virQEMUDriverGetCapabilities(driver, false))) + goto cleanup; + + if (!(tmpmask = virBitmapNewCopy(cpumask))) + goto cleanup; + + if ((hostnodes = virNumaGetMaxNode()) < 0) + goto cleanup; + + if (virBitmapParse(mem_mask, &mem_nodes, hostnodes) < 0) + goto cleanup; + + if (!(mem_cpus = virCapabilitiesGetCpusForNodemask(caps, mem_nodes))) + goto cleanup; + + virBitmapSubtract(tmpmask, mem_cpus); + if (!virBitmapIsAllClear(tmpmask)) { + if (!(cpumask_str = virBitmapFormat(cpumask))) + goto cleanup; + + if (!(tmpmask_str = virBitmapFormat(tmpmask))) + goto cleanup; + + if (!(mem_cpus_str = virBitmapFormat(mem_cpus))) + goto cleanup; + + VIR_WARN("CPUs '%s' in cpumask '%s' might not have access to any NUMA " + "node in memory's nodeset '%s' which consists of CPUs: '%s'.", + tmpmask_str, cpumask_str, mem_mask, mem_cpus_str);
Hopefully enough details ;-)
+ } + + ret = 0; + cleanup: + VIR_FREE(mem_cpus_str); + VIR_FREE(tmpmask_str); + VIR_FREE(cpumask_str); + virBitmapFree(mem_cpus);
Coverity complains that mem_nodes is leaked.
+ virBitmapFree(tmpmask); + virObjectUnref(caps); + return ret; +} + + /** * qemuProcessSetupPid: * @@ -2317,7 +2386,8 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver, * Returns 0 on success, -1 on error. */ static int -qemuProcessSetupPid(virDomainObjPtr vm, +qemuProcessSetupPid(virQEMUDriverPtr driver, + virDomainObjPtr vm, pid_t pid, virCgroupThreadName nameval, int id, @@ -2390,6 +2460,10 @@ qemuProcessSetupPid(virDomainObjPtr vm, if ((period || quota) && qemuSetupCgroupVcpuBW(cgroup, period, quota) < 0) goto cleanup; + + if (qemuProcessCheckCpusMemsAlignment(driver, vm, + use_cpumask, mem_mask) < 0) + goto cleanup;
It doesn't seem to matter that for an emulator virCgroupSetCpusetMems is not called yet... But I figured I'd ask to double check! ACK with the coverity error fixed as this seems reasonable to me. John
}
/* Setup legacy affinity. */ @@ -2415,9 +2489,10 @@ qemuProcessSetupPid(virDomainObjPtr vm,
static int -qemuProcessSetupEmulator(virDomainObjPtr vm) +qemuProcessSetupEmulator(virQEMUDriverPtr driver, + virDomainObjPtr vm) { - return qemuProcessSetupPid(vm, vm->pid, VIR_CGROUP_THREAD_EMULATOR, + return qemuProcessSetupPid(driver, vm, vm->pid, VIR_CGROUP_THREAD_EMULATOR, 0, vm->def->cputune.emulatorpin, VIR_PROC_POLICY_NONE, 0); } @@ -4616,20 +4691,22 @@ qemuProcessNetworkPrepareDevices(virDomainDefPtr def) * Returns 0 on success, -1 on error. */ int -qemuProcessSetupVcpu(virDomainObjPtr vm, +qemuProcessSetupVcpu(virQEMUDriverPtr driver, + virDomainObjPtr vm, unsigned int vcpuid) { pid_t vcpupid = qemuDomainGetVcpuPid(vm, vcpuid); virDomainVcpuInfoPtr vcpu = virDomainDefGetVcpu(vm->def, vcpuid);
- return qemuProcessSetupPid(vm, vcpupid, VIR_CGROUP_THREAD_VCPU, + return qemuProcessSetupPid(driver, vm, vcpupid, VIR_CGROUP_THREAD_VCPU, vcpuid, vcpu->cpumask, vcpu->sched.policy, vcpu->sched.priority); }
static int -qemuProcessSetupVcpus(virDomainObjPtr vm) +qemuProcessSetupVcpus(virQEMUDriverPtr driver, + virDomainObjPtr vm) { virDomainVcpuInfoPtr vcpu; unsigned int maxvcpus = virDomainDefGetVcpusMax(vm->def); @@ -4669,7 +4746,7 @@ qemuProcessSetupVcpus(virDomainObjPtr vm) if (!vcpu->online) continue;
- if (qemuProcessSetupVcpu(vm, i) < 0) + if (qemuProcessSetupVcpu(driver, vm, i) < 0) return -1; }
@@ -4678,11 +4755,12 @@ qemuProcessSetupVcpus(virDomainObjPtr vm)
int -qemuProcessSetupIOThread(virDomainObjPtr vm, +qemuProcessSetupIOThread(virQEMUDriverPtr driver, + virDomainObjPtr vm, virDomainIOThreadIDDefPtr iothread) {
- return qemuProcessSetupPid(vm, iothread->thread_id, + return qemuProcessSetupPid(driver, vm, iothread->thread_id, VIR_CGROUP_THREAD_IOTHREAD, iothread->iothread_id, iothread->cpumask, @@ -4692,14 +4770,15 @@ qemuProcessSetupIOThread(virDomainObjPtr vm,
static int -qemuProcessSetupIOThreads(virDomainObjPtr vm) +qemuProcessSetupIOThreads(virQEMUDriverPtr driver, + virDomainObjPtr vm) { size_t i;
for (i = 0; i < vm->def->niothreadids; i++) { virDomainIOThreadIDDefPtr info = vm->def->iothreadids[i];
- if (qemuProcessSetupIOThread(vm, info) < 0) + if (qemuProcessSetupIOThread(driver, vm, info) < 0) return -1; }
@@ -5124,7 +5203,7 @@ qemuProcessLaunch(virConnectPtr conn, goto cleanup;
VIR_DEBUG("Setting emulator tuning/settings"); - if (qemuProcessSetupEmulator(vm) < 0) + if (qemuProcessSetupEmulator(driver, vm) < 0) goto cleanup;
VIR_DEBUG("Setting domain security labels"); @@ -5205,11 +5284,11 @@ qemuProcessLaunch(virConnectPtr conn, goto cleanup;
VIR_DEBUG("Setting vCPU tuning/settings"); - if (qemuProcessSetupVcpus(vm) < 0) + if (qemuProcessSetupVcpus(driver, vm) < 0) goto cleanup;
VIR_DEBUG("Setting IOThread tuning/settings"); - if (qemuProcessSetupIOThreads(vm) < 0) + if (qemuProcessSetupIOThreads(driver, vm) < 0) goto cleanup;
VIR_DEBUG("Setting any required VM passwords"); diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h index 37081ad4c177..ea830f628f40 100644 --- a/src/qemu/qemu_process.h +++ b/src/qemu/qemu_process.h @@ -174,9 +174,11 @@ virDomainDiskDefPtr qemuProcessFindDomainDiskByAlias(virDomainObjPtr vm, int qemuConnectAgent(virQEMUDriverPtr driver, virDomainObjPtr vm);
-int qemuProcessSetupVcpu(virDomainObjPtr vm, +int qemuProcessSetupVcpu(virQEMUDriverPtr driver, + virDomainObjPtr vm, unsigned int vcpuid); -int qemuProcessSetupIOThread(virDomainObjPtr vm, +int qemuProcessSetupIOThread(virQEMUDriverPtr driver, + virDomainObjPtr vm, virDomainIOThreadIDDefPtr iothread);
int qemuRefreshVirtioChannelState(virQEMUDriverPtr driver,

On Thu, Jun 23, 2016 at 03:21:04PM -0400, John Ferlan wrote:
On 06/22/2016 12:37 PM, Martin Kletzander wrote:
Some settings may be confusing, and in case users use numad placement in combination with static placement, we could warn them, as it might not be the wanted behaviour (but it's not forbidden).
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1254402
Signed-off-by: Martin Kletzander <mkletzan@redhat.com> --- src/qemu/qemu_driver.c | 4 +- src/qemu/qemu_process.c | 107 +++++++++++++++++++++++++++++++++++++++++------- src/qemu/qemu_process.h | 6 ++- 3 files changed, 99 insertions(+), 18 deletions(-)
Perhaps could have been two patches ... one to add the driver argument and the second to use it...
I wanted to do that actually, but I had to add ATTRIBUTE_UNUSED everywhere and then remove it in the next patch, which would not shorten the patch at all and, in my opinion, would not even make it more readable.
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 931efae27dee..4cf9f0560092 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -4702,7 +4702,7 @@ qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver, goto cleanup; }
- if (qemuProcessSetupVcpu(vm, vcpu) < 0) + if (qemuProcessSetupVcpu(driver, vm, vcpu) < 0) goto cleanup;
ret = 0; @@ -5828,7 +5828,7 @@ qemuDomainHotplugAddIOThread(virQEMUDriverPtr driver,
iothrid->thread_id = new_iothreads[idx]->thread_id;
- if (qemuProcessSetupIOThread(vm, iothrid) < 0) + if (qemuProcessSetupIOThread(driver, vm, iothrid) < 0) goto cleanup;
ret = 0; diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c index d1247d2fd0f9..51709f8c9d58 100644 --- a/src/qemu/qemu_process.c +++ b/src/qemu/qemu_process.c @@ -2306,6 +2306,75 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver, }
+static int +qemuProcessCheckCpusMemsAlignment(virQEMUDriverPtr driver, + virDomainObjPtr vm, + virBitmapPtr cpumask, + const char *mem_mask) +{ + int ret = -1; + int hostnodes = 0; + char *cpumask_str = NULL; + char *tmpmask_str = NULL; + char *mem_cpus_str = NULL; + virCapsPtr caps = NULL; + virBitmapPtr tmpmask = NULL; + virBitmapPtr mem_cpus = NULL; + virBitmapPtr mem_nodes = NULL; + virDomainNumatuneMemMode mem_mode; + + if (virDomainNumatuneGetMode(vm->def->numa, -1, &mem_mode) != 0) + return 0; + + if (mem_mode != VIR_DOMAIN_NUMATUNE_MEM_STRICT) + return 0; + + if (!mem_mask || !cpumask) + return 0; + + if (!(caps = virQEMUDriverGetCapabilities(driver, false))) + goto cleanup; + + if (!(tmpmask = virBitmapNewCopy(cpumask))) + goto cleanup; + + if ((hostnodes = virNumaGetMaxNode()) < 0) + goto cleanup; + + if (virBitmapParse(mem_mask, &mem_nodes, hostnodes) < 0) + goto cleanup; + + if (!(mem_cpus = virCapabilitiesGetCpusForNodemask(caps, mem_nodes))) + goto cleanup; + + virBitmapSubtract(tmpmask, mem_cpus); + if (!virBitmapIsAllClear(tmpmask)) { + if (!(cpumask_str = virBitmapFormat(cpumask))) + goto cleanup; + + if (!(tmpmask_str = virBitmapFormat(tmpmask))) + goto cleanup; + + if (!(mem_cpus_str = virBitmapFormat(mem_cpus))) + goto cleanup; + + VIR_WARN("CPUs '%s' in cpumask '%s' might not have access to any NUMA " + "node in memory's nodeset '%s' which consists of CPUs: '%s'.", + tmpmask_str, cpumask_str, mem_mask, mem_cpus_str);
Hopefully enough details ;-)
+ } + + ret = 0; + cleanup: + VIR_FREE(mem_cpus_str); + VIR_FREE(tmpmask_str); + VIR_FREE(cpumask_str); + virBitmapFree(mem_cpus);
Coverity complains that mem_nodes is leaked.
Oh, good catch, I renamed them so many times (due to keeping part of the bad naming and trying to be descriptive at the same time). [...]
@@ -2390,6 +2460,10 @@ qemuProcessSetupPid(virDomainObjPtr vm, if ((period || quota) && qemuSetupCgroupVcpuBW(cgroup, period, quota) < 0) goto cleanup; + + if (qemuProcessCheckCpusMemsAlignment(driver, vm, + use_cpumask, mem_mask) < 0) + goto cleanup;
It doesn't seem to matter that for an emulator virCgroupSetCpusetMems is not called yet... But I figured I'd ask to double check!
Yes, it will be set a bit later on, so the check still makes sense.
ACK with the coverity error fixed as this seems reasonable to me.
Thanks, I'll wait for the consensus on the first one. Martin
John
participants (3)
-
John Ferlan
-
Ján Tomko
-
Martin Kletzander