On 01/14/2016 11:27 AM, Peter Krempa wrote:
Free the old vcpupids array in case this function is called again during
the run of the VM. It will later be reused in the vCPU hotplug code.
---
src/qemu/qemu_domain.c | 17 +++++++----------
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 1ea1cd3..a6fb232 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -4275,7 +4275,7 @@ qemuDomainDetectVcpuPids(virQEMUDriverPtr driver,
int asyncJob)
{
pid_t *cpupids = NULL;
- int ncpupids;
+ int ncpupids = 0;
qemuDomainObjPrivatePtr priv = vm->privateData;
/*
@@ -4306,11 +4306,8 @@ qemuDomainDetectVcpuPids(virQEMUDriverPtr driver,
* Just disable CPU pinning with TCG until someone wants
* to try to do this hard work.
*/
- if (vm->def->virtType == VIR_DOMAIN_VIRT_QEMU) {
- priv->nvcpupids = 0;
- priv->vcpupids = NULL;
- return 0;
- }
+ if (vm->def->virtType == VIR_DOMAIN_VIRT_QEMU)
+ goto done;
if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
return -1;
While you're changing things here: a qemuDomainObjExitMonitor failure
could leak cpupids.
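Something along these lines would plug it (just a sketch of the monitor
section, assuming the usual qemuMonitorGetCPUInfo call in between, untested):

    if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
        return -1;

    ncpupids = qemuMonitorGetCPUInfo(priv->mon, &cpupids);

    if (qemuDomainObjExitMonitor(driver, vm) < 0) {
        VIR_FREE(cpupids);  /* qemuMonitorGetCPUInfo may have allocated it */
        return -1;
    }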
Also, there's a comment with "VCPU<-> PID", but how about either
"VCPU <-> PID" or "VCPU<->PID" ?
John
@@ -4322,10 +4319,8 @@ qemuDomainDetectVcpuPids(virQEMUDriverPtr driver,
* support this command */
if (ncpupids <= 0) {
virResetLastError();
-
- priv->nvcpupids = 0;
- priv->vcpupids = NULL;
- return 0;
+ ncpupids = 0;
+ goto done;
}
if (ncpupids != virDomainDefGetVcpus(vm->def)) {
@@ -4337,6 +4332,8 @@ qemuDomainDetectVcpuPids(virQEMUDriverPtr driver,
return -1;
}
+ done:
+ VIR_FREE(priv->vcpupids);
priv->nvcpupids = ncpupids;
priv->vcpupids = cpupids;
return 0;
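For reference, the net effect of the new done: path is roughly this (a
condensed sketch of qemuDomainDetectVcpuPids() after the patch, with the
monitor enter/exit and the vCPU-count check trimmed, not the literal code):

    pid_t *cpupids = NULL;
    int ncpupids = 0;
    qemuDomainObjPrivatePtr priv = vm->privateData;

    if (vm->def->virtType == VIR_DOMAIN_VIRT_QEMU)
        goto done;                     /* TCG: no per-vCPU threads to track */

    ncpupids = qemuMonitorGetCPUInfo(priv->mon, &cpupids);
    if (ncpupids <= 0) {
        virResetLastError();
        ncpupids = 0;                  /* old qemu: treat as "no pids known" */
        goto done;
    }

 done:
    VIR_FREE(priv->vcpupids);          /* drop any array from a previous
                                        * detection so re-runs during vCPU
                                        * hotplug don't leak it */
    priv->nvcpupids = ncpupids;
    priv->vcpupids = cpupids;
    return 0;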