Instead of directly accessing the vCPU PID array, add a helper function to do this.
---
src/qemu/qemu_cgroup.c | 3 ++-
src/qemu/qemu_domain.c | 20 ++++++++++++++++++++
src/qemu/qemu_domain.h | 1 +
src/qemu/qemu_driver.c | 7 ++++---
src/qemu/qemu_process.c | 5 ++---
5 files changed, 29 insertions(+), 7 deletions(-)
diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
index a9cf9e8..402940f 100644
--- a/src/qemu/qemu_cgroup.c
+++ b/src/qemu/qemu_cgroup.c
@@ -1018,7 +1018,8 @@ qemuSetupCgroupForVcpu(virDomainObjPtr vm)
goto cleanup;
/* move the thread for vcpu to sub dir */
- if (virCgroupAddTask(cgroup_vcpu, priv->vcpupids[i]) < 0)
+ if (virCgroupAddTask(cgroup_vcpu,
+ qemuDomainGetVcpuPid(vm, i)) < 0)
goto cleanup;
if (period || quota) {
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 32ee5de..cdda129 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -4113,3 +4113,23 @@ qemuDomainHasVcpuPids(virDomainObjPtr vm)
return priv->nvcpupids > 0;
}
+
+
+/**
+ * qemuDomainGetVcpuPid:
+ * @vm: domain object
+ * @vcpu: cpu id
+ *
+ * Returns the vCPU pid. If @vcpu is offline or out of range, 0 is returned.
+ */
+pid_t
+qemuDomainGetVcpuPid(virDomainObjPtr vm,
+ unsigned int vcpu)
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+
+ if (vcpu >= priv->nvcpupids)
+ return 0;
+
+ return priv->vcpupids[vcpu];
+}
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 5e2b699..916d5d3 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -506,5 +506,6 @@ int qemuDomainDefValidateMemoryHotplug(const virDomainDef *def,
const virDomainMemoryDef *mem);
bool qemuDomainHasVcpuPids(virDomainObjPtr vm);
+pid_t qemuDomainGetVcpuPid(virDomainObjPtr vm, unsigned int vcpu);
#endif /* __QEMU_DOMAIN_H__ */
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 3b3761a..14a325a 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1449,7 +1449,7 @@ qemuDomainHelperGetVcpus(virDomainObjPtr vm, virVcpuInfoPtr info,
int maxinfo,
&(info[i].cpu),
NULL,
vm->pid,
- priv->vcpupids[i]) < 0) {
+ qemuDomainGetVcpuPid(vm, i)) < 0) {
virReportSystemError(errno, "%s",
_("cannot get vCPU placement & pCPU
time"));
return -1;
@@ -1462,7 +1462,7 @@ qemuDomainHelperGetVcpus(virDomainObjPtr vm, virVcpuInfoPtr info,
int maxinfo,
unsigned char *cpumap = VIR_GET_CPUMAP(cpumaps, maplen, v);
virBitmapPtr map = NULL;
- if (!(map = virProcessGetAffinity(priv->vcpupids[v])))
+ if (!(map = virProcessGetAffinity(qemuDomainGetVcpuPid(vm, v))))
return -1;
virBitmapToDataBuf(map, cpumap, maplen);
@@ -5156,7 +5156,8 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
goto endjob;
}
} else {
- if (virProcessSetAffinity(priv->vcpupids[vcpu], pcpumap) < 0) {
+ if (virProcessSetAffinity(qemuDomainGetVcpuPid(vm, vcpu),
+ pcpumap) < 0) {
virReportError(VIR_ERR_SYSTEM_ERROR,
_("failed to set cpu affinity for vcpu %d"),
vcpu);
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 2de2248..bfb338e 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2235,7 +2235,6 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver,
static int
qemuProcessSetVcpuAffinities(virDomainObjPtr vm)
{
- qemuDomainObjPrivatePtr priv = vm->privateData;
virDomainDefPtr def = vm->def;
virDomainPinDefPtr pininfo;
int n;
@@ -2268,7 +2267,7 @@ qemuProcessSetVcpuAffinities(virDomainObjPtr vm)
n)))
continue;
- if (virProcessSetAffinity(priv->vcpupids[n],
+ if (virProcessSetAffinity(qemuDomainGetVcpuPid(vm, n),
pininfo->cpumask) < 0) {
goto cleanup;
}
@@ -2356,7 +2355,7 @@ qemuProcessSetSchedulers(virDomainObjPtr vm)
size_t i = 0;
for (i = 0; i < priv->nvcpupids; i++) {
- if (qemuProcessSetSchedParams(i, priv->vcpupids[i],
+ if (qemuProcessSetSchedParams(i, qemuDomainGetVcpuPid(vm, i),
vm->def->cputune.nvcpusched,
vm->def->cputune.vcpusched) < 0)
return -1;
--
2.6.2