On 11/20/2015 10:22 AM, Peter Krempa wrote:
Instead of directly accessing the array add a helper to do this.
---
src/qemu/qemu_cgroup.c | 3 ++-
src/qemu/qemu_domain.c | 20 ++++++++++++++++++++
src/qemu/qemu_domain.h | 1 +
src/qemu/qemu_driver.c | 7 ++++---
src/qemu/qemu_process.c | 5 ++---
5 files changed, 29 insertions(+), 7 deletions(-)
I believe technically it's a "tid" and not a "pid", but since half the
code calls it one way and the other half calls it the other way, it's a
coin flip which name we standardize on. It's also theoretically
different from vm->pid (although I am still trying to recall why
vcpupids[0] was being compared to vm->pid).
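To make the tid/pid distinction concrete: QEMU runs each vCPU as a
thread, all threads share getpid(), and each one has its own gettid(),
so the values stashed in vcpupids[] are per-thread ids that only
coincide with vm->pid for the main thread. A standalone, untested
sketch (not libvirt code, Linux-specific):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/syscall.h>

static void *
worker(void *opaque)
{
    /* same pid as main(), but a distinct tid */
    (void) opaque;
    printf("worker: pid=%d tid=%ld\n",
           (int) getpid(), (long) syscall(SYS_gettid));
    return NULL;
}

int
main(void)
{
    pthread_t thr;

    /* for the main thread only, tid == pid */
    printf("main:   pid=%d tid=%ld\n",
           (int) getpid(), (long) syscall(SYS_gettid));

    if (pthread_create(&thr, NULL, worker, NULL) != 0)
        return 1;
    pthread_join(thr, NULL);
    return 0;
}

(build with gcc -pthread)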
A couple of other places, according to cscope, still reference vcpupids[] directly:
qemuDomainObjPrivateXMLFormat
qemuDomainObjPrivateXMLParse
What about the innocent bystanders? IOW: what now gets called with a
potentially "0" pid if the passed vcpu is out of bounds? For some of
these, 0 means self/the current process, I believe. Not that that's any
worse than what could theoretically happen with an out-of-range fetch,
but the question is more whether we should skip the call entirely in
that case. If the goal is to make the code better, then perhaps the
"error condition" should be checked in each of the following (see the
sketch after this list):
virCgroupAddTask
qemuGetProcessInfo
virProcessGetAffinity
virProcessSetAffinity
qemuProcessSetSchedParams
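To make the hazard concrete: virProcessSetAffinity() on Linux boils
down to sched_setaffinity(), I believe, for which a pid of 0 means the
calling thread - i.e. libvirtd itself would get pinned. If the 0 return
stays, each caller would need a guard along these lines (untested
sketch against the qemuDomainPinVcpuFlags hunk below; only the 0 check
and its error message are new/hypothetical):

    pid_t vcpupid = qemuDomainGetVCpuPid(vm, vcpu);

    /* 0 means @vcpu was out of range (or the array is absent); bail
     * out rather than handing "self" to sched_setaffinity() & co. */
    if (vcpupid == 0) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("no thread id available for vCPU %u"), vcpu);
        goto endjob;
    }

    if (virProcessSetAffinity(vcpupid, pcpumap) < 0) {
        virReportError(VIR_ERR_SYSTEM_ERROR,
                       _("failed to set cpu affinity for vcpu %d"),
                       vcpu);
        goto endjob;
    }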
diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
index 56c2e90..d8a2b03 100644
--- a/src/qemu/qemu_cgroup.c
+++ b/src/qemu/qemu_cgroup.c
@@ -1023,7 +1023,8 @@ qemuSetupCgroupForVcpu(virDomainObjPtr vm)
goto cleanup;
/* move the thread for vcpu to sub dir */
- if (virCgroupAddTask(cgroup_vcpu, priv->vcpupids[i]) < 0)
+ if (virCgroupAddTask(cgroup_vcpu,
+ qemuDomainGetVCpuPid(vm, i)) < 0)
goto cleanup;
if (period || quota) {
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 8a45825..be1f2b4 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -3971,3 +3971,23 @@ qemuDomainHasVCpuPids(virDomainObjPtr vm)
return priv->nvcpupids > 0;
}
+
+
+/**
+ * qemuDomainGetVCpuPid:
+ * @vm: domain object
+ * @vcpu: cpu id
+ *
+ * Returns the vCPU pid. If @vcpu is offline or out of range, 0 is returned.
+ */
+pid_t
+qemuDomainGetVCpuPid(virDomainObjPtr vm,
+ unsigned int vcpu)
Would prefer "VCPU" or "Vcpu"
An ACK would be conditional on your thoughts regarding usage of tid vs.
pid and how much error checking to do. I'm assuming right now that
you're setting something up for the next batch of changes.
John
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+
+ if (vcpu >= priv->nvcpupids)
+ return 0;
+
+ return priv->vcpupids[vcpu];
+}
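Related to the error checking question above: rather than having every
caller test for 0, the helper itself could report the failure. A rough,
untested idea (hypothetical variant, using the casing I'd prefer):

pid_t
qemuDomainGetVcpuPid(virDomainObjPtr vm,
                     unsigned int vcpu)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;

    /* report the problem here so callers can't silently pass 0 along */
    if (vcpu >= priv->nvcpupids) {
        virReportError(VIR_ERR_INTERNAL_ERROR,
                       _("vCPU %u is out of range of live vCPUs"), vcpu);
        return 0;
    }

    return priv->vcpupids[vcpu];
}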
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 7f2eca1..c1aad61 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -492,5 +492,6 @@ int qemuDomainDefValidateMemoryHotplug(const virDomainDef *def,
const virDomainMemoryDef *mem);
bool qemuDomainHasVCpuPids(virDomainObjPtr vm);
+pid_t qemuDomainGetVCpuPid(virDomainObjPtr vm, unsigned int vcpu);
#endif /* __QEMU_DOMAIN_H__ */
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 4b7452c..c659328 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -1449,7 +1449,7 @@ qemuDomainHelperGetVcpus(virDomainObjPtr vm, virVcpuInfoPtr info, int maxinfo,
&(info[i].cpu),
NULL,
vm->pid,
- priv->vcpupids[i]) < 0) {
+ qemuDomainGetVCpuPid(vm, i)) < 0) {
virReportSystemError(errno, "%s",
_("cannot get vCPU placement & pCPU
time"));
return -1;
@@ -1462,7 +1462,7 @@ qemuDomainHelperGetVcpus(virDomainObjPtr vm, virVcpuInfoPtr info, int maxinfo,
unsigned char *cpumap = VIR_GET_CPUMAP(cpumaps, maplen, v);
virBitmapPtr map = NULL;
- if (!(map = virProcessGetAffinity(priv->vcpupids[v])))
+ if (!(map = virProcessGetAffinity(qemuDomainGetVCpuPid(vm, v))))
return -1;
virBitmapToDataBuf(map, cpumap, maplen);
@@ -5156,7 +5156,8 @@ qemuDomainPinVcpuFlags(virDomainPtr dom,
goto endjob;
}
} else {
- if (virProcessSetAffinity(priv->vcpupids[vcpu], pcpumap) < 0) {
+ if (virProcessSetAffinity(qemuDomainGetVCpuPid(vm, vcpu),
+ pcpumap) < 0) {
virReportError(VIR_ERR_SYSTEM_ERROR,
_("failed to set cpu affinity for vcpu %d"),
vcpu);
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index d7f45b3..4a2cc66 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2286,7 +2286,6 @@ qemuProcessSetLinkStates(virQEMUDriverPtr driver,
static int
qemuProcessSetVcpuAffinities(virDomainObjPtr vm)
{
- qemuDomainObjPrivatePtr priv = vm->privateData;
virDomainDefPtr def = vm->def;
virDomainPinDefPtr pininfo;
int n;
@@ -2319,7 +2318,7 @@ qemuProcessSetVcpuAffinities(virDomainObjPtr vm)
n)))
continue;
- if (virProcessSetAffinity(priv->vcpupids[n],
+ if (virProcessSetAffinity(qemuDomainGetVCpuPid(vm, n),
pininfo->cpumask) < 0) {
goto cleanup;
}
@@ -2407,7 +2406,7 @@ qemuProcessSetSchedulers(virDomainObjPtr vm)
size_t i = 0;
for (i = 0; i < priv->nvcpupids; i++) {
- if (qemuProcessSetSchedParams(i, priv->vcpupids[i],
+ if (qemuProcessSetSchedParams(i, qemuDomainGetVCpuPid(vm, i),
vm->def->cputune.nvcpusched,
vm->def->cputune.vcpusched) < 0)
return -1;