Create a new cgroup and move all hypervisor threads to the new cgroup.
We can then do the following:
1. limit only vcpu usage rather than the whole qemu process
2. apply limits to the hypervisor threads (including vhost-net threads)
(A short sketch of the thread-move mechanism follows the diffstat below.)
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
---
src/qemu/qemu_cgroup.c | 67 ++++++++++++++++++++++++++++++++++++++++++++---
src/qemu/qemu_cgroup.h | 2 ++
src/qemu/qemu_process.c | 6 ++++-
3 files changed, 70 insertions(+), 5 deletions(-)
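
For reviewers who want the mechanism spelled out: under cgroup v1, moving a
single thread into a group boils down to writing its TID into the destination
group's "tasks" file, once per mounted controller. The patch does this through
virCgroupMoveTask(); the standalone sketch below only illustrates the idea,
and the helper name and example path are made up for illustration, not part
of libvirt.

    /* Minimal sketch (not libvirt code): attach one thread to a cgroup v1
     * group by writing its TID into the group's "tasks" file. */
    #include <limits.h>
    #include <stdio.h>
    #include <sys/types.h>

    static int move_thread_to_cgroup(const char *cgroup_dir, pid_t tid)
    {
        char path[PATH_MAX];
        FILE *fp;

        /* e.g. cgroup_dir = "/sys/fs/cgroup/cpu/libvirt/qemu/guest/hypervisor"
         * (hypothetical mount layout) */
        if (snprintf(path, sizeof(path), "%s/tasks", cgroup_dir) >= (int)sizeof(path))
            return -1;

        if (!(fp = fopen(path, "w")))
            return -1;

        /* The cgroup v1 "tasks" file moves only this thread, not the whole
         * thread group. */
        if (fprintf(fp, "%d\n", (int)tid) < 0) {
            fclose(fp);
            return -1;
        }

        return fclose(fp) == 0 ? 0 : -1;
    }

The same write has to be repeated for every controller the domain cgroup
spans, which is why the new function loops over VIR_CGROUP_CONTROLLER_LAST.
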
diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
index f8f375f..5f7e8b0 100644
--- a/src/qemu/qemu_cgroup.c
+++ b/src/qemu/qemu_cgroup.c
@@ -523,11 +523,12 @@ int qemuSetupCgroupForVcpu(struct qemud_driver *driver, virDomainObjPtr vm)
}
if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
- /* If we does not know VCPU<->PID mapping or all vcpu runs in the same
+ /* If we do not know the VCPU<->PID mapping or all vcpus run in the same
* thread, we cannot control each vcpu.
*/
- virCgroupFree(&cgroup);
- return 0;
+ qemuReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("Unable to get vcpus' pids"));
+ goto cleanup;
}
for (i = 0; i < priv->nvcpupids; i++) {
@@ -564,7 +565,11 @@ int qemuSetupCgroupForVcpu(struct qemud_driver *driver, virDomainObjPtr vm)
return 0;
cleanup:
- virCgroupFree(&cgroup_vcpu);
+ if (cgroup_vcpu) {
+ virCgroupRemove(cgroup_vcpu);
+ virCgroupFree(&cgroup_vcpu);
+ }
+
if (cgroup) {
virCgroupRemove(cgroup);
virCgroupFree(&cgroup);
@@ -573,6 +578,60 @@ cleanup:
return -1;
}
+int qemuSetupCgroupForHypervisor(struct qemud_driver *driver,
+ virDomainObjPtr vm)
+{
+ virCgroupPtr cgroup = NULL;
+ virCgroupPtr cgroup_hypervisor = NULL;
+ int rc, i;
+
+ if (driver->cgroup == NULL)
+ return 0; /* Not supported, so claim success */
+
+ rc = virCgroupForDomain(driver->cgroup, vm->def->name, &cgroup, 0);
+ if (rc != 0) {
+ virReportSystemError(-rc,
+ _("Unable to find cgroup for %s"),
+ vm->def->name);
+ goto cleanup;
+ }
+
+ rc = virCgroupForHypervisor(cgroup, &cgroup_hypervisor, 1);
+ if (rc < 0) {
+ virReportSystemError(-rc,
+ _("Unable to create hypervisor cgroup for %s"),
+ vm->def->name);
+ goto cleanup;
+ }
+
+ for (i = 0; i < VIR_CGROUP_CONTROLLER_LAST; i++) {
+ rc = virCgroupMoveTask(cgroup, cgroup_hypervisor, i);
+ if (rc < 0) {
+ virReportSystemError(-rc,
+                                 _("Unable to move tasks from domain cgroup to "
+ "hypervisor cgroup for %s"),
+ vm->def->name);
+ goto cleanup;
+ }
+ }
+
+ virCgroupFree(&cgroup_hypervisor);
+ virCgroupFree(&cgroup);
+ return 0;
+
+cleanup:
+ if (cgroup_hypervisor) {
+ virCgroupRemove(cgroup_hypervisor);
+ virCgroupFree(&cgroup_hypervisor);
+ }
+
+ if (cgroup) {
+ virCgroupRemove(cgroup);
+ virCgroupFree(&cgroup);
+ }
+
+ return rc;
+}
int qemuRemoveCgroup(struct qemud_driver *driver,
virDomainObjPtr vm,
diff --git a/src/qemu/qemu_cgroup.h b/src/qemu/qemu_cgroup.h
index c1023b3..cf0d383 100644
--- a/src/qemu/qemu_cgroup.h
+++ b/src/qemu/qemu_cgroup.h
@@ -54,6 +54,8 @@ int qemuSetupCgroupVcpuBW(virCgroupPtr cgroup,
unsigned long long period,
long long quota);
int qemuSetupCgroupForVcpu(struct qemud_driver *driver, virDomainObjPtr vm);
+int qemuSetupCgroupForHypervisor(struct qemud_driver *driver,
+ virDomainObjPtr vm);
int qemuRemoveCgroup(struct qemud_driver *driver,
virDomainObjPtr vm,
int quiet);
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index c5140c3..dcd4941 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -3740,10 +3740,14 @@ int qemuProcessStart(virConnectPtr conn,
if (qemuProcessDetectVcpuPIDs(driver, vm) < 0)
goto cleanup;
- VIR_DEBUG("Setting cgroup for each VCPU(if required)");
+ VIR_DEBUG("Setting cgroup for each VCPU (if required)");
if (qemuSetupCgroupForVcpu(driver, vm) < 0)
goto cleanup;
+ VIR_DEBUG("Setting cgroup for hypervisor (if required)");
+ if (qemuSetupCgroupForHypervisor(driver, vm) < 0)
+ goto cleanup;
+
VIR_DEBUG("Setting VCPU affinities");
if (qemuProcessSetVcpuAffinites(conn, vm) < 0)
goto cleanup;
--
1.7.10.2