Add qemuDomainAddIOThread and qemuDomainDelIOThread in order to add or
remove an IOThread to/from the domain for either the live or config
options.

The implementation for the 'live' option will use the iothreadpids list
in order to make decisions, while the 'config' option will use the
iothreadids list. Additionally, for deletion each may have to adjust
the iothreadpin list.

Since IOThreads are implemented as QMP objects, the code makes use of
the existing qemuMonitorAddObject and qemuMonitorDelObject APIs.

Signed-off-by: John Ferlan <jferlan@redhat.com>
---
 src/conf/domain_audit.c  |   9 +
 src/conf/domain_audit.h  |   6 +
 src/libvirt_private.syms |   1 +
 src/qemu/qemu_driver.c   | 432 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 448 insertions(+)
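
A rough usage sketch of how the new entry points are meant to be driven
from the public API; this assumes the virDomainAddIOThread() and
virDomainDelIOThread() wrappers mirror the driver signatures wired up
below, and the "demo-guest" domain name is only a placeholder:

    #include <libvirt/libvirt.h>

    /* Sketch only: assumes virDomainAddIOThread()/virDomainDelIOThread()
     * take (dom, iothread_id[, name], flags) as wired up in this patch;
     * "demo-guest" is a placeholder domain name. */
    int
    main(void)
    {
        int ret = -1;
        virConnectPtr conn = virConnectOpen("qemu:///system");
        virDomainPtr dom = NULL;

        if (!conn || !(dom = virDomainLookupByName(conn, "demo-guest")))
            goto cleanup;

        /* Hotplug IOThread 3 into the running guest and persist it */
        if (virDomainAddIOThread(dom, 3, NULL,
                                 VIR_DOMAIN_AFFECT_LIVE |
                                 VIR_DOMAIN_AFFECT_CONFIG) < 0)
            goto cleanup;

        /* ... and remove it again; this fails while a disk still uses it */
        if (virDomainDelIOThread(dom, 3,
                                 VIR_DOMAIN_AFFECT_LIVE |
                                 VIR_DOMAIN_AFFECT_CONFIG) < 0)
            goto cleanup;

        ret = 0;

     cleanup:
        if (dom)
            virDomainFree(dom);
        if (conn)
            virConnectClose(conn);
        return ret;
    }

On the QEMU side the live path boils down to object-add/object-del of an
"iothread" object, roughly:

    {"execute": "object-add", "arguments": {"qom-type": "iothread", "id": "iothread3"}}
    {"execute": "object-del", "arguments": {"id": "iothread3"}}
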
diff --git a/src/conf/domain_audit.c b/src/conf/domain_audit.c
index 159ebf5..92fcdd3 100644
--- a/src/conf/domain_audit.c
+++ b/src/conf/domain_audit.c
@@ -790,6 +790,15 @@ virDomainAuditVcpu(virDomainObjPtr vm,
return virDomainAuditResource(vm, "vcpu", oldvcpu, newvcpu, reason,
success);
}
+void
+virDomainAuditIOThread(virDomainObjPtr vm,
+ unsigned int oldiothread, unsigned int newiothread,
+ const char *reason, bool success)
+{
+ return virDomainAuditResource(vm, "iothread", oldiothread, newiothread,
+ reason, success);
+}
+
static void
virDomainAuditLifecycle(virDomainObjPtr vm, const char *op,
const char *reason, bool success)
diff --git a/src/conf/domain_audit.h b/src/conf/domain_audit.h
index 4c1ef90..97dadca 100644
--- a/src/conf/domain_audit.h
+++ b/src/conf/domain_audit.h
@@ -102,6 +102,12 @@ void virDomainAuditVcpu(virDomainObjPtr vm,
const char *reason,
bool success)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(4);
+void virDomainAuditIOThread(virDomainObjPtr vm,
+ unsigned int oldiothread,
+ unsigned int newiothread,
+ const char *reason,
+ bool success)
+ ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(4);
void virDomainAuditSecurityLabel(virDomainObjPtr vm,
bool success)
ATTRIBUTE_NONNULL(1);
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index 7e55aa0..e4629b1 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -129,6 +129,7 @@ virDomainAuditDisk;
virDomainAuditFS;
virDomainAuditHostdev;
virDomainAuditInit;
+virDomainAuditIOThread;
virDomainAuditMemory;
virDomainAuditNet;
virDomainAuditNetDevice;
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index d99f886..5b0784f 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -6186,6 +6186,436 @@ qemuDomainPinIOThread(virDomainPtr dom,
return ret;
}
+static int
+qemuDomainHotplugIOThread(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ unsigned int iothread_id,
+ const char *name,
+ bool add)
+{
+ qemuDomainObjPrivatePtr priv = vm->privateData;
+ char *alias = NULL;
+ size_t i, idx;
+ int rc = -1;
+ int ret = -1;
+ unsigned int orig_niothreads = vm->def->iothreads;
+ unsigned int exp_niothreads = vm->def->iothreads;
+ int new_niothreads = 0;
+ qemuMonitorIOThreadInfoPtr *new_iothreads = NULL;
+ virCgroupPtr cgroup_iothread = NULL;
+ char *mem_mask = NULL;
+
+ /* Let's see if we can find this iothread_id in our iothreadpids list
+ * For add finding the same iothread_id will cause a failure since we
+ * cannot add the same iothread_id twice.
+ * For del finding our iothread_id is good since we cannot delete
+ * something that doesn't exist
+ */
+ for (idx = 0; idx < priv->niothreadpids; idx++) {
+ if (iothread_id == priv->iothreadpids[idx]->iothread_id)
+ break;
+ }
+
+ if (add) {
+ if (idx < priv->niothreadpids) {
+ virReportError(VIR_ERR_INVALID_ARG,
+ _("an IOThread is already using iothread_id "
+ "'%u' in iothreadpids"),
+ iothread_id);
+ goto cleanup;
+ }
+ } else {
+ if (idx == priv->niothreadpids) {
+ virReportError(VIR_ERR_INVALID_ARG,
+                           _("cannot find IOThread '%u' in iothreadpids"),
+                           iothread_id);
+ goto cleanup;
+ }
+
+ /* The qemuDomainDelThread doesn't pass (or need to pass) the
+ * name. So we'll grab it here so that we can formulate the
+ * correct alias for qemuMonitorDelObject to find this object.
+ */
+ name = priv->iothreadpids[idx]->name;
+ }
+
+ /* Generate alias */
+ if (name) {
+        if (virAsprintf(&alias, "%s_iothread%u", name, iothread_id) < 0)
+            return -1;
+ } else {
+ if (virAsprintf(&alias, "iothread%u", iothread_id) < 0)
+ return -1;
+ }
+
+ qemuDomainObjEnterMonitor(driver, vm);
+
+ if (!virDomainObjIsActive(vm)) {
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("cannot change IOThreads for an inactive domain"));
+ goto exit_monitor;
+ }
+
+ if (add) {
+ rc = qemuMonitorAddObject(priv->mon, "iothread", alias, NULL);
+ exp_niothreads++;
+ } else {
+ rc = qemuMonitorDelObject(priv->mon, alias);
+ exp_niothreads--;
+ }
+
+ if (rc < 0)
+ goto exit_monitor;
+
+ /* After hotplugging the IOThreads we need to re-detect the
+ * IOThreads thread_id's, adjust the cgroups, thread affinity,
+ * and the priv->iothreadpids list.
+ */
+ if ((new_niothreads = qemuMonitorGetIOThreads(priv->mon,
+ &new_iothreads)) < 0) {
+ virResetLastError();
+ goto exit_monitor;
+ }
+
+ if (qemuDomainObjExitMonitor(driver, vm) < 0)
+ goto cleanup;
+
+ /* ohhh something went wrong */
+ if (new_niothreads != exp_niothreads) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+                       _("got wrong number of IOThread ids from QEMU monitor. "
+ "got %d, wanted %d"),
+ new_niothreads, exp_niothreads);
+ vm->def->iothreads = new_niothreads;
+ goto cleanup;
+ }
+ vm->def->iothreads = exp_niothreads;
+
+ if (virDomainNumatuneGetMode(vm->def->numa, -1) ==
+ VIR_DOMAIN_NUMATUNE_MEM_STRICT &&
+ virDomainNumatuneMaybeFormatNodeset(vm->def->numa,
+ priv->autoNodeset,
+ &mem_mask, -1) < 0)
+ goto cleanup;
+
+
+ if (add) {
+ qemuDomainIOThreadInfoPtr info;
+ unsigned int idval = 0;
+ char *idname = NULL;
+ int thread_id;
+
+ /*
+ * If we've successfully added an IOThread, find out where we added it
+ * in the new IOThread list, so we can add it to our iothreadpids list
+ */
+ for (i = 0; i < new_niothreads; i++) {
+
+ if (qemuDomainParseIOThreadAlias(new_iothreads[i]->name,
+ &idval, &idname) < 0)
+ goto cleanup;
+
+ if (iothread_id == idval)
+ break;
+
+ VIR_FREE(idname);
+ }
+
+ /* something else went wrong */
+ if (idval != iothread_id) {
+ VIR_FREE(idname);
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+                           _("cannot find new IOThread '%u' in QEMU monitor."),
+                           iothread_id);
+ goto cleanup;
+ }
+ thread_id = new_iothreads[i]->thread_id;
+
+ if (VIR_ALLOC(info) < 0) {
+ VIR_FREE(idname);
+ goto cleanup;
+ }
+ info->thread_id = thread_id;
+ info->iothread_id = iothread_id;
+ info->name = idname;
+ if (VIR_APPEND_ELEMENT(priv->iothreadpids,
+ priv->niothreadpids, info) < 0) {
+ VIR_FREE(info->name);
+ VIR_FREE(info);
+ goto cleanup;
+ }
+
+ /* Add IOThread to cgroup if present */
+ if (priv->cgroup) {
+ cgroup_iothread =
+ qemuDomainAddCgroupForThread(priv->cgroup,
+ VIR_CGROUP_THREAD_IOTHREAD,
+ iothread_id, mem_mask, thread_id);
+ if (!cgroup_iothread)
+                /* Do we remove from iothreadpids? */
+ goto cleanup;
+ }
+
+ /* Inherit def->cpuset */
+ if (vm->def->cpumask) {
+ if (qemuDomainHotplugAddPin(vm->def->cpumask, iothread_id,
+ &vm->def->cputune.iothreadspin,
+                                        &vm->def->cputune.niothreadspin) < 0)
+
+            /* Do we remove from iothreadpids && scratch the cgroup? */
+ goto cleanup;
+
+ if (qemuDomainHotplugPinThread(vm->def->cpumask, iothread_id,
+ thread_id, cgroup_iothread) < 0)
+            /* Do we remove from iothreadpids, iothreadspin, and cgroup? */
+ goto cleanup;
+
+ if (qemuProcessSetSchedParams(iothread_id, thread_id,
+ vm->def->cputune.niothreadsched,
+ vm->def->cputune.iothreadsched) < 0)
+ goto cleanup;
+ }
+ } else {
+
+ /* Remove our iothread_id from the list of iothreadpid's */
+ if (VIR_DELETE_ELEMENT(priv->iothreadpids, idx,
+ priv->niothreadpids) < 0)
+ goto cleanup;
+
+ /* Remove the cgroup links */
+ if (qemuDomainDelCgroupForThread(priv->cgroup,
+ VIR_CGROUP_THREAD_IOTHREAD,
+ iothread_id) < 0)
+ goto cleanup;
+
+ /* Free pin setting */
+ virDomainPinDel(&vm->def->cputune.iothreadspin,
+ &vm->def->cputune.niothreadspin,
+ iothread_id);
+ }
+
+ ret = 0;
+
+ cleanup:
+ if (new_iothreads) {
+ for (i = 0; i < new_niothreads; i++)
+ qemuMonitorIOThreadInfoFree(new_iothreads[i]);
+ VIR_FREE(new_iothreads);
+ }
+ VIR_FREE(mem_mask);
+ virDomainAuditIOThread(vm, orig_niothreads, new_niothreads,
+ "update", rc == 0);
+ if (cgroup_iothread)
+ virCgroupFree(&cgroup_iothread);
+ VIR_FREE(alias);
+ return ret;
+
+ exit_monitor:
+ ignore_value(qemuDomainObjExitMonitor(driver, vm));
+ goto cleanup;
+}
+
+static int
+qemuDomainChgIOThread(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ unsigned int iothread_id,
+ const char *name,
+ bool add,
+ unsigned int flags)
+{
+ virQEMUDriverConfigPtr cfg = NULL;
+ virCapsPtr caps = NULL;
+ qemuDomainObjPrivatePtr priv;
+ virCgroupPtr cgroup_temp = NULL;
+ virBitmapPtr all_nodes = NULL;
+ char *all_nodes_str = NULL;
+ char *mem_mask = NULL;
+ virDomainDefPtr persistentDef;
+ size_t i;
+ int ret = -1;
+
+ if ((unsigned short) iothread_id != iothread_id) {
+ virReportError(VIR_ERR_INVALID_ARG,
+                       _("argument out of range: %u"), iothread_id);
+ return -1;
+ }
+
+
+ cfg = virQEMUDriverGetConfig(driver);
+
+ if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
+ goto cleanup;
+
+ if (!add) {
+ /* If there is a disk using the IOThread to be removed, then fail. */
+ for (i = 0; i < vm->def->ndisks; i++) {
+ if (vm->def->disks[i]->iothread == iothread_id) {
+ virReportError(VIR_ERR_INVALID_ARG,
+ _("cannot remove IOThread %u since it "
+ "is being used by disk path '%s'"),
+ iothread_id,
+ NULLSTR(vm->def->disks[i]->src->path));
+ goto cleanup;
+ }
+ }
+ }
+
+ priv = vm->privateData;
+
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ goto cleanup;
+
+ if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+ if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_EMULATOR, 0,
+ false, &cgroup_temp) < 0)
+ goto endjob;
+
+ if (!(all_nodes = virNumaGetHostNodeset()))
+ goto endjob;
+
+ if (!(all_nodes_str = virBitmapFormat(all_nodes)))
+ goto endjob;
+
+ if (virCgroupGetCpusetMems(cgroup_temp, &mem_mask) < 0 ||
+ virCgroupSetCpusetMems(cgroup_temp, all_nodes_str) < 0)
+ goto endjob;
+ }
+
+ if (virDomainLiveConfigHelperMethod(caps, driver->xmlopt, vm, &flags,
+ &persistentDef) < 0)
+ goto endjob;
+
+ /* For a live change - let's make sure the binary supports this */
+ if (flags & VIR_DOMAIN_AFFECT_LIVE &&
+ !virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_OBJECT_IOTHREAD)) {
+ virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+ _("IOThreads not supported with this binary"));
+ goto endjob;
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+
+ if (qemuDomainHotplugIOThread(driver, vm, iothread_id, name, add) < 0)
+ goto endjob;
+
+ if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm) < 0)
+ goto endjob;
+ }
+
+ if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+ virDomainIOThreadIDDefPtr iddef;
+
+ iddef = virDomainIOThreadIDFind(persistentDef, iothread_id);
+ if (add) {
+ /* Already there? Then error since we cannot have the same
+ * iothread_id listed twice
+ */
+ if (iddef) {
+ virReportError(VIR_ERR_INVALID_ARG,
+ _("an IOThread is already using iothread_id "
+ "'%u' in persistent iothreadids"),
+ iothread_id);
+ goto endjob;
+ }
+
+ if (virDomainIOThreadIDAdd(persistentDef, iothread_id, name) < 0)
+ goto endjob;
+
+ /* Nothing to do in iothreadspin list (that's a separate command) */
+
+ persistentDef->iothreads++;
+ } else {
+ if (!iddef) {
+ virReportError(VIR_ERR_INVALID_ARG,
+                               _("cannot find IOThread '%u' in persistent "
+ "iothreadids"),
+ iothread_id);
+                goto endjob;
+ }
+
+ virDomainIOThreadIDDel(persistentDef, iothread_id);
+ virDomainPinDel(&persistentDef->cputune.iothreadspin,
+ &persistentDef->cputune.niothreadspin,
+ iothread_id);
+ persistentDef->iothreads--;
+ }
+
+ if (virDomainSaveConfig(cfg->configDir, persistentDef) < 0)
+ goto endjob;
+ }
+
+ ret = 0;
+
+ endjob:
+ if (mem_mask) {
+ virErrorPtr err = virSaveLastError();
+ virCgroupSetCpusetMems(cgroup_temp, mem_mask);
+ virSetError(err);
+ virFreeError(err);
+ }
+ qemuDomainObjEndJob(driver, vm);
+
+ cleanup:
+ VIR_FREE(mem_mask);
+ VIR_FREE(all_nodes_str);
+ virBitmapFree(all_nodes);
+ virCgroupFree(&cgroup_temp);
+ virObjectUnref(caps);
+ virObjectUnref(cfg);
+ return ret;
+}
+
+static int
+qemuDomainAddIOThread(virDomainPtr dom,
+ unsigned int iothread_id,
+ const char *name,
+ unsigned int flags)
+{
+ virQEMUDriverPtr driver = dom->conn->privateData;
+ virDomainObjPtr vm = NULL;
+ int ret = -1;
+
+ virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+ VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+ if (!(vm = qemuDomObjFromDomain(dom)))
+ goto cleanup;
+
+ if (virDomainAddIOThreadEnsureACL(dom->conn, vm->def, flags) < 0)
+ goto cleanup;
+
+ ret = qemuDomainChgIOThread(driver, vm, iothread_id, name, true, flags);
+
+ cleanup:
+ qemuDomObjEndAPI(&vm);
+ return ret;
+}
+
+
+static int
+qemuDomainDelIOThread(virDomainPtr dom,
+ unsigned int iothread_id,
+ unsigned int flags)
+{
+ virQEMUDriverPtr driver = dom->conn->privateData;
+ virDomainObjPtr vm = NULL;
+ int ret = -1;
+
+ virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
+ VIR_DOMAIN_AFFECT_CONFIG, -1);
+
+ if (!(vm = qemuDomObjFromDomain(dom)))
+ goto cleanup;
+
+ if (virDomainDelIOThreadEnsureACL(dom->conn, vm->def, flags) < 0)
+ goto cleanup;
+ ret = qemuDomainChgIOThread(driver, vm, iothread_id, NULL, false, flags);
+
+ cleanup:
+ qemuDomObjEndAPI(&vm);
+ return ret;
+}
+
static int qemuDomainGetSecurityLabel(virDomainPtr dom, virSecurityLabelPtr seclabel)
{
virQEMUDriverPtr driver = dom->conn->privateData;
@@ -19863,6 +20293,8 @@ static virHypervisorDriver qemuHypervisorDriver = {
.domainGetMaxVcpus = qemuDomainGetMaxVcpus, /* 0.4.4 */
.domainGetIOThreadInfo = qemuDomainGetIOThreadInfo, /* 1.2.14 */
.domainPinIOThread = qemuDomainPinIOThread, /* 1.2.14 */
+ .domainAddIOThread = qemuDomainAddIOThread, /* 1.2.15 */
+ .domainDelIOThread = qemuDomainDelIOThread, /* 1.2.15 */
.domainGetSecurityLabel = qemuDomainGetSecurityLabel, /* 0.6.1 */
.domainGetSecurityLabelList = qemuDomainGetSecurityLabelList, /* 0.10.0 */
.nodeGetSecurityModel = qemuNodeGetSecurityModel, /* 0.6.1 */
--
2.1.0