[PATCH v2 00/15] qemu: Allow setting EventLoopBaseProperties

v2 of: https://listman.redhat.com/archives/libvir-list/2022-June/232118.html

diff to v1:
- switched from <mainloop/> to <defaultiothread/>
- switched from long long to int for pool sizes (this means that the 02/16
  patch from the original series that introduced virXMLPropLongLong() is no
  longer needed and thus not in this series)
- Extended some docs
- Hopefully, I've worked in all Peter's review points.

Michal Prívozník (15):
  virxml: Introduce VIR_XML_PROP_NONNEGATIVE flag
  virDomainDefParseIOThreads: Use g_autoptr() for @iothrid
  virDomainIOThreadIDDefArrayInit: Decrease scope of @iothrid
  conf: Move iothread formatter into a separate function
  conf: Introduce allocator for virDomainIOThreadIDDef
  conf: Introduce pool_min and pool_max attributes to IOThread
  qemu: Introduce QEMU_CAPS_IOTHREAD_THREAD_POOL_MAX
  qemu_validate: Check if QEMU's capable of setting iothread pool size
  qemu: Generate command line for IOThread pool size
  include: Introduce typed params for virDomainSetIOThreadParams wrt pool size
  qemu: Wire up new virDomainSetIOThreadParams parameters
  virsh: Wire up new virDomainSetIOThreadParams parameters
  conf: Expose QEMU's main loop object
  qemu_validate: Check if QEMU's capable of setting main loop pool size
  qemu: Generate command line for main-loop pool size

 docs/formatdomain.rst                          |  14 +-
 docs/manpages/virsh.rst                        |   7 +-
 include/libvirt/libvirt-domain.h               |  22 +++
 src/conf/domain_conf.c                         | 162 +++++++++++++++---
 src/conf/domain_conf.h                         |  11 ++
 src/conf/schemas/domaincommon.rng              |  25 +++
 src/conf/virconftypes.h                        |   2 +
 src/qemu/qemu_capabilities.c                   |   2 +
 src/qemu/qemu_capabilities.h                   |   1 +
 src/qemu/qemu_command.c                        |  26 ++-
 src/qemu/qemu_driver.c                         |  62 ++++++-
 src/qemu/qemu_monitor.h                        |   4 +
 src/qemu/qemu_monitor_json.c                   |   2 +
 src/qemu/qemu_validate.c                       |  33 ++++
 src/util/virxml.c                              |   7 +
 src/util/virxml.h                              |   3 +
 .../caps_7.1.0.x86_64.xml                      |   1 +
 ...othreads-ids-pool-sizes.x86_64-latest.args  |  45 +++++
 .../iothreads-ids-pool-sizes.xml               |  62 +++++++
 tests/qemuxml2argvtest.c                       |   1 +
 ...iothreads-ids-pool-sizes.x86_64-latest.xml  |   1 +
 tests/qemuxml2xmltest.c                        |   1 +
 tools/virsh-domain.c                           |  24 ++-
 23 files changed, 480 insertions(+), 38 deletions(-)
 create mode 100644 tests/qemuxml2argvdata/iothreads-ids-pool-sizes.x86_64-latest.args
 create mode 100644 tests/qemuxml2argvdata/iothreads-ids-pool-sizes.xml
 create mode 120000 tests/qemuxml2xmloutdata/iothreads-ids-pool-sizes.x86_64-latest.xml

-- 
2.35.1

For easier attribute parsing we have the virXMLProp*() family of functions. These accept flags through which a caller can impose conditions on the attribute value, for instance VIR_XML_PROP_NONZERO when the attribute may not be zero, etc. What we are missing is VIR_XML_PROP_NONNEGATIVE for when the attribute may not be negative. Obviously, this flag makes sense only for some members of the virXMLProp*() family.

Signed-off-by: Michal Privoznik <mprivozn@redhat.com>
Reviewed-by: Peter Krempa <pkrempa@redhat.com>
---
 src/util/virxml.c | 7 +++++++
 src/util/virxml.h | 3 +++
 2 files changed, 10 insertions(+)

diff --git a/src/util/virxml.c b/src/util/virxml.c
index 12b2ef635a..d6e2e5dd91 100644
--- a/src/util/virxml.c
+++ b/src/util/virxml.c
@@ -643,6 +643,13 @@ virXMLPropInt(xmlNodePtr node,
         return -1;
     }
 
+    if ((flags & VIR_XML_PROP_NONNEGATIVE) && (val < 0)) {
+        virReportError(VIR_ERR_XML_ERROR,
+                       _("Invalid value for attribute '%s' in element '%s': '%s'. Expected non-negative value"),
+                       name, node->name, tmp);
+        return -1;
+    }
+
     if ((flags & VIR_XML_PROP_NONZERO) && (val == 0)) {
         virReportError(VIR_ERR_XML_ERROR,
                        _("Invalid value for attribute '%s' in element '%s': Zero is not permitted"),
diff --git a/src/util/virxml.h b/src/util/virxml.h
index 5d49056bc7..539228a9ba 100644
--- a/src/util/virxml.h
+++ b/src/util/virxml.h
@@ -38,6 +38,9 @@ typedef enum {
     VIR_XML_PROP_NONE = 0,
     VIR_XML_PROP_REQUIRED = 1 << 0, /* Attribute may not be absent */
     VIR_XML_PROP_NONZERO = 1 << 1, /* Attribute may not be zero */
+    VIR_XML_PROP_NONNEGATIVE = 1 << 2, /* Attribute may not be negative, makes
+                                          sense only for some virXMLProp*()
+                                          functions. */
 } virXMLPropFlags;
-- 
2.35.1
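As an illustration of the new flag, a parser for an optional attribute that must not be negative looks roughly like the sketch below; the helper name is made up here, while the attribute name and the call pattern mirror what patch 06/15 of this series does for thread_pool_min/thread_pool_max:

static int
virExampleParseThreadPoolMin(xmlNodePtr node,
                             int *result)
{
    /* Optional base-10 integer attribute: negative values are rejected
     * with an error, and *result defaults to -1 when the attribute is
     * absent. A negative return value means a parse error was reported. */
    return virXMLPropInt(node, "thread_pool_min", 10,
                         VIR_XML_PROP_NONNEGATIVE,
                         result, -1);
}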

Using g_autoptr() for @iothrid variable inside virDomainDefParseIOThreads() allows us to drop explicit call to virDomainIOThreadIDDefFree() in one case. Signed-off-by: Michal Privoznik <mprivozn@redhat.com> Reviewed-by: Peter Krempa <pkrempa@redhat.com> --- src/conf/domain_conf.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c index 5d0d436a40..3b4274e037 100644 --- a/src/conf/domain_conf.c +++ b/src/conf/domain_conf.c @@ -17046,7 +17046,8 @@ virDomainDefParseIOThreads(virDomainDef *def, def->iothreadids = g_new0(virDomainIOThreadIDDef *, n); for (i = 0; i < n; i++) { - virDomainIOThreadIDDef *iothrid = NULL; + g_autoptr(virDomainIOThreadIDDef) iothrid = NULL; + if (!(iothrid = virDomainIOThreadIDDefParseXML(nodes[i]))) return -1; @@ -17054,10 +17055,9 @@ virDomainDefParseIOThreads(virDomainDef *def, virReportError(VIR_ERR_XML_ERROR, _("duplicate iothread id '%u' found"), iothrid->iothread_id); - virDomainIOThreadIDDefFree(iothrid); return -1; } - def->iothreadids[def->niothreadids++] = iothrid; + def->iothreadids[def->niothreadids++] = g_steal_pointer(&iothrid); } return virDomainIOThreadIDDefArrayInit(def, iothreads); -- 2.35.1

In virDomainIOThreadIDDefArrayInit() the variable @iothrid is used only inside a loop but is declared for whole function. Bring the variable into the loop so that it's obvious that the variable is not used elsewhere. Signed-off-by: Michal Privoznik <mprivozn@redhat.com> Reviewed-by: Peter Krempa <pkrempa@redhat.com> --- src/conf/domain_conf.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c index 3b4274e037..eec5941089 100644 --- a/src/conf/domain_conf.c +++ b/src/conf/domain_conf.c @@ -3508,7 +3508,6 @@ virDomainIOThreadIDDefArrayInit(virDomainDef *def, { size_t i; ssize_t nxt = -1; - virDomainIOThreadIDDef *iothrid = NULL; g_autoptr(virBitmap) thrmap = NULL; /* Same value (either 0 or some number), then we have none to fill in or @@ -3533,6 +3532,8 @@ virDomainIOThreadIDDefArrayInit(virDomainDef *def, /* Populate iothreadids[] using the set bit number from thrmap */ while (def->niothreadids < iothreads) { + g_autoptr(virDomainIOThreadIDDef) iothrid = NULL; + if ((nxt = virBitmapNextSetBit(thrmap, nxt)) < 0) { virReportError(VIR_ERR_INTERNAL_ERROR, "%s", _("failed to populate iothreadids")); @@ -3541,7 +3542,7 @@ virDomainIOThreadIDDefArrayInit(virDomainDef *def, iothrid = g_new0(virDomainIOThreadIDDef, 1); iothrid->iothread_id = nxt; iothrid->autofill = true; - def->iothreadids[def->niothreadids++] = iothrid; + def->iothreadids[def->niothreadids++] = g_steal_pointer(&iothrid); } return 0; -- 2.35.1

Formatting iothreads is currently open coded inside of virDomainDefFormatInternalSetRootName(). While this works, it makes the function needlessly long, especially if the formatting code will expand in near future. Therefore, move it into a separate function. At the same time, make virDomainDefIothreadShouldFormat() accept const domain definition so that the new function can also accept const domain definition. Formatters shouldn't need to change definition. Signed-off-by: Michal Privoznik <mprivozn@redhat.com> Reviewed-by: Peter Krempa <pkrempa@redhat.com> --- src/conf/domain_conf.c | 42 +++++++++++++++++++++++++++--------------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c index eec5941089..c60ab4e064 100644 --- a/src/conf/domain_conf.c +++ b/src/conf/domain_conf.c @@ -27569,7 +27569,7 @@ virDomainCpuDefFormat(virBuffer *buf, static bool -virDomainDefIothreadShouldFormat(virDomainDef *def) +virDomainDefIothreadShouldFormat(const virDomainDef *def) { size_t i; @@ -27582,6 +27582,31 @@ virDomainDefIothreadShouldFormat(virDomainDef *def) } +static void +virDomainDefIOThreadsFormat(virBuffer *buf, + const virDomainDef *def) +{ + g_auto(virBuffer) childrenBuf = VIR_BUFFER_INIT_CHILD(buf); + size_t i; + + if (def->niothreadids == 0) + return; + + virBufferAsprintf(buf, "<iothreads>%zu</iothreads>\n", + def->niothreadids); + + if (!virDomainDefIothreadShouldFormat(def)) + return; + + for (i = 0; i < def->niothreadids; i++) { + virBufferAsprintf(&childrenBuf, "<iothread id='%u'/>\n", + def->iothreadids[i]->iothread_id); + } + + virXMLFormatElement(buf, "iothreadids", NULL, &childrenBuf); +} + + static void virDomainIOMMUDefFormat(virBuffer *buf, const virDomainIOMMUDef *iommu) @@ -28227,20 +28252,7 @@ virDomainDefFormatInternalSetRootName(virDomainDef *def, if (virDomainCpuDefFormat(buf, def) < 0) return -1; - if (def->niothreadids > 0) { - virBufferAsprintf(buf, "<iothreads>%zu</iothreads>\n", - def->niothreadids); - if (virDomainDefIothreadShouldFormat(def)) { - virBufferAddLit(buf, "<iothreadids>\n"); - virBufferAdjustIndent(buf, 2); - for (i = 0; i < def->niothreadids; i++) { - virBufferAsprintf(buf, "<iothread id='%u'/>\n", - def->iothreadids[i]->iothread_id); - } - virBufferAdjustIndent(buf, -2); - virBufferAddLit(buf, "</iothreadids>\n"); - } - } + virDomainDefIOThreadsFormat(buf, def); if (virDomainCputuneDefFormat(buf, def, flags) < 0) return -1; -- 2.35.1

So far, iothread configuration structure (virDomainIOThreadIDDef) is allocated by plain g_new0(). This is perfectly okay because all members of the struct default to value 0 anyway. But soon this is going to change. Therefore, replace those g_new0() with a function so that the default value can be set consistently in one place. Signed-off-by: Michal Privoznik <mprivozn@redhat.com> Reviewed-by: Peter Krempa <pkrempa@redhat.com> --- src/conf/domain_conf.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c index c60ab4e064..8d4b27dbc0 100644 --- a/src/conf/domain_conf.c +++ b/src/conf/domain_conf.c @@ -3476,6 +3476,15 @@ virDomainIOThreadIDArrayHasPin(virDomainDef *def) } +static virDomainIOThreadIDDef * +virDomainIOThreadIDDefNew(void) +{ + virDomainIOThreadIDDef *def = g_new0(virDomainIOThreadIDDef, 1); + + return def; +} + + void virDomainIOThreadIDDefFree(virDomainIOThreadIDDef *def) { @@ -3539,7 +3548,7 @@ virDomainIOThreadIDDefArrayInit(virDomainDef *def, _("failed to populate iothreadids")); return -1; } - iothrid = g_new0(virDomainIOThreadIDDef, 1); + iothrid = virDomainIOThreadIDDefNew(); iothrid->iothread_id = nxt; iothrid->autofill = true; def->iothreadids[def->niothreadids++] = g_steal_pointer(&iothrid); @@ -17008,7 +17017,7 @@ virDomainIdmapDefParseXML(xmlXPathContextPtr ctxt, static virDomainIOThreadIDDef * virDomainIOThreadIDDefParseXML(xmlNodePtr node) { - g_autoptr(virDomainIOThreadIDDef) iothrid = g_new0(virDomainIOThreadIDDef, 1); + g_autoptr(virDomainIOThreadIDDef) iothrid = virDomainIOThreadIDDefNew(); if (virXMLPropUInt(node, "id", 10, VIR_XML_PROP_REQUIRED | VIR_XML_PROP_NONZERO, @@ -22931,8 +22940,7 @@ virDomainIOThreadIDAdd(virDomainDef *def, { virDomainIOThreadIDDef *iothrid = NULL; - iothrid = g_new0(virDomainIOThreadIDDef, 1); - + iothrid = virDomainIOThreadIDDefNew(); iothrid->iothread_id = iothread_id; VIR_APPEND_ELEMENT_COPY(def->iothreadids, def->niothreadids, iothrid); -- 2.35.1

At least in case of QEMU an IOThread is actually a pool of threads (see iothread_set_aio_context_params() in QEMU's code base). As such, it can have minimal and maximal number of worker threads. Allow setting them in domain XML. Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- docs/formatdomain.rst | 5 +- src/conf/domain_conf.c | 34 ++++++++++- src/conf/domain_conf.h | 3 + src/conf/schemas/domaincommon.rng | 10 +++ .../iothreads-ids-pool-sizes.xml | 61 +++++++++++++++++++ ...iothreads-ids-pool-sizes.x86_64-latest.xml | 1 + tests/qemuxml2xmltest.c | 1 + 7 files changed, 111 insertions(+), 4 deletions(-) create mode 100644 tests/qemuxml2argvdata/iothreads-ids-pool-sizes.xml create mode 120000 tests/qemuxml2xmloutdata/iothreads-ids-pool-sizes.x86_64-latest.xml diff --git a/docs/formatdomain.rst b/docs/formatdomain.rst index 312b605a8b..2aa39b2f63 100644 --- a/docs/formatdomain.rst +++ b/docs/formatdomain.rst @@ -675,7 +675,7 @@ host/guest with many LUNs. :since:`Since 1.2.8 (QEMU only)` <iothread id="2"/> <iothread id="4"/> <iothread id="6"/> - <iothread id="8"/> + <iothread id="8" thread_pool_min="2" thread_pool_max="32"/> </iothreadids> ... </domain> @@ -696,6 +696,9 @@ host/guest with many LUNs. :since:`Since 1.2.8 (QEMU only)` any predefined ``id``. If there are more ``iothreadids`` defined than ``iothreads`` defined for the domain, then the ``iothreads`` value will be adjusted accordingly. :since:`Since 1.2.15` + The element has two optional attributes ``thread_pool_min`` and + ``hread_pool_max`` which allow setting lower and upper boundary for number + of worker threads for given IOThread. :since:`Since 8.5.0` CPU Tuning diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c index 8d4b27dbc0..8891289866 100644 --- a/src/conf/domain_conf.c +++ b/src/conf/domain_conf.c @@ -3481,6 +3481,9 @@ virDomainIOThreadIDDefNew(void) { virDomainIOThreadIDDef *def = g_new0(virDomainIOThreadIDDef, 1); + def->thread_pool_min = -1; + def->thread_pool_max = -1; + return def; } @@ -17008,7 +17011,7 @@ virDomainIdmapDefParseXML(xmlXPathContextPtr ctxt, * * <iothreads>4</iothreads> * <iothreadids> - * <iothread id='1'/> + * <iothread id='1' thread_pool_min="0" thread_pool_max="60"/> * <iothread id='3'/> * <iothread id='5'/> * <iothread id='7'/> @@ -17024,6 +17027,16 @@ virDomainIOThreadIDDefParseXML(xmlNodePtr node) &iothrid->iothread_id) < 0) return NULL; + if (virXMLPropInt(node, "thread_pool_min", 10, + VIR_XML_PROP_NONNEGATIVE, + &iothrid->thread_pool_min, -1) < 0) + return NULL; + + if (virXMLPropInt(node, "thread_pool_max", 10, + VIR_XML_PROP_NONNEGATIVE, + &iothrid->thread_pool_max, -1) < 0) + return NULL; + return g_steal_pointer(&iothrid); } @@ -27607,8 +27620,23 @@ virDomainDefIOThreadsFormat(virBuffer *buf, return; for (i = 0; i < def->niothreadids; i++) { - virBufferAsprintf(&childrenBuf, "<iothread id='%u'/>\n", - def->iothreadids[i]->iothread_id); + virDomainIOThreadIDDef *iothread = def->iothreadids[i]; + virBuffer attrBuf = VIR_BUFFER_INITIALIZER; + + virBufferAsprintf(&attrBuf, " id='%u'", + iothread->iothread_id); + + if (iothread->thread_pool_min >= 0) { + virBufferAsprintf(&attrBuf, " thread_pool_min='%d'", + iothread->thread_pool_min); + } + + if (iothread->thread_pool_max >= 0) { + virBufferAsprintf(&attrBuf, " thread_pool_max='%d'", + iothread->thread_pool_max); + } + + virXMLFormatElement(&childrenBuf, "iothread", &attrBuf, NULL); } virXMLFormatElement(buf, "iothreadids", NULL, &childrenBuf); diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h index 
e7e0f24443..2a5001f15c 100644 --- a/src/conf/domain_conf.h +++ b/src/conf/domain_conf.h @@ -2646,6 +2646,9 @@ struct _virDomainIOThreadIDDef { virBitmap *cpumask; virDomainThreadSchedParam sched; + + int thread_pool_min; + int thread_pool_max; }; void virDomainIOThreadIDDefFree(virDomainIOThreadIDDef *def); diff --git a/src/conf/schemas/domaincommon.rng b/src/conf/schemas/domaincommon.rng index cc598212a8..7e6310832b 100644 --- a/src/conf/schemas/domaincommon.rng +++ b/src/conf/schemas/domaincommon.rng @@ -829,6 +829,16 @@ <attribute name="id"> <ref name="unsignedInt"/> </attribute> + <optional> + <attribute name="thread_pool_min"> + <ref name="unsignedLong"/> + </attribute> + </optional> + <optional> + <attribute name="thread_pool_max"> + <ref name="unsignedLong"/> + </attribute> + </optional> </element> </zeroOrMore> </element> diff --git a/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.xml b/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.xml new file mode 100644 index 0000000000..0f93d14f12 --- /dev/null +++ b/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.xml @@ -0,0 +1,61 @@ +<domain type='qemu'> + <name>QEMUGuest1</name> + <uuid>c7a5fdbd-edaf-9455-926a-d65c16db1809</uuid> + <memory unit='KiB'>219136</memory> + <currentMemory unit='KiB'>219136</currentMemory> + <vcpu placement='static'>6</vcpu> + <iothreads>5</iothreads> + <iothreadids> + <iothread id='2' thread_pool_min='0' thread_pool_max='60'/> + <iothread id='4' thread_pool_min='1' thread_pool_max='1'/> + <iothread id='1'/> + <iothread id='3'/> + <iothread id='5'/> + </iothreadids> + <os> + <type arch='x86_64' machine='q35'>hvm</type> + <boot dev='hd'/> + </os> + <cpu mode='custom' match='exact' check='none'> + <model fallback='forbid'>qemu64</model> + </cpu> + <clock offset='utc'/> + <on_poweroff>destroy</on_poweroff> + <on_reboot>restart</on_reboot> + <on_crash>destroy</on_crash> + <devices> + <emulator>/usr/bin/qemu-system-x86_64</emulator> + <disk type='block' device='disk'> + <driver name='qemu' type='raw'/> + <source dev='/dev/HostVG/QEMUGuest1'/> + <target dev='vda' bus='virtio'/> + <address type='pci' domain='0x0000' bus='0x02' slot='0x00' function='0x0'/> + </disk> + <controller type='pci' index='0' model='pcie-root'/> + <controller type='pci' index='1' model='pcie-root-port'> + <model name='pcie-root-port'/> + <target chassis='1' port='0x8'/> + <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x0' multifunction='on'/> + </controller> + <controller type='pci' index='2' model='pcie-root-port'> + <model name='pcie-root-port'/> + <target chassis='2' port='0x9'/> + <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/> + </controller> + <controller type='pci' index='3' model='pcie-root-port'> + <model name='pcie-root-port'/> + <target chassis='3' port='0xa'/> + <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/> + </controller> + <controller type='usb' index='0' model='qemu-xhci'> + <address type='pci' domain='0x0000' bus='0x01' slot='0x00' function='0x0'/> + </controller> + <controller type='sata' index='0'> + <address type='pci' domain='0x0000' bus='0x00' slot='0x1f' function='0x2'/> + </controller> + <input type='mouse' bus='ps2'/> + <input type='keyboard' bus='ps2'/> + <audio id='1' type='none'/> + <memballoon model='none'/> + </devices> +</domain> diff --git a/tests/qemuxml2xmloutdata/iothreads-ids-pool-sizes.x86_64-latest.xml b/tests/qemuxml2xmloutdata/iothreads-ids-pool-sizes.x86_64-latest.xml new file mode 120000 index 0000000000..6ed642fe5f --- 
/dev/null +++ b/tests/qemuxml2xmloutdata/iothreads-ids-pool-sizes.x86_64-latest.xml @@ -0,0 +1 @@ +../qemuxml2argvdata/iothreads-ids-pool-sizes.xml \ No newline at end of file diff --git a/tests/qemuxml2xmltest.c b/tests/qemuxml2xmltest.c index 3bd57306cc..b85d5fb757 100644 --- a/tests/qemuxml2xmltest.c +++ b/tests/qemuxml2xmltest.c @@ -597,6 +597,7 @@ mymain(void) DO_TEST_NOCAPS("smp"); DO_TEST_NOCAPS("iothreads"); DO_TEST_NOCAPS("iothreads-ids"); + DO_TEST_CAPS_LATEST("iothreads-ids-pool-sizes"); DO_TEST_NOCAPS("iothreads-ids-partial"); DO_TEST_NOCAPS("cputune-iothreads"); DO_TEST_NOCAPS("iothreads-disk"); -- 2.35.1

While at it you can update the summary ^^

On Tue, Jun 07, 2022 at 14:52:50 +0200, Michal Privoznik wrote:
At least in case of QEMU an IOThread is actually a pool of threads (see iothread_set_aio_context_params() in QEMU's code base). As such, it can have minimal and maximal number of worker threads. Allow setting them in domain XML.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- docs/formatdomain.rst | 5 +- src/conf/domain_conf.c | 34 ++++++++++- src/conf/domain_conf.h | 3 + src/conf/schemas/domaincommon.rng | 10 +++ .../iothreads-ids-pool-sizes.xml | 61 +++++++++++++++++++ ...iothreads-ids-pool-sizes.x86_64-latest.xml | 1 + tests/qemuxml2xmltest.c | 1 + 7 files changed, 111 insertions(+), 4 deletions(-) create mode 100644 tests/qemuxml2argvdata/iothreads-ids-pool-sizes.xml create mode 120000 tests/qemuxml2xmloutdata/iothreads-ids-pool-sizes.x86_64-latest.xml
diff --git a/docs/formatdomain.rst b/docs/formatdomain.rst index 312b605a8b..2aa39b2f63 100644 --- a/docs/formatdomain.rst +++ b/docs/formatdomain.rst @@ -675,7 +675,7 @@ host/guest with many LUNs. :since:`Since 1.2.8 (QEMU only)` <iothread id="2"/> <iothread id="4"/> <iothread id="6"/> - <iothread id="8"/> + <iothread id="8" thread_pool_min="2" thread_pool_max="32"/> </iothreadids> ... </domain> @@ -696,6 +696,9 @@ host/guest with many LUNs. :since:`Since 1.2.8 (QEMU only)` any predefined ``id``. If there are more ``iothreadids`` defined than ``iothreads`` defined for the domain, then the ``iothreads`` value will be adjusted accordingly. :since:`Since 1.2.15` + The element has two optional attributes ``thread_pool_min`` and + ``hread_pool_max`` which allow setting lower and upper boundary for number
'thread' ^^
+ of worker threads for given IOThread. :since:`Since 8.5.0`
[...]
@@ -27607,8 +27620,23 @@ virDomainDefIOThreadsFormat(virBuffer *buf, return;
for (i = 0; i < def->niothreadids; i++) { - virBufferAsprintf(&childrenBuf, "<iothread id='%u'/>\n", - def->iothreadids[i]->iothread_id); + virDomainIOThreadIDDef *iothread = def->iothreadids[i]; + virBuffer attrBuf = VIR_BUFFER_INITIALIZER;
Commonly we use 'g_auto(virBuffer)', not a problem here, just a convention.
+ + virBufferAsprintf(&attrBuf, " id='%u'", + iothread->iothread_id);
Reviewed-by: Peter Krempa <pkrempa@redhat.com>

This capability reflects whether QEMU allows setting thread-pool-min and thread-pool-max attributes on iothread object. Since both attributes were introduced in the same commit (v7.0.0-878-g71ad4713cc) and can't exist independently of each other we can stick with one capability covering both of them. Signed-off-by: Michal Privoznik <mprivozn@redhat.com> Reviewed-by: Peter Krempa <pkrempa@redhat.com> --- src/qemu/qemu_capabilities.c | 2 ++ src/qemu/qemu_capabilities.h | 1 + tests/qemucapabilitiesdata/caps_7.1.0.x86_64.xml | 1 + 3 files changed, 4 insertions(+) diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c index d0c8217825..e8c96ffc1f 100644 --- a/src/qemu/qemu_capabilities.c +++ b/src/qemu/qemu_capabilities.c @@ -676,6 +676,7 @@ VIR_ENUM_IMPL(virQEMUCaps, /* 430 */ "chardev.qemu-vdagent", /* QEMU_CAPS_CHARDEV_QEMU_VDAGENT */ "display-dbus", /* QEMU_CAPS_DISPLAY_DBUS */ + "iothread.thread-pool-max", /* QEMU_CAPS_IOTHREAD_THREAD_POOL_MAX */ ); @@ -1627,6 +1628,7 @@ static struct virQEMUCapsStringFlags virQEMUCapsQMPSchemaQueries[] = { { "calc-dirty-rate/arg-type/mode", QEMU_CAPS_DIRTYRATE_MODE }, { "chardev-add/arg-type/backend/+qemu-vdagent", QEMU_CAPS_CHARDEV_QEMU_VDAGENT }, { "query-display-options/ret-type/+dbus", QEMU_CAPS_DISPLAY_DBUS }, + { "object-add/arg-type/+iothread/thread-pool-max", QEMU_CAPS_IOTHREAD_THREAD_POOL_MAX }, }; typedef struct _virQEMUCapsObjectTypeProps virQEMUCapsObjectTypeProps; diff --git a/src/qemu/qemu_capabilities.h b/src/qemu/qemu_capabilities.h index 26cd655db3..9bb81f87c2 100644 --- a/src/qemu/qemu_capabilities.h +++ b/src/qemu/qemu_capabilities.h @@ -651,6 +651,7 @@ typedef enum { /* virQEMUCapsFlags grouping marker for syntax-check */ /* 430 */ QEMU_CAPS_CHARDEV_QEMU_VDAGENT, /* -chardev qemu-vdagent */ QEMU_CAPS_DISPLAY_DBUS, /* -display dbus */ + QEMU_CAPS_IOTHREAD_THREAD_POOL_MAX, /* -object iothread.thread-pool-max */ QEMU_CAPS_LAST /* this must always be the last item */ } virQEMUCapsFlags; diff --git a/tests/qemucapabilitiesdata/caps_7.1.0.x86_64.xml b/tests/qemucapabilitiesdata/caps_7.1.0.x86_64.xml index 04ba103688..95d61bc274 100644 --- a/tests/qemucapabilitiesdata/caps_7.1.0.x86_64.xml +++ b/tests/qemucapabilitiesdata/caps_7.1.0.x86_64.xml @@ -243,6 +243,7 @@ <flag name='virtio-net.rss'/> <flag name='chardev.qemu-vdagent'/> <flag name='display-dbus'/> + <flag name='iothread.thread-pool-max'/> <version>7000050</version> <kvmVersion>0</kvmVersion> <microcodeVersion>43100244</microcodeVersion> -- 2.35.1

Now that we have a capability that reflects whether QEMU is capable of setting iothread pool size, let's introduce a validator check to make sure users are not trying to use this feature with QEMU that doesn't support it. Signed-off-by: Michal Privoznik <mprivozn@redhat.com> Reviewed-by: Peter Krempa <pkrempa@redhat.com> --- src/qemu/qemu_validate.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/qemu/qemu_validate.c b/src/qemu/qemu_validate.c index 9b6245e6d7..7d11ae2c92 100644 --- a/src/qemu/qemu_validate.c +++ b/src/qemu/qemu_validate.c @@ -384,6 +384,27 @@ qemuValidateDomainDefCpu(virQEMUDriver *driver, } +static int +qemuValidateDomainDefIOThreads(const virDomainDef *def, + virQEMUCaps *qemuCaps) +{ + size_t i; + + for (i = 0; i < def->niothreadids; i++) { + virDomainIOThreadIDDef *iothread = def->iothreadids[i]; + + if ((iothread->thread_pool_min != -1 || iothread->thread_pool_max != -1) && + !virQEMUCapsGet(qemuCaps, QEMU_CAPS_IOTHREAD_THREAD_POOL_MAX)) { + virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", + _("thread_pool_min and thread_pool_max is not supported by this QEMU binary")); + return -1; + } + } + + return 0; +} + + static int qemuValidateDomainDefClockTimers(const virDomainDef *def, virQEMUCaps *qemuCaps) @@ -1168,6 +1189,9 @@ qemuValidateDomainDef(const virDomainDef *def, if (qemuDomainDefValidateMemoryHotplug(def, NULL) < 0) return -1; + if (qemuValidateDomainDefIOThreads(def, qemuCaps) < 0) + return -1; + if (qemuValidateDomainDefClockTimers(def, qemuCaps) < 0) return -1; -- 2.35.1

On Tue, Jun 07, 2022 at 14:52:52 +0200, Michal Privoznik wrote:
Now that we have a capability that reflects whether QEMU is capable of setting iothread pool size, let's introduce a validator check to make sure users are not trying to use this feature with QEMU that doesn't support it.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> Reviewed-by: Peter Krempa <pkrempa@redhat.com> --- src/qemu/qemu_validate.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+)
diff --git a/src/qemu/qemu_validate.c b/src/qemu/qemu_validate.c index 9b6245e6d7..7d11ae2c92 100644 --- a/src/qemu/qemu_validate.c +++ b/src/qemu/qemu_validate.c @@ -384,6 +384,27 @@ qemuValidateDomainDefCpu(virQEMUDriver *driver, }
+static int +qemuValidateDomainDefIOThreads(const virDomainDef *def, + virQEMUCaps *qemuCaps) +{ + size_t i; + + for (i = 0; i < def->niothreadids; i++) { + virDomainIOThreadIDDef *iothread = def->iothreadids[i]; + + if ((iothread->thread_pool_min != -1 || iothread->thread_pool_max != -1) && + !virQEMUCapsGet(qemuCaps, QEMU_CAPS_IOTHREAD_THREAD_POOL_MAX)) { + virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", + _("thread_pool_min and thread_pool_max is not supported by this QEMU binary")); + return -1; + }
You'll also need a check that 'thread_pool_max', if supplied, is greater than or equal to 'thread_pool_min', otherwise:

$ virsh dumpxml cd | grep thread_pool
    <iothread id='3' thread_pool_min='4' thread_pool_max='3'/>

$ virsh start cd
error: Failed to start domain 'cd'
error: internal error: process exited while connecting to monitor: 2022-06-07T14:58:52.420554Z qemu-system-x86_64: bad thread-pool-min/thread-pool-max values

On 6/7/22 16:59, Peter Krempa wrote:
On Tue, Jun 07, 2022 at 14:52:52 +0200, Michal Privoznik wrote:
Now that we have a capability that reflects whether QEMU is capable of setting iothread pool size, let's introduce a validator check to make sure users are not trying to use this feature with QEMU that doesn't support it.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> Reviewed-by: Peter Krempa <pkrempa@redhat.com> --- src/qemu/qemu_validate.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+)
diff --git a/src/qemu/qemu_validate.c b/src/qemu/qemu_validate.c index 9b6245e6d7..7d11ae2c92 100644 --- a/src/qemu/qemu_validate.c +++ b/src/qemu/qemu_validate.c @@ -384,6 +384,27 @@ qemuValidateDomainDefCpu(virQEMUDriver *driver, }
+static int +qemuValidateDomainDefIOThreads(const virDomainDef *def, + virQEMUCaps *qemuCaps) +{ + size_t i; + + for (i = 0; i < def->niothreadids; i++) { + virDomainIOThreadIDDef *iothread = def->iothreadids[i]; + + if ((iothread->thread_pool_min != -1 || iothread->thread_pool_max != -1) && + !virQEMUCapsGet(qemuCaps, QEMU_CAPS_IOTHREAD_THREAD_POOL_MAX)) { + virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", + _("thread_pool_min and thread_pool_max is not supported by this QEMU binary")); + return -1; + }
You'll also need a check that 'thread_pool_max', if supplied, is greater than or equal to 'thread_pool_min', otherwise:
$ virsh dumpxml cd | grep thread_pool
    <iothread id='3' thread_pool_min='4' thread_pool_max='3'/>

$ virsh start cd
error: Failed to start domain 'cd'
error: internal error: process exited while connecting to monitor: 2022-06-07T14:58:52.420554Z qemu-system-x86_64: bad thread-pool-min/thread-pool-max values
Ah, good point. But there's nothing QEMU-specific about this, so let me put it into the hypervisor-agnostic validator, which in turn allows me to amend this change into the 06/15 patch where the parser/formatter is introduced.

Michal
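For reference, a minimal sketch of what such a hypervisor-agnostic check could look like (the function name, its placement and the exact error code are assumptions, not taken from the posted series):

static int
virDomainDefIOThreadsValidate(const virDomainDef *def)
{
    size_t i;

    for (i = 0; i < def->niothreadids; i++) {
        const virDomainIOThreadIDDef *iothread = def->iothreadids[i];

        /* Only complain when both bounds are set and contradict each other. */
        if (iothread->thread_pool_min >= 0 &&
            iothread->thread_pool_max >= 0 &&
            iothread->thread_pool_min > iothread->thread_pool_max) {
            virReportError(VIR_ERR_XML_ERROR,
                           _("thread_pool_min (%d) can't be greater than thread_pool_max (%d) for iothread '%u'"),
                           iothread->thread_pool_min,
                           iothread->thread_pool_max,
                           iothread->iothread_id);
            return -1;
        }
    }

    return 0;
}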

Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- src/qemu/qemu_command.c | 10 ++++- ...othreads-ids-pool-sizes.x86_64-latest.args | 44 +++++++++++++++++++ tests/qemuxml2argvtest.c | 1 + 3 files changed, 53 insertions(+), 2 deletions(-) create mode 100644 tests/qemuxml2argvdata/iothreads-ids-pool-sizes.x86_64-latest.args diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c index 4f4d8cf25c..2734be7679 100644 --- a/src/qemu/qemu_command.c +++ b/src/qemu/qemu_command.c @@ -7441,9 +7441,15 @@ qemuBuildIOThreadCommandLine(virCommand *cmd, for (i = 0; i < def->niothreadids; i++) { g_autoptr(virJSONValue) props = NULL; - g_autofree char *alias = g_strdup_printf("iothread%u", def->iothreadids[i]->iothread_id); + const virDomainIOThreadIDDef *iothread = def->iothreadids[i]; + g_autofree char *alias = NULL; - if (qemuMonitorCreateObjectProps(&props, "iothread", alias, NULL) < 0) + alias = g_strdup_printf("iothread%u", iothread->iothread_id); + + if (qemuMonitorCreateObjectProps(&props, "iothread", alias, + "k:thread-pool-min", iothread->thread_pool_min, + "k:thread-pool-max", iothread->thread_pool_max, + NULL) < 0) return -1; if (qemuBuildObjectCommandlineFromJSON(cmd, props, qemuCaps) < 0) diff --git a/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.x86_64-latest.args b/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.x86_64-latest.args new file mode 100644 index 0000000000..32517e3d29 --- /dev/null +++ b/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.x86_64-latest.args @@ -0,0 +1,44 @@ +LC_ALL=C \ +PATH=/bin \ +HOME=/tmp/lib/domain--1-QEMUGuest1 \ +USER=test \ +LOGNAME=test \ +XDG_DATA_HOME=/tmp/lib/domain--1-QEMUGuest1/.local/share \ +XDG_CACHE_HOME=/tmp/lib/domain--1-QEMUGuest1/.cache \ +XDG_CONFIG_HOME=/tmp/lib/domain--1-QEMUGuest1/.config \ +/usr/bin/qemu-system-x86_64 \ +-name guest=QEMUGuest1,debug-threads=on \ +-S \ +-object '{"qom-type":"secret","id":"masterKey0","format":"raw","file":"/tmp/lib/domain--1-QEMUGuest1/master-key.aes"}' \ +-machine q35,usb=off,dump-guest-core=off,memory-backend=pc.ram \ +-accel tcg \ +-cpu qemu64 \ +-m 214 \ +-object '{"qom-type":"memory-backend-ram","id":"pc.ram","size":224395264}' \ +-overcommit mem-lock=off \ +-smp 6,sockets=6,cores=1,threads=1 \ +-object '{"qom-type":"iothread","id":"iothread2","thread-pool-min":0,"thread-pool-max":60}' \ +-object '{"qom-type":"iothread","id":"iothread4","thread-pool-min":1,"thread-pool-max":1}' \ +-object '{"qom-type":"iothread","id":"iothread1"}' \ +-object '{"qom-type":"iothread","id":"iothread3"}' \ +-object '{"qom-type":"iothread","id":"iothread5"}' \ +-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \ +-display none \ +-no-user-config \ +-nodefaults \ +-chardev socket,id=charmonitor,fd=1729,server=on,wait=off \ +-mon chardev=charmonitor,id=monitor,mode=control \ +-rtc base=utc \ +-no-shutdown \ +-no-acpi \ +-boot strict=on \ +-device '{"driver":"pcie-root-port","port":8,"chassis":1,"id":"pci.1","bus":"pcie.0","multifunction":true,"addr":"0x1"}' \ +-device '{"driver":"pcie-root-port","port":9,"chassis":2,"id":"pci.2","bus":"pcie.0","addr":"0x1.0x1"}' \ +-device '{"driver":"pcie-root-port","port":10,"chassis":3,"id":"pci.3","bus":"pcie.0","addr":"0x1.0x2"}' \ +-device '{"driver":"qemu-xhci","id":"usb","bus":"pci.1","addr":"0x0"}' \ +-blockdev '{"driver":"host_device","filename":"/dev/HostVG/QEMUGuest1","node-name":"libvirt-1-storage","auto-read-only":true,"discard":"unmap"}' \ +-blockdev '{"node-name":"libvirt-1-format","read-only":false,"driver":"raw","file":"libvirt-1-storage"}' \ +-device 
'{"driver":"virtio-blk-pci","bus":"pci.2","addr":"0x0","drive":"libvirt-1-format","id":"virtio-disk0","bootindex":1}' \ +-audiodev '{"id":"audio1","driver":"none"}' \ +-sandbox on,obsolete=deny,elevateprivileges=deny,spawn=deny,resourcecontrol=deny \ +-msg timestamp=on diff --git a/tests/qemuxml2argvtest.c b/tests/qemuxml2argvtest.c index bffe7aef8a..22e2661793 100644 --- a/tests/qemuxml2argvtest.c +++ b/tests/qemuxml2argvtest.c @@ -2038,6 +2038,7 @@ mymain(void) DO_TEST("iothreads", QEMU_CAPS_OBJECT_IOTHREAD); DO_TEST("iothreads-ids", QEMU_CAPS_OBJECT_IOTHREAD); DO_TEST("iothreads-ids-partial", QEMU_CAPS_OBJECT_IOTHREAD); + DO_TEST_CAPS_LATEST("iothreads-ids-pool-sizes"); DO_TEST_FAILURE_NOCAPS("iothreads-nocap"); DO_TEST("iothreads-disk", QEMU_CAPS_OBJECT_IOTHREAD); DO_TEST("iothreads-disk-virtio-ccw", QEMU_CAPS_OBJECT_IOTHREAD, -- 2.35.1

On Tue, Jun 07, 2022 at 14:52:53 +0200, Michal Privoznik wrote:
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- src/qemu/qemu_command.c | 10 ++++- ...othreads-ids-pool-sizes.x86_64-latest.args | 44 +++++++++++++++++++ tests/qemuxml2argvtest.c | 1 + 3 files changed, 53 insertions(+), 2 deletions(-) create mode 100644 tests/qemuxml2argvdata/iothreads-ids-pool-sizes.x86_64-latest.args
Reviewed-by: Peter Krempa <pkrempa@redhat.com>

Our public API offers virDomainSetIOThreadParams() function which allows users to set various aspects of IOThreads. Introduce two new typed parameters: VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN and VIR_DOMAIN_IOTHREAD_THREAD_POOL_MAX which will allow users to modify the thread-pool-min and thread-pool-max attributes of an iothread. Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- include/libvirt/libvirt-domain.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h index 24846046aa..b0020d247e 100644 --- a/include/libvirt/libvirt-domain.h +++ b/include/libvirt/libvirt-domain.h @@ -2493,6 +2493,28 @@ int virDomainDelIOThread(virDomainPtr domain, */ # define VIR_DOMAIN_IOTHREAD_POLL_SHRINK "poll_shrink" +/** + * VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN: + * + * Sets the lower bound for thread pool size. A value of -1 disables this bound + * leaving hypervisor use its default value. Accepted type is + * VIR_TYPED_PARAM_INT. + * + * Since: 8.5.0 + */ +# define VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN "thread_pool_min" + +/** + * VIR_DOMAIN_IOTHREAD_THREAD_POOL_MAX: + * + * Sets the upper bound for thread pool size. A value of -1 disables this bound + * leaving hypervisor use its default value. Accepted type is + * VIR_TYPED_PARAM_INT. + * + * Since: 8.5.0 + */ +# define VIR_DOMAIN_IOTHREAD_THREAD_POOL_MAX "thread_pool_max" + int virDomainSetIOThreadParams(virDomainPtr domain, unsigned int iothread_id, virTypedParameterPtr params, -- 2.35.1
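As a usage illustration (not part of the patch itself), a client could set both bounds on a running domain's IOThread roughly like this; the iothread ID and the concrete values below are arbitrary assumptions:

#include <libvirt/libvirt.h>

/* Sketch: raise the worker-pool bounds of IOThread 2 of a running domain. */
static int
exampleSetIOThreadPoolSize(virDomainPtr dom)
{
    virTypedParameterPtr params = NULL;
    int nparams = 0;
    int maxparams = 0;
    int ret = -1;

    if (virTypedParamsAddInt(&params, &nparams, &maxparams,
                             VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN, 8) < 0 ||
        virTypedParamsAddInt(&params, &nparams, &maxparams,
                             VIR_DOMAIN_IOTHREAD_THREAD_POOL_MAX, 16) < 0)
        goto cleanup;

    /* An IOThread with id 2 must already exist in the domain definition. */
    ret = virDomainSetIOThreadParams(dom, 2, params, nparams,
                                     VIR_DOMAIN_AFFECT_LIVE);

 cleanup:
    virTypedParamsFree(params, nparams);
    return ret;
}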

On Tue, Jun 07, 2022 at 14:52:54 +0200, Michal Privoznik wrote:
Our public API offers virDomainSetIOThreadParams() function which allows users to set various aspects of IOThreads. Introduce two new typed parameters: VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN and VIR_DOMAIN_IOTHREAD_THREAD_POOL_MAX which will allow users to modify the thread-pool-min and thread-pool-max attributes of an iothread.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- include/libvirt/libvirt-domain.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+)
Reviewed-by: Peter Krempa <pkrempa@redhat.com>

Introduced in previous commit, QEMU driver needs to be taught how to set VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN and VIR_DOMAIN_IOTHREAD_THREAD_POOL_MAX parameters on given IOThread. Fortunately, this is fairly trivial to do and since these two parameters are exposed in domain XML too the update of inactive XML can be wired up too. Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- src/qemu/qemu_driver.c | 62 +++++++++++++++++++++++++++++++++--- src/qemu/qemu_monitor.h | 4 +++ src/qemu/qemu_monitor_json.c | 2 ++ 3 files changed, 63 insertions(+), 5 deletions(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 0c6645ed89..1770a1882b 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -5318,6 +5318,26 @@ qemuDomainHotplugModIOThread(virQEMUDriver *driver, } +static int +qemuDomainHotplugModIOThreadIDDef(virDomainIOThreadIDDef *def, + qemuMonitorIOThreadInfo mondef) +{ + /* These have no representation in domain XML */ + if (mondef.set_poll_grow || + mondef.set_poll_max_ns || + mondef.set_poll_shrink) + return -1; + + if (mondef.set_thread_pool_min) + def->thread_pool_min = mondef.thread_pool_min; + + if (mondef.set_thread_pool_max) + def->thread_pool_max = mondef.thread_pool_max; + + return 0; +} + + static int qemuDomainHotplugDelIOThread(virQEMUDriver *driver, virDomainObj *vm, @@ -5425,6 +5445,10 @@ qemuDomainIOThreadParseParams(virTypedParameterPtr params, VIR_TYPED_PARAM_UINT, VIR_DOMAIN_IOTHREAD_POLL_SHRINK, VIR_TYPED_PARAM_UINT, + VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN, + VIR_TYPED_PARAM_INT, + VIR_DOMAIN_IOTHREAD_THREAD_POOL_MAX, + VIR_TYPED_PARAM_INT, NULL) < 0) return -1; @@ -5449,6 +5473,20 @@ qemuDomainIOThreadParseParams(virTypedParameterPtr params, if (rc == 1) iothread->set_poll_shrink = true; + if ((rc = virTypedParamsGetInt(params, nparams, + VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN, + &iothread->thread_pool_min)) < 0) + return -1; + if (rc == 1) + iothread->set_thread_pool_min = true; + + if ((rc = virTypedParamsGetInt(params, nparams, + VIR_DOMAIN_IOTHREAD_THREAD_POOL_MAX, + &iothread->thread_pool_max)) < 0) + return -1; + if (rc == 1) + iothread->set_thread_pool_max = true; + if (iothread->set_poll_max_ns && iothread->poll_max_ns > INT_MAX) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, _("poll-max-ns (%llu) must be less than or equal to %d"), @@ -5491,6 +5529,7 @@ qemuDomainChgIOThread(virQEMUDriver *driver, qemuDomainObjPrivate *priv; virDomainDef *def; virDomainDef *persistentDef; + virDomainIOThreadIDDef *iothreaddef = NULL; int ret = -1; cfg = virQEMUDriverGetConfig(driver); @@ -5530,7 +5569,9 @@ qemuDomainChgIOThread(virQEMUDriver *driver, break; case VIR_DOMAIN_IOTHREAD_ACTION_MOD: - if (!(virDomainIOThreadIDFind(def, iothread.iothread_id))) { + iothreaddef = virDomainIOThreadIDFind(def, iothread.iothread_id); + + if (!iothreaddef) { virReportError(VIR_ERR_INVALID_ARG, _("cannot find IOThread '%u' in iothreadids"), iothread.iothread_id); @@ -5540,6 +5581,7 @@ qemuDomainChgIOThread(virQEMUDriver *driver, if (qemuDomainHotplugModIOThread(driver, vm, iothread) < 0) goto endjob; + qemuDomainHotplugModIOThreadIDDef(iothreaddef, iothread); break; } @@ -5567,10 +5609,20 @@ qemuDomainChgIOThread(virQEMUDriver *driver, break; case VIR_DOMAIN_IOTHREAD_ACTION_MOD: - virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", - _("configuring persistent polling values is " - "not supported")); - goto endjob; + iothreaddef = virDomainIOThreadIDFind(def, iothread.iothread_id); + + if (!iothreaddef) { + virReportError(VIR_ERR_INVALID_ARG, + _("cannot find 
IOThread '%u' in iothreadids"), + iothread.iothread_id); + goto endjob; + } + + if (qemuDomainHotplugModIOThreadIDDef(iothreaddef, iothread) < 0) { + virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", + _("configuring persistent polling values is not supported")); + goto endjob; + } break; } diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h index be341d5196..997d8737f8 100644 --- a/src/qemu/qemu_monitor.h +++ b/src/qemu/qemu_monitor.h @@ -1308,9 +1308,13 @@ struct _qemuMonitorIOThreadInfo { unsigned long long poll_max_ns; unsigned int poll_grow; unsigned int poll_shrink; + int thread_pool_min; + int thread_pool_max; bool set_poll_max_ns; bool set_poll_grow; bool set_poll_shrink; + bool set_thread_pool_min; + bool set_thread_pool_max; }; int qemuMonitorGetIOThreads(qemuMonitor *mon, qemuMonitorIOThreadInfo ***iothreads, diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index dc05dfd047..e8fe1eceae 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -7431,6 +7431,8 @@ qemuMonitorJSONSetIOThread(qemuMonitor *mon, VIR_IOTHREAD_SET_PROP("poll-max-ns", poll_max_ns); VIR_IOTHREAD_SET_PROP("poll-grow", poll_grow); VIR_IOTHREAD_SET_PROP("poll-shrink", poll_shrink); + VIR_IOTHREAD_SET_PROP("thread-pool-min", thread_pool_min); + VIR_IOTHREAD_SET_PROP("thread-pool-max", thread_pool_max); #undef VIR_IOTHREAD_SET_PROP -- 2.35.1

On Tue, Jun 07, 2022 at 14:52:55 +0200, Michal Privoznik wrote:
Introduced in previous commit, QEMU driver needs to be taught how to set VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN and VIR_DOMAIN_IOTHREAD_THREAD_POOL_MAX parameters on given IOThread. Fortunately, this is fairly trivial to do and since these two parameters are exposed in domain XML too the update of inactive XML can be wired up too.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- src/qemu/qemu_driver.c | 62 +++++++++++++++++++++++++++++++++--- src/qemu/qemu_monitor.h | 4 +++ src/qemu/qemu_monitor_json.c | 2 ++ 3 files changed, 63 insertions(+), 5 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 0c6645ed89..1770a1882b 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c
[...]
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index dc05dfd047..e8fe1eceae 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -7431,6 +7431,8 @@ qemuMonitorJSONSetIOThread(qemuMonitor *mon, VIR_IOTHREAD_SET_PROP("poll-max-ns", poll_max_ns); VIR_IOTHREAD_SET_PROP("poll-grow", poll_grow); VIR_IOTHREAD_SET_PROP("poll-shrink", poll_shrink); + VIR_IOTHREAD_SET_PROP("thread-pool-min", thread_pool_min); + VIR_IOTHREAD_SET_PROP("thread-pool-max", thread_pool_max);
+/**
+ * VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN:
+ *
+ * Sets the lower bound for thread pool size. A value of -1 disables this bound
+ * leaving hypervisor use its default value. Accepted type is
+ * VIR_TYPED_PARAM_INT.
+ *
+ * Since: 8.5.0
+ */
+# define VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN "thread_pool_min"

I've tried following:

$ virsh iothreadset cd --id 3 --thread-pool-min 123
error: internal error: unable to execute QEMU command 'qom-set': bad thread-pool-min/thread-pool-max values

$ virsh iothreadset cd --id 3 --thread-pool-max 123

$ virsh iothreadset cd --id 3 --thread-pool-min 2

$ virsh iothreadset cd --id 3 --thread-pool-min -1
error: internal error: unable to execute QEMU command 'qom-set': thread-pool-min value must be in range [0, 9223372036854775807]

There are two conclusions:
- the '-1' value simply doesn't work as documented
- we'll need to document that, when live-setting, 'max' needs to be set first

On Tue, Jun 07, 2022 at 17:06:50 +0200, Peter Krempa wrote:
On Tue, Jun 07, 2022 at 14:52:55 +0200, Michal Privoznik wrote:
Introduced in previous commit, QEMU driver needs to be taught how to set VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN and VIR_DOMAIN_IOTHREAD_THREAD_POOL_MAX parameters on given IOThread. Fortunately, this is fairly trivial to do and since these two parameters are exposed in domain XML too the update of inactive XML can be wired up too.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- src/qemu/qemu_driver.c | 62 +++++++++++++++++++++++++++++++++--- src/qemu/qemu_monitor.h | 4 +++ src/qemu/qemu_monitor_json.c | 2 ++ 3 files changed, 63 insertions(+), 5 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 0c6645ed89..1770a1882b 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c
[...]
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c index dc05dfd047..e8fe1eceae 100644 --- a/src/qemu/qemu_monitor_json.c +++ b/src/qemu/qemu_monitor_json.c @@ -7431,6 +7431,8 @@ qemuMonitorJSONSetIOThread(qemuMonitor *mon, VIR_IOTHREAD_SET_PROP("poll-max-ns", poll_max_ns); VIR_IOTHREAD_SET_PROP("poll-grow", poll_grow); VIR_IOTHREAD_SET_PROP("poll-shrink", poll_shrink); + VIR_IOTHREAD_SET_PROP("thread-pool-min", thread_pool_min); + VIR_IOTHREAD_SET_PROP("thread-pool-max", thread_pool_max);
+/**
+ * VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN:
+ *
+ * Sets the lower bound for thread pool size. A value of -1 disables this bound
+ * leaving hypervisor use its default value. Accepted type is
+ * VIR_TYPED_PARAM_INT.
+ *
+ * Since: 8.5.0
+ */
+# define VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN "thread_pool_min"
I've tried following:
$ virsh iothreadset cd --id 3 --thread-pool-min 123
error: internal error: unable to execute QEMU command 'qom-set': bad thread-pool-min/thread-pool-max values
$ virsh iothreadset cd --id 3 --thread-pool-max 123
$ virsh iothreadset cd --id 3 --thread-pool-min 2
$ virsh iothreadset cd --id 3 --thread-pool-min -1
error: internal error: unable to execute QEMU command 'qom-set': thread-pool-min value must be in range [0, 9223372036854775807]
There are two conclusions:
- the '-1' value simply doesn't work as documented
- we'll need to document that, when live-setting, 'max' needs to be set first
So the second one happens only if you pick a minimum which is greater than the maximum auto-picked by qemu.

Also, since VIR_IOTHREAD_SET_PROP issues a separate command per property, the following compound operation doesn't work properly:

virsh iothreadset cd --id 3 --thread-pool-max 1234 --thread-pool-min 123
error: internal error: unable to execute QEMU command 'qom-set': bad thread-pool-min/thread-pool-max values

As it would IMO be a bit of an overkill to try to order them properly when changing the size, I suggest you ask users to do it sequentially in the docs.

On Tue, Jun 07, 2022 at 14:52:55 +0200, Michal Privoznik wrote:
Introduced in previous commit, QEMU driver needs to be taught how to set VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN and VIR_DOMAIN_IOTHREAD_THREAD_POOL_MAX parameters on given IOThread. Fortunately, this is fairly trivial to do and since these two parameters are exposed in domain XML too the update of inactive XML can be wired up too.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- src/qemu/qemu_driver.c | 62 +++++++++++++++++++++++++++++++++--- src/qemu/qemu_monitor.h | 4 +++ src/qemu/qemu_monitor_json.c | 2 ++ 3 files changed, 63 insertions(+), 5 deletions(-)
[...]
@@ -5567,10 +5609,20 @@ qemuDomainChgIOThread(virQEMUDriver *driver, break;
case VIR_DOMAIN_IOTHREAD_ACTION_MOD: - virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s", - _("configuring persistent polling values is " - "not supported")); - goto endjob; + iothreaddef = virDomainIOThreadIDFind(def, iothread.iothread_id);
This needs to reference 'persistentDef'. When setting the params on an inactive VM:

0x00007ffff7bef078 in virDomainIOThreadIDFind (def=0x0, iothread_id=3) at ../../../libvirt/src/conf/domain_conf.c:22939
22939       if (!def->iothreadids || !def->niothreadids)
(gdb) bt
#0  0x00007ffff7bef078 in virDomainIOThreadIDFind (def=0x0, iothread_id=3) at ../../../libvirt/src/conf/domain_conf.c:22939
#1  0x00007fffcb057309 in qemuDomainChgIOThread (driver=0x7fff9c14e910, vm=0x7fff9c1e5880, iothread=..., action=VIR_DOMAIN_IOTHREAD_ACTION_MOD, flags=0) at ../../../libvirt/src/qemu/qemu_driver.c:5612
#2  0x00007fffcb057aee in qemuDomainSetIOThreadParams (dom=0x7fff94014c10, iothread_id=3, params=0x7fffe4015820, nparams=1, flags=0) at ../../../libvirt/src/qemu/qemu_driver.c:5755
#3  0x00007ffff7e26f18 in virDomainSetIOThreadParams (domain=0x7fff94014c10, iothread_id=3, params=0x7fffe4015820, nparams=1, flags=0) at ../../../libvirt/src/libvirt-domain.c:8422
#4  0x00005555555933d5 in remoteDispatchDomainSetIOThreadParams (server=0x555555628080, client=0x5555556eb230, msg=0x5555556ee930, rerr=0x7ffff4c1e800, args=0x7fffe43c4650) at src/remote/remote_daemon_dispatch_stubs.h:11062
#5  0x00005555555932cc in remoteDispatchDomainSetIOThreadParamsHelper (server=0x555555628080, client=0x5555556eb230, msg=0x5555556ee930, rerr=0x7ffff4c1e800, args=0x7fffe43c4650, ret=0x0) at src/remote/remote_daemon_dispatch_stubs.h:11032
#6  0x00007ffff7c906b6 in virNetServerProgramDispatchCall (prog=0x5555556dcc10, server=0x555555628080, client=0x5555556eb230, msg=0x5555556ee930) at ../../../libvirt/src/rpc/virnetserverprogram.c:428
#7  0x00007ffff7c901fb in virNetServerProgramDispatch (prog=0x5555556dcc10, server=0x555555628080, client=0x5555556eb230, msg=0x5555556ee930) at ../../../libvirt/src/rpc/virnetserverprogram.c:302
#8  0x00007ffff7c98f63 in virNetServerProcessMsg (srv=0x555555628080, client=0x5555556eb230, prog=0x5555556dcc10, msg=0x5555556ee930) at ../../../libvirt/src/rpc/virnetserver.c:136
#9  0x00007ffff7c99029 in virNetServerHandleJob (jobOpaque=0x5555556f05e0, opaque=0x555555628080) at ../../../libvirt/src/rpc/virnetserver.c:156
#10 0x00007ffff7b7628b in virThreadPoolWorker (opaque=0x555555624ec0) at ../../../libvirt/src/util/virthreadpool.c:164
#11 0x00007ffff7b7572a in virThreadHelper (data=0x555555624ee0) at ../../../libvirt/src/util/virthread.c:256
#12 0x00007ffff74abe1d in start_thread () at /lib64/libc.so.6
#13 0x00007ffff75315e0 in clone3 () at /lib64/libc.so.6

Since virsh implements a wrapper over virDomainSetIOThreadParams() (command iothreadset) let's wire up new typed parameters there too. Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- docs/manpages/virsh.rst | 7 ++++++- tools/virsh-domain.c | 24 +++++++++++++++++++++++- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/docs/manpages/virsh.rst b/docs/manpages/virsh.rst index 965b89c99d..8c066c9176 100644 --- a/docs/manpages/virsh.rst +++ b/docs/manpages/virsh.rst @@ -2993,7 +2993,8 @@ iothreadset :: iothreadset domain iothread_id [[--poll-max-ns ns] [--poll-grow factor] - [--poll-shrink divisor]] + [--poll-shrink divisor] [--thread-pool-min value] + [--thread-pool-max value]] [[--config] [--live] | [--current]] Modifies an existing iothread of the domain using the specified @@ -3010,6 +3011,10 @@ for a running guest. Saving, destroying, stopping, etc. the guest will result in the polling values returning to hypervisor defaults at the next start, restore, etc. +The *--thread-pool-min* and *--thread-pool-max* options then set lower and +upper bound, respectively of number of threads in worker pool of given +iothread. + If *--live* is specified, affect a running guest. If the guest is not running an error is returned. If *--current* is specified or *--live* is not specified, then handle diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index bac4cc0fb6..53e85ddfac 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -7782,7 +7782,15 @@ static const vshCmdOptDef opts_iothreadset[] = { }, {.name = "poll-shrink", .type = VSH_OT_INT, - .help = N_("set the value for reduction of the IOThread polling time ") + .help = N_("set the value for reduction of the IOThread polling time") + }, + {.name = "thread-pool-min", + .type = VSH_OT_INT, + .help = N_("lower boundary for worker thread pool") + }, + {.name = "thread-pool-max", + .type = VSH_OT_INT, + .help = N_("upper boundary for worker thread pool") }, VIRSH_COMMON_OPT_DOMAIN_LIVE, VIRSH_COMMON_OPT_DOMAIN_CURRENT, @@ -7802,6 +7810,7 @@ cmdIOThreadSet(vshControl *ctl, const vshCmd *cmd) int maxparams = 0; unsigned long long poll_max; unsigned int poll_val; + int thread_val; int rc; if (live) @@ -7839,6 +7848,19 @@ cmdIOThreadSet(vshControl *ctl, const vshCmd *cmd) #undef VSH_IOTHREAD_SET_UINT_PARAMS +#define VSH_IOTHREAD_SET_INT_PARAMS(opt, param) \ + thread_val = -1; \ + if ((rc = vshCommandOptInt(ctl, cmd, opt, &thread_val)) < 0) \ + goto cleanup; \ + if (rc > 0 && \ + virTypedParamsAddInt(¶ms, &nparams, &maxparams, \ + param, thread_val) < 0) \ + goto save_error; + + VSH_IOTHREAD_SET_INT_PARAMS("thread-pool-min", VIR_DOMAIN_IOTHREAD_THREAD_POOL_MIN) + VSH_IOTHREAD_SET_INT_PARAMS("thread-pool-max", VIR_DOMAIN_IOTHREAD_THREAD_POOL_MAX) +#undef VSH_IOTHREAD_SET_INT_PARAMS + if (virDomainSetIOThreadParams(dom, id, params, nparams, flags) < 0) goto cleanup; -- 2.35.1
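For example, with this patch applied both bounds can be changed from virsh. The domain name and values below are made up, and (as discussed later in this thread) setting the maximum before the minimum avoids tripping over the hypervisor's current upper bound when raising the minimum on a running guest:

$ virsh iothreadset cd --id 2 --thread-pool-max 16
$ virsh iothreadset cd --id 2 --thread-pool-min 8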

On Tue, Jun 07, 2022 at 14:52:56 +0200, Michal Privoznik wrote:
Since virsh implements a wrapper over virDomainSetIOThreadParams() (command iothreadset) let's wire up new typed parameters there too.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- docs/manpages/virsh.rst | 7 ++++++- tools/virsh-domain.c | 24 +++++++++++++++++++++++- 2 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/docs/manpages/virsh.rst b/docs/manpages/virsh.rst index 965b89c99d..8c066c9176 100644 --- a/docs/manpages/virsh.rst +++ b/docs/manpages/virsh.rst @@ -2993,7 +2993,8 @@ iothreadset ::
iothreadset domain iothread_id [[--poll-max-ns ns] [--poll-grow factor] - [--poll-shrink divisor]] + [--poll-shrink divisor] [--thread-pool-min value] + [--thread-pool-max value]] [[--config] [--live] | [--current]]
Modifies an existing iothread of the domain using the specified @@ -3010,6 +3011,10 @@ for a running guest. Saving, destroying, stopping, etc. the guest will result in the polling values returning to hypervisor defaults at the next start, restore, etc.
+The *--thread-pool-min* and *--thread-pool-max* options then set lower and +upper bound, respectively of number of threads in worker pool of given +iothread.
We should probably mention that, in certain cases, if you pick too high a 'min' you'll need to set 'max' first.

Reviewed-by: Peter Krempa <pkrempa@redhat.com>

As of v7.0.0-877-g70ac26b9e5 QEMU exposes its main event loop as an QMP object. In the very next commit (v7.0.0-878-g71ad4713cc) it was extended for thread-pool-min and thread-pool-max attributes. Expose them under new <mainloop/> element. Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- docs/formatdomain.rst | 9 +++ src/conf/domain_conf.c | 63 +++++++++++++++++++ src/conf/domain_conf.h | 8 +++ src/conf/schemas/domaincommon.rng | 15 +++++ src/conf/virconftypes.h | 2 + .../iothreads-ids-pool-sizes.xml | 1 + 6 files changed, 98 insertions(+) diff --git a/docs/formatdomain.rst b/docs/formatdomain.rst index 2aa39b2f63..22eb540ba3 100644 --- a/docs/formatdomain.rst +++ b/docs/formatdomain.rst @@ -677,6 +677,7 @@ host/guest with many LUNs. :since:`Since 1.2.8 (QEMU only)` <iothread id="6"/> <iothread id="8" thread_pool_min="2" thread_pool_max="32"/> </iothreadids> + <defaultiothread thread_pool_min="8" thread_pool_max="16"> ... </domain> @@ -699,6 +700,14 @@ host/guest with many LUNs. :since:`Since 1.2.8 (QEMU only)` The element has two optional attributes ``thread_pool_min`` and ``hread_pool_max`` which allow setting lower and upper boundary for number of worker threads for given IOThread. :since:`Since 8.5.0` +``defaultiothread`` + The element then can have ``thread_pool_min`` and/or ``thread_pool_max`` + attributes, which control the lower and upper boundary for number of worker + threads for the emulator. Emulator might be multithreaded and spawn so + called worker threads on demand. In general neither of these attributes + should be set (leaving the emulator use its own default values), unless the + emulator runs in a real time workload and thus can't afford unpredictability + of time it takes to spawn new worker threads. :since:`Since 8.5.0` CPU Tuning diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c index 8891289866..b1e7c2ef22 100644 --- a/src/conf/domain_conf.c +++ b/src/conf/domain_conf.c @@ -3824,6 +3824,8 @@ void virDomainDefFree(virDomainDef *def) virDomainIOThreadIDDefArrayFree(def->iothreadids, def->niothreadids); + g_free(def->defaultIOThread); + virBitmapFree(def->cputune.emulatorpin); g_free(def->cputune.emulatorsched); @@ -17016,6 +17018,7 @@ virDomainIdmapDefParseXML(xmlXPathContextPtr ctxt, * <iothread id='5'/> * <iothread id='7'/> * </iothreadids> + * <defaultiothread thread_pool_min="8" thread_pool_max="8"/> */ static virDomainIOThreadIDDef * virDomainIOThreadIDDefParseXML(xmlNodePtr node) @@ -17041,6 +17044,38 @@ virDomainIOThreadIDDefParseXML(xmlNodePtr node) } +static int +virDomainDefaultIOThreadDefParse(virDomainDef *def, + xmlXPathContextPtr ctxt) +{ + xmlNodePtr node = NULL; + g_autofree virDomainDefaultIOThreadDef *thrd = NULL; + + node = virXPathNode("./defaultiothread", ctxt); + if (!node) + return 0; + + thrd = g_new0(virDomainDefaultIOThreadDef, 1); + + if (virXMLPropInt(node, "thread_pool_min", 10, + VIR_XML_PROP_NONNEGATIVE, + &thrd->thread_pool_min, -1) < 0) + return -1; + + if (virXMLPropInt(node, "thread_pool_max", 10, + VIR_XML_PROP_NONNEGATIVE, + &thrd->thread_pool_max, -1) < 0) + return -1; + + if (thrd->thread_pool_min == -1 && + thrd->thread_pool_max == -1) + return 0; + + def->defaultIOThread = g_steal_pointer(&thrd); + return 0; +} + + static int virDomainDefParseIOThreads(virDomainDef *def, xmlXPathContextPtr ctxt) @@ -17058,6 +17093,9 @@ virDomainDefParseIOThreads(virDomainDef *def, return -1; } + if (virDomainDefaultIOThreadDefParse(def, ctxt) < 0) + return -1; + /* Extract any iothread id's defined */ if ((n = 
virXPathNodeSet("./iothreadids/iothread", ctxt, &nodes)) < 0) return -1; @@ -27603,6 +27641,29 @@ virDomainDefIothreadShouldFormat(const virDomainDef *def) } +static void +virDomainDefaultIOThreadDefFormat(virBuffer *buf, + const virDomainDef *def) +{ + virBuffer attrBuf = VIR_BUFFER_INITIALIZER; + + if (!def->defaultIOThread) + return; + + if (def->defaultIOThread->thread_pool_min >= 0) { + virBufferAsprintf(&attrBuf, " thread_pool_min='%d'", + def->defaultIOThread->thread_pool_min); + } + + if (def->defaultIOThread->thread_pool_max >= 0) { + virBufferAsprintf(&attrBuf, " thread_pool_max='%d'", + def->defaultIOThread->thread_pool_max); + } + + virXMLFormatElement(buf, "defaultiothread", &attrBuf, NULL); +} + + static void virDomainDefIOThreadsFormat(virBuffer *buf, const virDomainDef *def) @@ -27640,6 +27701,8 @@ virDomainDefIOThreadsFormat(virBuffer *buf, } virXMLFormatElement(buf, "iothreadids", NULL, &childrenBuf); + + virDomainDefaultIOThreadDefFormat(buf, def); } diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h index 2a5001f15c..98b8ce703c 100644 --- a/src/conf/domain_conf.h +++ b/src/conf/domain_conf.h @@ -2655,6 +2655,12 @@ void virDomainIOThreadIDDefFree(virDomainIOThreadIDDef *def); G_DEFINE_AUTOPTR_CLEANUP_FUNC(virDomainIOThreadIDDef, virDomainIOThreadIDDefFree); +struct _virDomainDefaultIOThreadDef { + int thread_pool_min; + int thread_pool_max; +}; + + struct _virDomainCputune { unsigned long long shares; bool sharesSpecified; @@ -2863,6 +2869,8 @@ struct _virDomainDef { size_t niothreadids; virDomainIOThreadIDDef **iothreadids; + virDomainDefaultIOThreadDef *defaultIOThread; + virDomainCputune cputune; virDomainResctrlDef **resctrls; diff --git a/src/conf/schemas/domaincommon.rng b/src/conf/schemas/domaincommon.rng index 7e6310832b..e64900d0cf 100644 --- a/src/conf/schemas/domaincommon.rng +++ b/src/conf/schemas/domaincommon.rng @@ -844,6 +844,21 @@ </element> </optional> + <optional> + <element name="defaultiothread"> + <optional> + <attribute name="thread_pool_min"> + <ref name="unsignedLong"/> + </attribute> + </optional> + <optional> + <attribute name="thread_pool_max"> + <ref name="unsignedLong"/> + </attribute> + </optional> + </element> + </optional> + <optional> <ref name="blkiotune"/> </optional> diff --git a/src/conf/virconftypes.h b/src/conf/virconftypes.h index 21420ba8ea..c3f1c5fa01 100644 --- a/src/conf/virconftypes.h +++ b/src/conf/virconftypes.h @@ -142,6 +142,8 @@ typedef struct _virDomainIOMMUDef virDomainIOMMUDef; typedef struct _virDomainIOThreadIDDef virDomainIOThreadIDDef; +typedef struct _virDomainDefaultIOThreadDef virDomainDefaultIOThreadDef; + typedef struct _virDomainIdMapDef virDomainIdMapDef; typedef struct _virDomainIdMapEntry virDomainIdMapEntry; diff --git a/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.xml b/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.xml index 0f93d14f12..4cebdfada9 100644 --- a/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.xml +++ b/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.xml @@ -12,6 +12,7 @@ <iothread id='3'/> <iothread id='5'/> </iothreadids> + <defaultiothread thread_pool_min='8' thread_pool_max='16'/> <os> <type arch='x86_64' machine='q35'>hvm</type> <boot dev='hd'/> -- 2.35.1
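To make the new parser easier to follow: -1 serves as the "attribute not present" marker, and the element is only recorded when at least one attribute was given. An illustration derived from virDomainDefaultIOThreadDefParse() above (not part of the patch itself):

/*   <defaultiothread/>                        -> def->defaultIOThread stays NULL
 *   <defaultiothread thread_pool_min='8'/>    -> { .thread_pool_min = 8,
 *                                                  .thread_pool_max = -1 }
 *   <defaultiothread thread_pool_min='8'
 *                    thread_pool_max='16'/>   -> { .thread_pool_min = 8,
 *                                                  .thread_pool_max = 16 }
 */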

On Tue, Jun 07, 2022 at 14:52:57 +0200, Michal Privoznik wrote:
As of v7.0.0-877-g70ac26b9e5 QEMU exposes its main event loop as an QMP object. In the very next commit (v7.0.0-878-g71ad4713cc) it was extended for thread-pool-min and thread-pool-max attributes. Expose them under new <mainloop/> element.
Please fix the summary and commit message. Make sure to mention the name of the element.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- docs/formatdomain.rst | 9 +++ src/conf/domain_conf.c | 63 +++++++++++++++++++ src/conf/domain_conf.h | 8 +++ src/conf/schemas/domaincommon.rng | 15 +++++ src/conf/virconftypes.h | 2 + .../iothreads-ids-pool-sizes.xml | 1 + 6 files changed, 98 insertions(+)
diff --git a/docs/formatdomain.rst b/docs/formatdomain.rst index 2aa39b2f63..22eb540ba3 100644 --- a/docs/formatdomain.rst +++ b/docs/formatdomain.rst @@ -677,6 +677,7 @@ host/guest with many LUNs. :since:`Since 1.2.8 (QEMU only)` <iothread id="6"/> <iothread id="8" thread_pool_min="2" thread_pool_max="32"/> </iothreadids> + <defaultiothread thread_pool_min="8" thread_pool_max="16"> ... </domain>
@@ -699,6 +700,14 @@ host/guest with many LUNs. :since:`Since 1.2.8 (QEMU only)` The element has two optional attributes ``thread_pool_min`` and ``hread_pool_max`` which allow setting lower and upper boundary for number of worker threads for given IOThread. :since:`Since 8.5.0` +``defaultiothread`` + The element then can have ``thread_pool_min`` and/or ``thread_pool_max`` + attributes, which control the lower and upper boundary for number of worker + threads for the emulator. Emulator might be multithreaded and spawn so + called worker threads on demand. In general neither of these attributes + should be set (leaving the emulator use its own default values), unless the + emulator runs in a real time workload and thus can't afford unpredictability + of time it takes to spawn new worker threads. :since:`Since 8.5.0`
This description doesn't seem to capture what the 'defaultiothread' element actually represents; it starts right away by describing its attributes. Users might be confused. I don't have a specific suggestion though. The code looks good: Reviewed-by: Peter Krempa <pkrempa@redhat.com>

Since the main-loop and iothread classes are derived from the same class (EventLoopBaseClass) we don't need new capability and can use QEMU_CAPS_IOTHREAD_THREAD_POOL_MAX directly to check whether QEMU's capable of setting worker pool size. Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- src/qemu/qemu_validate.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/qemu/qemu_validate.c b/src/qemu/qemu_validate.c index 7d11ae2c92..c2495fb6b8 100644 --- a/src/qemu/qemu_validate.c +++ b/src/qemu/qemu_validate.c @@ -401,6 +401,15 @@ qemuValidateDomainDefIOThreads(const virDomainDef *def, } } + if (def->defaultIOThread && + ((def->defaultIOThread->thread_pool_min >= 0 || + def->defaultIOThread->thread_pool_max >= 0) && + !virQEMUCapsGet(qemuCaps, QEMU_CAPS_IOTHREAD_THREAD_POOL_MAX))) { + virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", + _("pool_min and pool_max is not supported by this QEMU binary")); + return -1; + } + return 0; } -- 2.35.1

On Tue, Jun 07, 2022 at 14:52:58 +0200, Michal Privoznik wrote:
Since the main-loop and iothread classes are derived from the same class (EventLoopBaseClass) we don't need new capability and can use QEMU_CAPS_IOTHREAD_THREAD_POOL_MAX directly to check whether QEMU's capable of setting worker pool size.
Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- src/qemu/qemu_validate.c | 9 +++++++++ 1 file changed, 9 insertions(+)
diff --git a/src/qemu/qemu_validate.c b/src/qemu/qemu_validate.c index 7d11ae2c92..c2495fb6b8 100644 --- a/src/qemu/qemu_validate.c +++ b/src/qemu/qemu_validate.c @@ -401,6 +401,15 @@ qemuValidateDomainDefIOThreads(const virDomainDef *def, } }
+ if (def->defaultIOThread && + ((def->defaultIOThread->thread_pool_min >= 0 || + def->defaultIOThread->thread_pool_max >= 0) && + !virQEMUCapsGet(qemuCaps, QEMU_CAPS_IOTHREAD_THREAD_POOL_MAX))) { + virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s", + _("pool_min and pool_max is not supported by this QEMU binary"));
The error message still uses the old field names ('pool_min'/'pool_max' rather than 'thread_pool_min'/'thread_pool_max'). Also, the same problem as with the regular IOThread pool sizes applies with regard to 'max' being smaller than 'min'. Reviewed-by: Peter Krempa <pkrempa@redhat.com>
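For reference, the additional boundary check asked for here might look roughly like the hunk below, mirroring the shape of the check in the patch above; the exact error wording and placement are only guesses, not part of the posted series:

/* Sketch of the suggested extra validation: reject a <defaultiothread/>
 * where both boundaries are set and the maximum ends up below the minimum. */
if (def->defaultIOThread &&
    def->defaultIOThread->thread_pool_min >= 0 &&
    def->defaultIOThread->thread_pool_max >= 0 &&
    def->defaultIOThread->thread_pool_max < def->defaultIOThread->thread_pool_min) {
    virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
                   _("thread_pool_max must be equal to or greater than thread_pool_min"));
    return -1;
}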

Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2059511 Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- src/qemu/qemu_command.c | 16 +++++++++++++--- .../iothreads-ids-pool-sizes.x86_64-latest.args | 1 + 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c index 2734be7679..be20053c0d 100644 --- a/src/qemu/qemu_command.c +++ b/src/qemu/qemu_command.c @@ -7436,9 +7436,6 @@ qemuBuildIOThreadCommandLine(virCommand *cmd, { size_t i; - if (def->niothreadids == 0) - return 0; - for (i = 0; i < def->niothreadids; i++) { g_autoptr(virJSONValue) props = NULL; const virDomainIOThreadIDDef *iothread = def->iothreadids[i]; @@ -7456,6 +7453,19 @@ qemuBuildIOThreadCommandLine(virCommand *cmd, return -1; } + if (def->defaultIOThread) { + g_autoptr(virJSONValue) props = NULL; + + if (qemuMonitorCreateObjectProps(&props, "main-loop", "main-loop", + "k:thread-pool-min", def->defaultIOThread->thread_pool_min, + "k:thread-pool-max", def->defaultIOThread->thread_pool_max, + NULL) < 0) + return -1; + + if (qemuBuildObjectCommandlineFromJSON(cmd, props, qemuCaps) < 0) + return -1; + } + return 0; } diff --git a/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.x86_64-latest.args b/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.x86_64-latest.args index 32517e3d29..12747b51f0 100644 --- a/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.x86_64-latest.args +++ b/tests/qemuxml2argvdata/iothreads-ids-pool-sizes.x86_64-latest.args @@ -22,6 +22,7 @@ XDG_CONFIG_HOME=/tmp/lib/domain--1-QEMUGuest1/.config \ -object '{"qom-type":"iothread","id":"iothread1"}' \ -object '{"qom-type":"iothread","id":"iothread3"}' \ -object '{"qom-type":"iothread","id":"iothread5"}' \ +-object '{"qom-type":"main-loop","id":"main-loop","thread-pool-min":8,"thread-pool-max":16}' \ -uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \ -display none \ -no-user-config \ -- 2.35.1

On Tue, Jun 07, 2022 at 14:52:59 +0200, Michal Privoznik wrote:
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=2059511 Signed-off-by: Michal Privoznik <mprivozn@redhat.com> --- src/qemu/qemu_command.c | 16 +++++++++++++--- .../iothreads-ids-pool-sizes.x86_64-latest.args | 1 + 2 files changed, 14 insertions(+), 3 deletions(-)
Reviewed-by: Peter Krempa <pkrempa@redhat.com>