The first user is the qemu driver: virsh save/restore would slow to
a crawl with the default pipe size (64k). This improves the situation
by 400%.
Going through io_helper still seems to incur some penalty (~15%-ish)
compared with direct qemu migration through an nc socket to a file.
Signed-off-by: Claudio Fontana <cfontana(a)suse.de>
---
src/qemu/qemu_driver.c | 6 +++---
src/qemu/qemu_saveimage.c | 11 ++++++-----
src/util/virfile.c | 12 ++++++++++++
src/util/virfile.h | 1 +
4 files changed, 22 insertions(+), 8 deletions(-)
Hello, I initially thought this was a qemu performance issue,
so you can find the initial discussion on qemu-devel:
"Re: bad virsh save /dev/null performance (600 MiB/s max)"
https://lists.gnu.org/archive/html/qemu-devel/2022-03/msg03142.html
RFC since the idea still needs validation, and it is only lightly tested:
save - about 400% benefit in throughput, getting around 20 Gbps to /dev/null,
and around 13 Gbps to a ramdisk.
By comparison, direct qemu migration to an nc socket is around 24 Gbps.
restore - not tested, _should_ also benefit in the "bypass_cache" case
coredump - not tested, _should_ also benefit like for save
Thanks for your comments and review,
Claudio
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index c1b3bd8536..be248c1e92 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -3044,7 +3044,7 @@ doCoreDump(virQEMUDriver *driver,
virFileWrapperFd *wrapperFd = NULL;
int directFlag = 0;
bool needUnlink = false;
- unsigned int flags = VIR_FILE_WRAPPER_NON_BLOCKING;
+ unsigned int wrapperFlags = VIR_FILE_WRAPPER_NON_BLOCKING | VIR_FILE_WRAPPER_BIG_PIPE;
const char *memory_dump_format = NULL;
g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver);
g_autoptr(virCommand) compressor = NULL;
@@ -3059,7 +3059,7 @@ doCoreDump(virQEMUDriver *driver,
/* Create an empty file with appropriate ownership. */
if (dump_flags & VIR_DUMP_BYPASS_CACHE) {
- flags |= VIR_FILE_WRAPPER_BYPASS_CACHE;
+ wrapperFlags |= VIR_FILE_WRAPPER_BYPASS_CACHE;
directFlag = virFileDirectFdFlag();
if (directFlag < 0) {
virReportError(VIR_ERR_OPERATION_FAILED, "%s",
@@ -3072,7 +3072,7 @@ doCoreDump(virQEMUDriver *driver,
&needUnlink)) < 0)
goto cleanup;
- if (!(wrapperFd = virFileWrapperFdNew(&fd, path, flags)))
+ if (!(wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags)))
goto cleanup;
if (dump_flags & VIR_DUMP_MEMORY_ONLY) {
diff --git a/src/qemu/qemu_saveimage.c b/src/qemu/qemu_saveimage.c
index c0139041eb..1b522a1542 100644
--- a/src/qemu/qemu_saveimage.c
+++ b/src/qemu/qemu_saveimage.c
@@ -267,7 +267,7 @@ qemuSaveImageCreate(virQEMUDriver *driver,
int fd = -1;
int directFlag = 0;
virFileWrapperFd *wrapperFd = NULL;
- unsigned int wrapperFlags = VIR_FILE_WRAPPER_NON_BLOCKING;
+ unsigned int wrapperFlags = VIR_FILE_WRAPPER_NON_BLOCKING | VIR_FILE_WRAPPER_BIG_PIPE;
/* Obtain the file handle. */
if ((flags & VIR_DOMAIN_SAVE_BYPASS_CACHE)) {
@@ -463,10 +463,11 @@ qemuSaveImageOpen(virQEMUDriver *driver,
if ((fd = qemuDomainOpenFile(cfg, NULL, path, oflags, NULL)) < 0)
return -1;
- if (bypass_cache &&
- !(*wrapperFd = virFileWrapperFdNew(&fd, path,
- VIR_FILE_WRAPPER_BYPASS_CACHE)))
- return -1;
+ if (bypass_cache) {
+ unsigned int wrapperFlags = VIR_FILE_WRAPPER_BYPASS_CACHE | VIR_FILE_WRAPPER_BIG_PIPE;
+ if (!(*wrapperFd = virFileWrapperFdNew(&fd, path, wrapperFlags)))
+ return -1;
+ }
data = g_new0(virQEMUSaveData, 1);
diff --git a/src/util/virfile.c b/src/util/virfile.c
index a04f888e06..fdacd17890 100644
--- a/src/util/virfile.c
+++ b/src/util/virfile.c
@@ -282,6 +282,18 @@ virFileWrapperFdNew(int *fd, const char *name, unsigned int flags)
ret->cmd = virCommandNewArgList(iohelper_path, name, NULL);
+ if (flags & VIR_FILE_WRAPPER_BIG_PIPE) {
+ /*
+ * virsh save/restore would slow to a crawl with a default pipe size (usually 64k).
+ * This improves the situation by 400%, although going through io_helper still
+ * incurs a performance penalty compared with a direct qemu migration to a socket.
+ */
+ int pipe_sz, rv = virFileReadValueInt(&pipe_sz, "/proc/sys/fs/pipe-max-size");
This is fine as an experiment but I don't think it is that safe
to use in the real world. There could be a variety of reasons why
an admin might have enlarged this value, and we shouldn't assume
the max size is sensible for libvirt/QEMU to use.
I very much suspect there are diminishing returns here in terms
of buffer sizes.
64k is obviously too small, but 1 MB may be sufficiently large
that the bottleneck is then elsewhere in our code. IOW, if the
pipe max size is 100 MB, we shouldn't blindly use it. Can you
do a few tests with varying sizes to see where a sensible
tradeoff falls?
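Something along these lines is what I'd have in mind, purely as an
untested sketch against the hunk above (the 1 MiB cap is an arbitrary
placeholder until those measurements exist):

    /* cap the request instead of blindly trusting fs.pipe-max-size;
     * the 1 MiB value here is only a placeholder for illustration */
    #define VIR_FILE_WRAPPER_PIPE_SZ_CAP (1024 * 1024)

    if (flags & VIR_FILE_WRAPPER_BIG_PIPE) {
        int pipe_sz;

        if (virFileReadValueInt(&pipe_sz, "/proc/sys/fs/pipe-max-size") < 0)
            pipe_sz = VIR_FILE_WRAPPER_PIPE_SZ_CAP;

        if (pipe_sz > VIR_FILE_WRAPPER_PIPE_SZ_CAP)
            pipe_sz = VIR_FILE_WRAPPER_PIPE_SZ_CAP;

        /* best effort: if fcntl() fails we keep the default pipe size */
        fcntl(pipefd[output ? 0 : 1], F_SETPIPE_SZ, pipe_sz);
    }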
+ if (rv != 0) {
+ pipe_sz = 1024 * 1024; /* common default for pipe-max-size */
+ }
+ fcntl(pipefd[output ? 0 : 1], F_SETPIPE_SZ, pipe_sz);
+ }
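One more thing worth keeping in mind while testing: F_SETPIPE_SZ is
itself somewhat best effort. The kernel may round the request up and
returns the size it actually allocated, and an unprivileged caller
asking for more than fs.pipe-max-size gets EPERM. A tiny standalone
program like the following (only an illustration for the experiments,
not part of the patch) makes it easy to see what a given request
actually yields:

    #define _GNU_SOURCE   /* for F_SETPIPE_SZ / F_GETPIPE_SZ */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fds[2];
        int sz;

        if (pipe(fds) < 0) {
            perror("pipe");
            return 1;
        }

        /* request a 1 MiB buffer; the return value is the size the
         * kernel really allocated, possibly rounded up */
        sz = fcntl(fds[1], F_SETPIPE_SZ, 1024 * 1024);
        if (sz < 0)
            perror("F_SETPIPE_SZ");
        else
            printf("pipe buffer set to %d bytes\n", sz);

        /* read the current capacity back */
        printf("F_GETPIPE_SZ reports %d bytes\n",
               fcntl(fds[1], F_GETPIPE_SZ));

        close(fds[0]);
        close(fds[1]);
        return 0;
    }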