[PATCHv2 0/5] qemu: Introduce nvme disk emulation support
by honglei.wang@smartx.com
From: hongleiwang <honglei.wang(a)smartx.com>
QEMU has supported nvme disk emulation for a long time,
see: https://qemu-project.gitlab.io/qemu/system/devices/nvme.html.
The following patches introduce the nvme-ns disk bus type:
a disk with nvme-ns as its bus is represented as an nvme namespace
and needs to be attached to an nvme controller. In XML, it can be
used like this:
<devices>
...
<disk type='file' device='disk'>
<driver name='qemu' type='raw'/>
<source file='/tmp/data.img'/>
<target dev='nvmensa' bus='nvme-ns'/>
<address type='drive' controller='0' bus='0' target='0' unit='0'/>
</disk>
<controller type='nvme' index='0'>
<serial>nvme-controller-serial-value</serial>
<address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
</controller>
...
</devices>
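In QEMU terms this maps to an nvme controller device plus an nvme-ns
namespace device; the generated command line would look roughly like this
(a sketch following the QEMU nvme documentation linked above, not the
exact output of these patches):

    -drive file=/tmp/data.img,if=none,id=nvm-1,format=raw \
    -device nvme,id=nvme0,serial=nvme-controller-serial-value \
    -device nvme-ns,drive=nvm-1,bus=nvme0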
Signed-off-by: ray <honglei.wang(a)smartx.com>
---
Compared to patch v1, this version removes the nvme bus type implementation
and keeps only the nvme controller + nvme-ns bus approach.
ray (5):
qemu: Add support for NVMe namespace disk bus type
qemu_capabilities: Add support for nvme-ns bus capabilities
schema: Add nvme controller and nvme-ns bus definition
tests: Add test case for nvme-ns device configuration
NEWS: Document qemu nvme disk emulation feature
NEWS.rst | 17 +++++++++
src/conf/domain_conf.c | 39 ++++++++++++++++++++
src/conf/domain_conf.h | 7 ++++
src/conf/domain_postparse.c | 2 ++
src/conf/domain_validate.c | 4 ++-
src/conf/schemas/domaincommon.rng | 11 +++++-
src/conf/virconftypes.h | 2 ++
src/hyperv/hyperv_driver.c | 2 ++
src/qemu/qemu_alias.c | 1 +
src/qemu/qemu_capabilities.c | 5 +++
src/qemu/qemu_capabilities.h | 1 +
src/qemu/qemu_command.c | 26 ++++++++++++++
src/qemu/qemu_domain_address.c | 5 +++
src/qemu/qemu_hotplug.c | 14 ++++++--
src/qemu/qemu_postparse.c | 1 +
src/qemu/qemu_validate.c | 18 ++++++++++
src/test/test_driver.c | 2 ++
src/util/virutil.c | 2 +-
src/vbox/vbox_common.c | 2 ++
src/vmx/vmx.c | 1 +
.../qemu_10.0.0-q35.x86_64+amdsev.xml | 1 +
tests/domaincapsdata/qemu_10.0.0-q35.x86_64.xml | 1 +
.../qemu_10.0.0-tcg.x86_64+amdsev.xml | 1 +
tests/domaincapsdata/qemu_10.0.0-tcg.x86_64.xml | 1 +
tests/domaincapsdata/qemu_10.0.0.s390x.xml | 1 +
tests/domaincapsdata/qemu_10.0.0.x86_64+amdsev.xml | 1 +
tests/domaincapsdata/qemu_10.0.0.x86_64.xml | 1 +
tests/domaincapsdata/qemu_4.2.0-virt.aarch64.xml | 1 +
tests/domaincapsdata/qemu_4.2.0.aarch64.xml | 1 +
tests/domaincapsdata/qemu_4.2.0.ppc64.xml | 1 +
.../domaincapsdata/qemu_5.0.0-tcg-virt.riscv64.xml | 1 +
tests/domaincapsdata/qemu_5.0.0-virt.aarch64.xml | 1 +
tests/domaincapsdata/qemu_5.0.0-virt.riscv64.xml | 1 +
tests/domaincapsdata/qemu_5.0.0.aarch64.xml | 1 +
tests/domaincapsdata/qemu_5.0.0.ppc64.xml | 1 +
tests/domaincapsdata/qemu_5.1.0.sparc.xml | 1 +
tests/domaincapsdata/qemu_6.2.0-q35.x86_64.xml | 1 +
tests/domaincapsdata/qemu_6.2.0-tcg.x86_64.xml | 1 +
tests/domaincapsdata/qemu_6.2.0.ppc64.xml | 1 +
tests/domaincapsdata/qemu_6.2.0.x86_64.xml | 1 +
tests/domaincapsdata/qemu_7.0.0-q35.x86_64.xml | 1 +
tests/domaincapsdata/qemu_7.0.0-tcg.x86_64.xml | 1 +
tests/domaincapsdata/qemu_7.0.0.ppc64.xml | 1 +
tests/domaincapsdata/qemu_7.0.0.x86_64.xml | 1 +
tests/domaincapsdata/qemu_7.1.0-q35.x86_64.xml | 1 +
tests/domaincapsdata/qemu_7.1.0-tcg.x86_64.xml | 1 +
tests/domaincapsdata/qemu_7.1.0.ppc64.xml | 1 +
tests/domaincapsdata/qemu_7.1.0.x86_64.xml | 1 +
tests/domaincapsdata/qemu_7.2.0-hvf.x86_64+hvf.xml | 1 +
tests/domaincapsdata/qemu_7.2.0-q35.x86_64.xml | 1 +
tests/domaincapsdata/qemu_7.2.0-tcg.x86_64+hvf.xml | 1 +
tests/domaincapsdata/qemu_7.2.0-tcg.x86_64.xml | 1 +
tests/domaincapsdata/qemu_7.2.0.ppc.xml | 1 +
tests/domaincapsdata/qemu_7.2.0.x86_64.xml | 1 +
tests/domaincapsdata/qemu_8.0.0-q35.x86_64.xml | 1 +
tests/domaincapsdata/qemu_8.0.0-tcg.x86_64.xml | 1 +
tests/domaincapsdata/qemu_8.0.0.x86_64.xml | 1 +
tests/domaincapsdata/qemu_8.1.0-q35.x86_64.xml | 1 +
tests/domaincapsdata/qemu_8.1.0-tcg.x86_64.xml | 1 +
tests/domaincapsdata/qemu_8.1.0.x86_64.xml | 1 +
tests/domaincapsdata/qemu_8.2.0-q35.x86_64.xml | 1 +
.../qemu_8.2.0-tcg-virt.loongarch64.xml | 1 +
tests/domaincapsdata/qemu_8.2.0-tcg.x86_64.xml | 1 +
tests/domaincapsdata/qemu_8.2.0-virt.aarch64.xml | 1 +
.../domaincapsdata/qemu_8.2.0-virt.loongarch64.xml | 1 +
tests/domaincapsdata/qemu_8.2.0.aarch64.xml | 1 +
tests/domaincapsdata/qemu_8.2.0.armv7l.xml | 1 +
tests/domaincapsdata/qemu_8.2.0.s390x.xml | 1 +
tests/domaincapsdata/qemu_8.2.0.x86_64.xml | 1 +
tests/domaincapsdata/qemu_9.0.0-q35.x86_64.xml | 1 +
tests/domaincapsdata/qemu_9.0.0-tcg.x86_64.xml | 1 +
tests/domaincapsdata/qemu_9.0.0.x86_64.xml | 1 +
tests/domaincapsdata/qemu_9.1.0-q35.x86_64.xml | 1 +
.../domaincapsdata/qemu_9.1.0-tcg-virt.riscv64.xml | 1 +
tests/domaincapsdata/qemu_9.1.0-tcg.x86_64.xml | 1 +
tests/domaincapsdata/qemu_9.1.0-virt.riscv64.xml | 1 +
tests/domaincapsdata/qemu_9.1.0.s390x.xml | 1 +
tests/domaincapsdata/qemu_9.1.0.x86_64.xml | 1 +
.../domaincapsdata/qemu_9.2.0-hvf.aarch64+hvf.xml | 1 +
.../qemu_9.2.0-q35.x86_64+amdsev.xml | 1 +
tests/domaincapsdata/qemu_9.2.0-q35.x86_64.xml | 1 +
.../qemu_9.2.0-tcg.x86_64+amdsev.xml | 1 +
tests/domaincapsdata/qemu_9.2.0-tcg.x86_64.xml | 1 +
tests/domaincapsdata/qemu_9.2.0.s390x.xml | 1 +
tests/domaincapsdata/qemu_9.2.0.x86_64+amdsev.xml | 1 +
tests/domaincapsdata/qemu_9.2.0.x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_10.0.0_s390x.xml | 1 +
.../caps_10.0.0_x86_64+amdsev.xml | 1 +
tests/qemucapabilitiesdata/caps_10.0.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_6.2.0_ppc64.xml | 1 +
tests/qemucapabilitiesdata/caps_6.2.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_7.0.0_ppc64.xml | 1 +
tests/qemucapabilitiesdata/caps_7.0.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_7.1.0_ppc64.xml | 1 +
tests/qemucapabilitiesdata/caps_7.1.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_7.2.0_ppc.xml | 1 +
.../qemucapabilitiesdata/caps_7.2.0_x86_64+hvf.xml | 1 +
tests/qemucapabilitiesdata/caps_7.2.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_8.0.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_8.1.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_8.2.0_aarch64.xml | 1 +
tests/qemucapabilitiesdata/caps_8.2.0_armv7l.xml | 1 +
.../caps_8.2.0_loongarch64.xml | 1 +
tests/qemucapabilitiesdata/caps_8.2.0_s390x.xml | 1 +
tests/qemucapabilitiesdata/caps_8.2.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_9.0.0_x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_9.1.0_riscv64.xml | 1 +
tests/qemucapabilitiesdata/caps_9.1.0_s390x.xml | 1 +
tests/qemucapabilitiesdata/caps_9.1.0_x86_64.xml | 1 +
.../caps_9.2.0_aarch64+hvf.xml | 1 +
tests/qemucapabilitiesdata/caps_9.2.0_s390x.xml | 1 +
.../caps_9.2.0_x86_64+amdsev.xml | 1 +
tests/qemucapabilitiesdata/caps_9.2.0_x86_64.xml | 1 +
.../disk-nvme-ns-device.x86_64-latest.args | 36 +++++++++++++++++++
.../disk-nvme-ns-device.x86_64-latest.xml | 42 ++++++++++++++++++++++
tests/qemuxmlconfdata/disk-nvme-ns-device.xml | 41 +++++++++++++++++++++
tests/qemuxmlconftest.c | 1 +
117 files changed, 370 insertions(+), 5 deletions(-)
create mode 100644 tests/qemuxmlconfdata/disk-nvme-ns-device.x86_64-latest.args
create mode 100644 tests/qemuxmlconfdata/disk-nvme-ns-device.x86_64-latest.xml
create mode 100644 tests/qemuxmlconfdata/disk-nvme-ns-device.xml
--
2.11.0
[PATCH] qemu: fix qemuMigrationCapability enum
by Dmitry Frolov
Enum variable of type qemuMigrationCapability is checked for zero in
src/qemu/qemu_migration_params.c:729:
"if (item->optional) { ..."
Actually, the QEMU_MIGRATION_CAP_XBZRLE enum constant has the value 0.
Thus, all uninitialized .optional fields of the static array
qemuMigrationParamsFlagMap[] will be implicitly initialized to
0 (QEMU_MIGRATION_CAP_XBZRLE), making "no optional capability"
indistinguishable from XBZRLE.
In my opinion, introducing a separate enum for optional capabilities
would be a better solution.
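A minimal standalone illustration of the pitfall (a sketch in plain C, not
libvirt code; all names are made up):

    #include <stdio.h>

    typedef enum {
        CAP_XBZRLE,     /* value 0 -- also the implicit value of unset fields */
        CAP_POSTCOPY,
        CAP_LAST
    } Capability;

    struct Item {
        int flag;
        Capability optional;
    };

    /* .optional is not mentioned, so it is zero-initialized, i.e. CAP_XBZRLE */
    static const struct Item items[] = {
        { .flag = 1 },
    };

    int main(void)
    {
        /* False here, but an item that explicitly set .optional = CAP_XBZRLE
         * would be indistinguishable from one that never set it. */
        if (items[0].optional)
            printf("optional capability set\n");
        else
            printf("unset... or XBZRLE?\n");
        return 0;
    }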
Found by Linux Verification Center (linuxtesting.org) with SVACE.
Signed-off-by: Dmitry Frolov <frolov(a)swemel.ru>
---
src/qemu/qemu_migration_params.c | 16 +++++++++++-----
src/qemu/qemu_migration_params.h | 12 ++++++++++--
2 files changed, 21 insertions(+), 7 deletions(-)
diff --git a/src/qemu/qemu_migration_params.c b/src/qemu/qemu_migration_params.c
index c10660d6f2..23c463dbbb 100644
--- a/src/qemu/qemu_migration_params.c
+++ b/src/qemu/qemu_migration_params.c
@@ -104,6 +104,11 @@ VIR_ENUM_IMPL(qemuMigrationCapability,
"dirty-bitmaps",
"return-path",
"zero-copy-send",
+);
+
+VIR_ENUM_IMPL(qemuMigrationOptCap,
+ QEMU_MIGRATION_OPTCAP_LAST,
+ "none",
"postcopy-preempt",
"switchover-ack",
);
@@ -152,7 +157,7 @@ struct _qemuMigrationParamsFlagMapItem {
/* An optional capability to set in addition to @cap in case it is
* supported. Depending on @part either one or both sides of migration
* has to support the optional capability to be enabled. */
- qemuMigrationCapability optional;
+ qemuMigrationOptCap optional;
/* Bit-wise OR of qemuMigrationParty. Determines whether the capability has
* to be enabled on the source, on the destination, or on both sides of
* migration. */
@@ -200,7 +205,7 @@ static const qemuMigrationParamsFlagMapItem qemuMigrationParamsFlagMap[] = {
{.match = QEMU_MIGRATION_FLAG_REQUIRED,
.flag = VIR_MIGRATE_POSTCOPY,
.cap = QEMU_MIGRATION_CAP_POSTCOPY,
- .optional = QEMU_MIGRATION_CAP_POSTCOPY_PREEMPT,
+ .optional = QEMU_MIGRATION_OPTCAP_POSTCOPY_PREEMPT,
.party = QEMU_MIGRATION_SOURCE | QEMU_MIGRATION_DESTINATION},
{.match = QEMU_MIGRATION_FLAG_REQUIRED,
@@ -211,7 +216,7 @@ static const qemuMigrationParamsFlagMapItem qemuMigrationParamsFlagMap[] = {
{.match = QEMU_MIGRATION_FLAG_FORBIDDEN,
.flag = VIR_MIGRATE_TUNNELLED,
.cap = QEMU_MIGRATION_CAP_RETURN_PATH,
- .optional = QEMU_MIGRATION_CAP_SWITCHOVER_ACK,
+ .optional = QEMU_MIGRATION_OPTCAP_SWITCHOVER_ACK,
.party = QEMU_MIGRATION_SOURCE | QEMU_MIGRATION_DESTINATION},
{.match = QEMU_MIGRATION_FLAG_REQUIRED,
@@ -725,8 +730,9 @@ qemuMigrationParamsFromFlags(virTypedParameterPtr params,
qemuMigrationCapabilityTypeToString(item->cap));
ignore_value(virBitmapSetBit(migParams->caps, item->cap));
- if (item->optional) {
- qemuMigrationCapability opt = item->optional;
+ if (item->optional > QEMU_MIGRATION_OPTCAP_NONE &&
+ item->optional < QEMU_MIGRATION_OPTCAP_LAST) {
+ qemuMigrationOptCap opt = item->optional;
ignore_value(virBitmapSetBit(migParams->optional, opt));
if (item->party != party)
ignore_value(virBitmapSetBit(migParams->remoteOptional, opt));
diff --git a/src/qemu/qemu_migration_params.h b/src/qemu/qemu_migration_params.h
index 17fc63f527..3246b8487e 100644
--- a/src/qemu/qemu_migration_params.h
+++ b/src/qemu/qemu_migration_params.h
@@ -40,13 +40,21 @@ typedef enum {
QEMU_MIGRATION_CAP_BLOCK_DIRTY_BITMAPS,
QEMU_MIGRATION_CAP_RETURN_PATH,
QEMU_MIGRATION_CAP_ZERO_COPY_SEND,
- QEMU_MIGRATION_CAP_POSTCOPY_PREEMPT,
- QEMU_MIGRATION_CAP_SWITCHOVER_ACK,
QEMU_MIGRATION_CAP_LAST
} qemuMigrationCapability;
VIR_ENUM_DECL(qemuMigrationCapability);
+typedef enum {
+ QEMU_MIGRATION_OPTCAP_NONE,
+ QEMU_MIGRATION_OPTCAP_POSTCOPY_PREEMPT,
+ QEMU_MIGRATION_OPTCAP_SWITCHOVER_ACK,
+
+ QEMU_MIGRATION_OPTCAP_LAST
+} qemuMigrationOptCap;
+VIR_ENUM_DECL(qemuMigrationOptCap);
+
+
typedef enum {
QEMU_MIGRATION_PARAM_COMPRESS_LEVEL,
QEMU_MIGRATION_PARAM_COMPRESS_THREADS,
--
2.34.1
[PATCH v2 0/5] docs: automated info about machine deprecation/removal info
by Daniel P. Berrangé
Since we deprecate and remove versioned machine types on a fixed
schedule, we can automatically ensure that the docs reflect the
latest version info, rather than requiring manual updates on each
dev cycle.
The first patch in this series removes the hack which postponed
automatic removal of versioned machine types to the 10.1.0 release,
since we're now in the 10.1.0 dev cycle.
The second patch in this series fixes the logic to ensure dev snapshots
and release candidates don't have an off-by-1 error in setting
deprecation and removal thresholds - they must predict the next formal
release version number.
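To illustrate the fix, the normalization involved looks roughly like this
(a hedged sketch, not the actual include/hw/boards.h code; it assumes QEMU's
convention that micro >= 50 marks a dev snapshot or release candidate, and
the three releases per year x.0/x.1/x.2):

    /* Map a dev/rc version to the formal release it will become, so
     * deprecation/removal thresholds are computed against that release. */
    void effective_release(int major, int minor, int micro,
                           int *eff_major, int *eff_minor)
    {
        *eff_major = major;
        *eff_minor = minor;
        if (micro >= 50) {          /* e.g. 10.0.50 is the 10.1 dev cycle */
            if (minor == 2) {       /* ...and 10.2.50 becomes 11.0 */
                *eff_major = major + 1;
                *eff_minor = 0;
            } else {
                *eff_minor = minor + 1;
            }
        }
    }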
The following three patches deal with the docs stuff.
With this series applied, all versioned machine types prior to 4.1
are now removed (hidden). We can delete the code at our leisure.
Changed in v2:
- Remove hack that temporarily postponed automatic deletion
of machine types
- Fix docs version info for stable bugfix releases
Daniel P. Berrangé (5):
Revert "include/hw: temporarily disable deletion of versioned machine
types"
include/hw/boards: cope with dev/rc versions in deprecation checks
docs/about/deprecated: auto-generate a note for versioned machine
types
docs/about/removed-features: auto-generate a note for versioned
machine types
include/hw/boards: add warning about changing deprecation logic
docs/about/deprecated.rst | 7 ++++
docs/about/removed-features.rst | 10 +++---
docs/conf.py | 39 +++++++++++++++++++++-
include/hw/boards.h | 58 +++++++++++++++++++++------------
4 files changed, 89 insertions(+), 25 deletions(-)
--
2.49.0
[PATCH v3 0/5] docs: automated info about machine deprecation/removal info
by Daniel P. Berrangé
Since we deprecate and remove versioned machine types on a fixed
schedule, we can automatically ensure that the docs reflect the
latest version info, rather than requiring manual updates on each
dev cycle.
The first patch in this series removes the hack which postponed
automatic removal of versioned machine types to the 10.1.0 release,
since we're now in the 10.1.0 dev cycle.
The second patch in this series fixes the logic to ensure dev snapshots
and release candidates don't have an off-by-1 error in setting
deprecation and removal thresholds - they must predict the next formal
release version number.
The following three patches deal with the docs stuff.
Changed in v3:
- Remove mistaken mention of 'ppc', only 'ppc64' has versioned
machine types
Changed in v2:
- Remove hack that temporarily postponed automatic deletion
of machine types
- Fix docs version info for stable bugfix releases
Daniel P. Berrangé (5):
Revert "include/hw: temporarily disable deletion of versioned machine
types"
include/hw/boards: cope with dev/rc versions in deprecation checks
docs/about/deprecated: auto-generate a note for versioned machine
types
docs/about/removed-features: auto-generate a note for versioned
machine types
include/hw/boards: add warning about changing deprecation logic
docs/about/deprecated.rst | 7 ++++
docs/about/removed-features.rst | 10 +++---
docs/conf.py | 39 +++++++++++++++++++++-
include/hw/boards.h | 58 +++++++++++++++++++++------------
4 files changed, 89 insertions(+), 25 deletions(-)
--
2.49.0
[PULL 0/6] Versioned machine type deprecation policy patches
by Daniel P. Berrangé
The following changes since commit 57b6f8d07f1478375f85a4593a207e936c63ff59:
Merge tag 'pull-target-arm-20250506' of https://git.linaro.org/people/pmaydell/qemu-arm into staging (2025-05-07 14:28:20 -0400)
are available in the Git repository at:
https://gitlab.com/berrange/qemu tags/docs-dep-pull-request
for you to fetch changes up to 3fbb0a1397a9acea523f3c8062df8c6f8032788d:
include/hw/boards: add warning about changing deprecation logic (2025-05-08 17:11:16 +0100)
----------------------------------------------------------------
* Remove test relying on 4.1 machine type that is about to
be disabled
* Fix off-by-1 in deprecation/removal logic for versioned
machine types to cope with dev/rc versions
* Enable logic for disabling registration of versioned machine
types which have exceeded the deprecation lifetime policy.
* Add automated version information to documentation about which
versioned machine types are deprecated and removed
----------------------------------------------------------------
Daniel P. Berrangé (5):
Revert "include/hw: temporarily disable deletion of versioned machine
types"
include/hw/boards: cope with dev/rc versions in deprecation checks
docs/about/deprecated: auto-generate a note for versioned machine
types
docs/about/removed-features: auto-generate a note for versioned
machine types
include/hw/boards: add warning about changing deprecation logic
Thomas Huth (1):
tests/qtest/q35-test: Remove the obsolete test_without_smram_base test
docs/about/deprecated.rst | 7 ++++
docs/about/removed-features.rst | 10 +++---
docs/conf.py | 39 +++++++++++++++++++++-
include/hw/boards.h | 58 +++++++++++++++++++++------------
tests/qtest/q35-test.c | 37 +--------------------
5 files changed, 90 insertions(+), 61 deletions(-)
--
2.49.0
RFC: libvirt-tck bhyve/FreeBSD support
by Roman Bogorodskiy
Hi,
I'd like to test the bhyve driver using libvirt-tck, which seems
very useful both from the continuous integration perspective and
for bringing the bhyve driver closer to other drivers, improving
integration with other tooling.
As a small proof of concept I made just a single test,
060-persistent-lifecycle.t, work with bhyve; even that was enough to
highlight a bunch of issues.
I've created a pull request on Gitlab:
https://gitlab.com/libvirt/libvirt-tck/-/merge_requests/58
but apparently it's not very active there, so I'll briefly repeat it
here as well.
Issues I encountered:
* Network Filters not supported
It could be solved by implementing network filters (obviously),
but realistically that's probably not going to happen soon.
I see at least 3 options to deal with it:
- Add a test suite config knob to skip nwfilters
- Don't fail the test suite on non-implemented list_nwfilters
- In the bhyve driver, add a minimal implementation which
always returns 0 rules
* Hardcoded /dev/urandom RNG devices
As the bhyve driver now supports virtio-rnd with a /dev/random
backend, should there be a configuration knob for libvirt-tck to
override the RNG device definition?
* Missing IDE device support
Bhyve does not (and likely never will) support IDE devices, but supports
SATA devices. Similar to the previous item, should there be a config knob?
Or maybe just change the default to sata?
* Missing pty console support
Bhyve does not support pty consoles. The bhyve driver currently supports
the nmdm console, and bhyve also supports virtio-console and TCP socket
connections, which are not yet supported by the driver.
* Firmware loader specification
This is probably one of the trickiest ones. When no loader is specified,
the bhyve driver uses the bhyveload(8) loader, which can only boot
FreeBSD guests. That means any other guest OS requires
specifying the BHYVE_UEFI.fd firmware (or using grub-bhyve); see the
loader XML sketch right after this list. I see
two options for that:
- A test suite/framework configuration knob (again)
- Change the bhyve driver to default to BHYVE_UEFI.fd. I like this option
for two reasons: it makes bhyve domain XMLs more compatible with other
drivers, and it looks like a more reasonable default, because,
for example, I don't run FreeBSD guests inside bhyve very often, so for
most of my domains I have to set the loader. It's a bit inconvenient that
the firmware is not part of bhyve and needs to be installed through
the port, but we could make it possible to set the default firmware
via a build option and allow overriding it via the bhyve driver
configuration, which should provide enough flexibility to users, I guess.
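For reference, a loader specification of this kind (a sketch; the path
assumes the firmware installed by the sysutils/bhyve-firmware port):

    <os>
      <type>hvm</type>
      <loader readonly='yes' type='pflash'>/usr/local/share/uefi-firmware/BHYVE_UEFI.fd</loader>
    </os>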
Any thoughts and recommendations on how to tackle this project are appreciated.
Thanks,
Roman
[PATCH] rpm: disable zfs on Fedora >= 43
by Daniel P. Berrangé
From: Daniel P. Berrangé <berrange(a)redhat.com>
The zfs-fuse package has been dead upstream for a long time and is
now retired in Fedora rawhide.
Signed-off-by: Daniel P. Berrangé <berrange(a)redhat.com>
---
libvirt.spec.in | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/libvirt.spec.in b/libvirt.spec.in
index 9217820137..e12bec18d5 100644
--- a/libvirt.spec.in
+++ b/libvirt.spec.in
@@ -85,8 +85,8 @@
%endif
%endif
-# Fedora has zfs-fuse
-%if 0%{?fedora}
+# Fedora had zfs-fuse until F43
+%if 0%{?fedora} && 0%{?fedora} < 43
%define with_storage_zfs 0%{!?_without_storage_zfs:1}
%else
%define with_storage_zfs 0
@@ -700,6 +700,9 @@ Requires: /usr/bin/qemu-img
Obsoletes: libvirt-daemon-driver-storage-rbd < 5.2.0
%endif
Obsoletes: libvirt-daemon-driver-storage-sheepdog < 8.8.0
+ %if !%{with_storage_zfs}
+Obsoletes: libvirt-daemon-driver-storage-zfs < 11.4.0
+ %endif
%description daemon-driver-storage-core
The storage driver plugin for the libvirtd daemon, providing
--
2.49.0
[PATCH 0/2] tests: Add capabilities for QEMU 10.0 on aarch64
by Matthew R. Ochs
This is a refresh of a series that Andrea Bolognani posted in February
2025 [1] and provides aarch64 capability support for QEMU v10.0.0.
[1] https://lists.libvirt.org/archives/list/devel@lists.libvirt.org/thread/Z4...
Signed-off-by: Matthew R. Ochs <mochs(a)nvidia.com>
Matthew R. Ochs (2):
tests: Use collie instead of borzoi for aarch64 tests
tests: Add capabilities for QEMU 10.0.0 on aarch64
.../qemu_10.0.0-virt.aarch64.xml | 237 +
tests/domaincapsdata/qemu_10.0.0.aarch64.xml | 237 +
.../caps_10.0.0_aarch64.replies | 37426 ++++++++++++++++
.../caps_10.0.0_aarch64.xml | 546 +
...arch64-cpu-passthrough.aarch64-latest.args | 5 +-
.../aarch64-kvm-32-on-64.aarch64-latest.args | 5 +-
.../aarch64-noacpi-acpi.aarch64-latest.err | 2 +-
tests/qemuxmlconfdata/aarch64-noacpi-acpi.xml | 2 +-
...usb-minimal.aarch64-latest.abi-update.args | 3 +-
...ousb-minimal.aarch64-latest.abi-update.xml | 2 +-
.../aarch64-nousb-minimal.aarch64-latest.args | 3 +-
.../aarch64-nousb-minimal.aarch64-latest.xml | 2 +-
.../qemuxmlconfdata/aarch64-nousb-minimal.xml | 2 +-
.../aarch64-virt-graphics.aarch64-latest.args | 5 +-
...h64-virt-headless-mmio.aarch64-latest.args | 5 +-
.../aarch64-virt-headless.aarch64-latest.args | 5 +-
.../aarch64-virt-virtio.aarch64-latest.args | 5 +-
...o-pci-manual-addresses.aarch64-latest.args | 5 +-
.../arm-vexpressa9-basic.aarch64-latest.args | 1 -
.../arm-vexpressa9-basic.aarch64-latest.xml | 3 -
.../arm-vexpressa9-nodevs.aarch64-latest.args | 1 -
.../arm-vexpressa9-nodevs.aarch64-latest.xml | 3 -
.../arm-vexpressa9-virtio.aarch64-latest.args | 6 +-
.../arm-vexpressa9-virtio.aarch64-latest.xml | 3 -
.../disk-arm-virtio-sd.aarch64-latest.args | 1 -
.../disk-arm-virtio-sd.aarch64-latest.xml | 3 -
...mware-auto-efi-aarch64.aarch64-latest.args | 5 +-
...-loader-raw.aarch64-latest.abi-update.args | 5 +-
...-efi-format-loader-raw.aarch64-latest.args | 5 +-
...i-aarch64-legacy-paths.aarch64-latest.args | 5 +-
...anual-efi-acpi-aarch64.aarch64-latest.args | 5 +-
...ual-efi-noacpi-aarch64.aarch64-latest.args | 5 +-
.../pvpanic-pci-aarch64.aarch64-latest.args | 5 +-
...pci-no-address-aarch64.aarch64-latest.args | 5 +-
...default-fallback-nousb.aarch64-latest.args | 3 +-
...-default-fallback-nousb.aarch64-latest.xml | 2 +-
...ntroller-default-nousb.aarch64-latest.args | 3 +-
...ontroller-default-nousb.aarch64-latest.xml | 2 +-
.../usb-controller-default-nousb.xml | 2 +-
...ault-unavailable-nousb.aarch64-latest.args | 3 +-
...fault-unavailable-nousb.aarch64-latest.xml | 2 +-
.../virtio-iommu-aarch64.aarch64-latest.args | 5 +-
tests/qemuxmlconftest.c | 2 +-
43 files changed, 38500 insertions(+), 82 deletions(-)
create mode 100644 tests/domaincapsdata/qemu_10.0.0-virt.aarch64.xml
create mode 100644 tests/domaincapsdata/qemu_10.0.0.aarch64.xml
create mode 100644 tests/qemucapabilitiesdata/caps_10.0.0_aarch64.replies
create mode 100644 tests/qemucapabilitiesdata/caps_10.0.0_aarch64.xml
--
2.46.0
Re: [RFC PATCH 0/3] single-binary: make QAPI generated files common
by Markus Armbruster
Pierrick Bouvier <pierrick.bouvier(a)linaro.org> writes:
> Note: This RFC was posted to trigger a discussion around this topic, and it's
> not expected to merge it as it is.
>
> Context
> =======
>
> Linaro is working towards heterogeneous emulation, mixing several architectures
> in a single QEMU process. The first prerequisite is to be able to build such a
> binary, which we commonly name "single-binary" in our various series.
> An (incomplete) list of series is available here:
> https://patchew.org/search?q=project%3AQEMU+single-binary
>
> We don't expect to change existing command line interface or any observable
> behaviour, it should be identical to existing binaries. If anyone notices a
> difference, it will be a bug.
Define "notice a difference" :) More on that below.
> The first objective we target is to combine qemu-system-arm and
> qemu-system-aarch64 in a single binary, showing that we can build and link such
> a thing. While useless from a feature point of view, it allows us to make
> good progress towards the goal, unify two "distinct" architectures, and gain
> experience with the problems met.
Makes sense to me.
> Our current approach is to remove compilation units duplication to be able to
> link all object files together. One of the concerned subsystem is QAPI.
>
> QAPI
> ====
>
> QAPI generated files contain conditional clauses to define various structures,
> enums, and commands only for specific targets. This forces files to be
> compiled for every target.
To be precise: conditionals that use macros restricted to
target-specific code, i.e. the ones poisoned by exec/poison.h. Let's
call them target-specific QAPI conditionals.
The QAPI generator is blissfully unaware of all this.
The build system treats QAPI modules qapi/*-target.json as
target-specific. The .c files generated for them are compiled per
target. See qapi/meson.build.
Only such target-specific modules can use target-specific QAPI
conditionals. Use in target-independent modules will generate C that
won't compile.
Poisoned macros used in qapi/*-target.json:
CONFIG_KVM
TARGET_ARM
TARGET_I386
TARGET_LOONGARCH64
TARGET_MIPS
TARGET_PPC
TARGET_RISCV
TARGET_S390X
> What we try to do here is to build them only once
> instead.
You're trying to eliminate target-specific QAPI conditionals. Correct?
> In the past, we identified that the best approach to solve this is to expose code
> for all targets (thus removing all #if clauses), and stub missing
> symbols for concerned targets.
This affects QAPI/QMP introspection, i.e. the value of query-qmp-schema.
Management applications can no longer use introspection to find out
whether target-specific things are available.
For instance, query-cpu-definitions is implemented for targets arm,
i386, loongarch, mips, ppc, riscv, and s390x. It initially was for
fewer targets, and more targets followed one by one. Still more may
follow in the future. Right now, management applications can use
introspection to find out whether it is available. That stops working
when you make it available for all targets, stubbed out for the ones
that don't (yet) implement it.
Management applications may have to be adjusted for this.
This is not an attempt to shoot down your approach. I'm merely
demonstrating limitations of your promise "if anyone notices a
difference, it will be a bug."
Now, we could get really fancy and try to keep introspection the same by
applying conditionals dynamically somehow. I.e. have the single binary
return different introspection values depending on the actual guest's
target.
This requires fixing the target before introspection. Unless this is
somehow completely transparent (wrapper scripts, or awful hacks based on
the binary's filename, perhaps), management applications may have to be
adjusted to actually do that.
Applies not just to introspection. Consider query-cpu-definitions
again. It currently returns CPU definitions for *the* target. What
would a single binary's query-cpu-definitions return? The CPU
definitions for *all* its targets? Management applications then receive
CPUs that won't work, which may upset them. To avoid a noticeable
difference, we again have to fix the target before we look.
Of course, "fixing the target" stops making sense once we move to
heterogeneous machines with multiple targets.
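To illustrate the per-target behaviour in question (abridged QMP output;
field lists shortened):

    -> { "execute": "query-cpu-definitions" }
    <- { "return": [ { "name": "cortex-a57", "static": false, ... },
                     { "name": "max", "static": false, ... },
                     ... ] }

The returned list is specific to the single target the binary was built for.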
> This series builds QAPI generated code once, by removing all TARGET_{arch} and
> CONFIG_KVM clauses. What it does *not* do at the moment is:
> - prevent target-specific commands from being visible for all targets
> (see TODO comment on patch 2 explaining how to address this)
> - nothing was done to hide all this from generated documentation
For better or worse, generated documentation always contains everything.
An argument could be made for stripping out documentation for the stuff
that isn't included in this build.
> From what I understood, the only thing that matters is to limit qmp commands
> visible. Exposing enums, structures, or events is not a problem, since they
> won't be used/triggered for non-concerned targets. Please correct me if this is
> wrong, and if there are unexpected consequences for libvirt or other consumers.
I'm not sure what you mean by "to limit qmp commands visible".
QAPI/QMP introspection has all commands and events, and all types
reachable from them. query-qmp-schema returns an array, where each
array element describes one command, event, or type. When a command,
event, or type is conditional in the schema, the element is wrapped in
the #if generated for the condition.
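To make that concrete, a generated qapi-introspect.c element has roughly
this shape (abridged and illustrative; exact fields and macros may differ):

    #if defined(TARGET_ARM)
        QLIT_QDICT(((QLitDictEntry[]) {
            { "meta-type", QLIT_QSTR("command") },
            { "name", QLIT_QSTR("query-gic-capabilities") },
            {}
        })),
    #endif /* defined(TARGET_ARM) */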
>
> Impact on code size
> ===================
>
> There is a strong focus on keeping QEMU fast and small. Concerning performance,
> there is no impact, as the only thing that would change is to conditionally
> check the current target when registering some commands.
> Concerning code size, you can find the impact on various qemu-system binaries
> with optimized and stripped build.
>
> upstream:
> 12588 ./build/qemu-system-s390x
> 83992 ./build/qemu-system-x86_64
> 31884 ./build/qemu-system-aarch64
> upstream + this series:
> 12644 ./build/qemu-system-s390x (+56kB, +0.4%)
> 84076 ./build/qemu-system-x86_64 (+84kB, +0.1%)
> 31944 ./build/qemu-system-aarch64 (+60kB, +0.2%)
>
> Feedback
> ========
>
> The goal of this series is to spark a conversation around the following topics:
>
> - Would you be open to such an approach? (expose all code, and restrict commands
> registered at runtime only for specific targets)
Yes, if we can find acceptable solutions for the problems that come with
it.
> - Are there unexpected consequences for libvirt or other consumers to expose
> more definitions than what we have now?
Maybe.
> - Would you recommend another approach instead? I experimented with having per
> target generated files, but we still need to expose quite a lot in headers, so
> my opinion is that it's much more complicated for zero benefit. Also, the
> code size impact is negligible, so the simpler, the better.
>
> Feel free to add anyone I could have missed in CC.
I'm throwing in devel(a)lists.libvirt.org.
> Regards,
> Pierrick
>
> Pierrick Bouvier (3):
> qapi: add weak stubs for target specific commands
> qapi: always expose TARGET_* or CONFIG_KVM code
> qapi: make all generated files common
>
> qapi/commands-weak-stubs.c | 38 ++++++++++++++++++++++++++++++++++++++
> qapi/meson.build | 5 ++++-
> scripts/qapi/commands.py | 4 ++++
> scripts/qapi/common.py | 4 +++-
> 4 files changed, 49 insertions(+), 2 deletions(-)
> create mode 100644 qapi/commands-weak-stubs.c
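For readers unfamiliar with the weak-stub pattern patch 1 relies on, a
hedged sketch (illustrative only, not the series' actual code): a default
definition marked __attribute__((weak)) is overridden at link time by any
strong definition a target provides, so only targets lacking the command
fall back to the stub:

    #include "qapi/error.h"
    #include "qapi/qapi-commands-machine-target.h"

    /* Fallback used only when no linked-in target supplies a strong
     * definition of this command handler. */
    CpuDefinitionInfoList * __attribute__((weak))
    qmp_query_cpu_definitions(Error **errp)
    {
        error_setg(errp, "command not supported by this binary");
        return NULL;
    }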
[PATCH] qemu_capabilities: Fetch caps for virtio-mem-ccw too
by Michal Privoznik
From: Michal Privoznik <mprivozn(a)redhat.com>
While with upstream QEMU it's impossible to have virtio-mem-ccw and not
have virtio-mem-pci, in RHEL QEMU's build system is patched to make
that possible. But this breaks our assumption when fetching
capabilities.
Well, just do what we are already doing in this situation (e.g.
"virtio-blk-pci"/"virtio-blk-ccw" & virQEMUCapsDevicePropsVirtioBlk, or
"virtio-scsi-pci"/"virtio-scsi-ccw" & virQEMUCapsDevicePropsVirtioSCSI):
fetch the same set of props for both devices.
Resolves: https://issues.redhat.com/browse/RHEL-87528
Resolves: https://issues.redhat.com/browse/RHEL-87532
Signed-off-by: Michal Privoznik <mprivozn(a)redhat.com>
---
src/qemu/qemu_capabilities.c | 3 +
.../caps_10.0.0_s390x.replies | 206 +++++++++++++++---
2 files changed, 183 insertions(+), 26 deletions(-)
diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index a804335c85..1a4f28facf 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -1716,6 +1716,9 @@ static virQEMUCapsDeviceTypeProps virQEMUCapsDeviceProps[] = {
{ "virtio-mem-pci", virQEMUCapsDevicePropsVirtioMemPCI,
G_N_ELEMENTS(virQEMUCapsDevicePropsVirtioMemPCI),
QEMU_CAPS_DEVICE_VIRTIO_MEM_PCI },
+ { "virtio-mem-ccw", virQEMUCapsDevicePropsVirtioMemPCI,
+ G_N_ELEMENTS(virQEMUCapsDevicePropsVirtioMemPCI),
+ QEMU_CAPS_DEVICE_VIRTIO_MEM_CCW },
{ "virtio-iommu-pci", virQEMUCapsDevicePropsVirtioIOMMU,
G_N_ELEMENTS(virQEMUCapsDevicePropsVirtioIOMMU),
QEMU_CAPS_DEVICE_VIRTIO_IOMMU_PCI },
diff --git a/tests/qemucapabilitiesdata/caps_10.0.0_s390x.replies b/tests/qemucapabilitiesdata/caps_10.0.0_s390x.replies
index d941bc41c7..1fe6526a3f 100644
--- a/tests/qemucapabilitiesdata/caps_10.0.0_s390x.replies
+++ b/tests/qemucapabilitiesdata/caps_10.0.0_s390x.replies
@@ -29792,12 +29792,166 @@
"id": "libvirt-29"
}
+{
+ "execute": "device-list-properties",
+ "arguments": {
+ "typename": "virtio-mem-ccw"
+ },
+ "id": "libvirt-30"
+}
+
+{
+ "return": [
+ {
+ "name": "dev_id",
+ "description": "Read-only identifier of an I/O device in the channel subsystem, example: fe.1.23ab",
+ "type": "str"
+ },
+ {
+ "name": "devno",
+ "description": "Identifier of an I/O device in the channel subsystem, example: fe.1.23ab",
+ "type": "str"
+ },
+ {
+ "name": "subch_id",
+ "description": "Read-only identifier of an I/O device in the channel subsystem, example: fe.1.23ab",
+ "type": "str"
+ },
+ {
+ "default-value": 2,
+ "name": "max_revision",
+ "type": "uint32"
+ },
+ {
+ "default-value": true,
+ "name": "ioeventfd",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "default-value": 0,
+ "name": "memaddr",
+ "type": "uint64"
+ },
+ {
+ "default-value": true,
+ "name": "indirect_desc",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "default-value": false,
+ "name": "iommu_platform",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "name": "memdev",
+ "type": "link<memory-backend>"
+ },
+ {
+ "default-value": true,
+ "name": "event_idx",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "default-value": true,
+ "name": "x-early-migration",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "default-value": 0,
+ "name": "node",
+ "type": "uint32"
+ },
+ {
+ "name": "requested-size",
+ "type": "size"
+ },
+ {
+ "default-value": true,
+ "name": "any_layout",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "default-value": false,
+ "name": "x-disable-legacy-check",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "default-value": true,
+ "name": "queue_reset",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "default-value": true,
+ "name": "notify_on_empty",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "default-value": false,
+ "name": "packed",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "name": "block-size",
+ "type": "size"
+ },
+ {
+ "default-value": false,
+ "name": "prealloc",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "default-value": true,
+ "name": "use-started",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "default-value": false,
+ "name": "in_order",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "name": "size",
+ "type": "size"
+ },
+ {
+ "default-value": true,
+ "name": "use-disabled-flag",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "default-value": true,
+ "name": "dynamic-memslots",
+ "description": "on/off",
+ "type": "bool"
+ },
+ {
+ "name": "virtio-backend",
+ "type": "child<virtio-mem>"
+ }
+ ],
+ "id": "libvirt-30"
+}
+
{
"execute": "device-list-properties",
"arguments": {
"typename": "virtio-iommu-pci"
},
- "id": "libvirt-30"
+ "id": "libvirt-31"
}
{
@@ -30075,7 +30229,7 @@
"type": "child<virtio-iommu-device>"
}
],
- "id": "libvirt-30"
+ "id": "libvirt-31"
}
{
@@ -30083,7 +30237,7 @@
"arguments": {
"typename": "virtio-blk-ccw"
},
- "id": "libvirt-31"
+ "id": "libvirt-32"
}
{
@@ -30373,7 +30527,7 @@
"type": "bool"
}
],
- "id": "libvirt-31"
+ "id": "libvirt-32"
}
{
@@ -30381,7 +30535,7 @@
"arguments": {
"typename": "memory-backend-file"
},
- "id": "libvirt-32"
+ "id": "libvirt-33"
}
{
@@ -30471,7 +30625,7 @@
"type": "bool"
}
],
- "id": "libvirt-32"
+ "id": "libvirt-33"
}
{
@@ -30479,7 +30633,7 @@
"arguments": {
"typename": "memory-backend-memfd"
},
- "id": "libvirt-33"
+ "id": "libvirt-34"
}
{
@@ -30558,12 +30712,12 @@
"type": "int"
}
],
- "id": "libvirt-33"
+ "id": "libvirt-34"
}
{
"execute": "query-machines",
- "id": "libvirt-34"
+ "id": "libvirt-35"
}
{
@@ -30838,7 +30992,7 @@
"default-ram-id": "s390.ram"
}
],
- "id": "libvirt-34"
+ "id": "libvirt-35"
}
{
@@ -30846,7 +31000,7 @@
"arguments": {
"typename": "none-machine"
},
- "id": "libvirt-35"
+ "id": "libvirt-36"
}
{
@@ -30965,12 +31119,12 @@
"type": "bool"
}
],
- "id": "libvirt-35"
+ "id": "libvirt-36"
}
{
"execute": "query-cpu-definitions",
- "id": "libvirt-36"
+ "id": "libvirt-37"
}
{
@@ -31680,32 +31834,32 @@
"deprecated": false
}
],
- "id": "libvirt-36"
+ "id": "libvirt-37"
}
{
"execute": "query-tpm-models",
- "id": "libvirt-37"
+ "id": "libvirt-38"
}
{
"return": [],
- "id": "libvirt-37"
+ "id": "libvirt-38"
}
{
"execute": "query-tpm-types",
- "id": "libvirt-38"
+ "id": "libvirt-39"
}
{
"return": [],
- "id": "libvirt-38"
+ "id": "libvirt-39"
}
{
"execute": "query-command-line-options",
- "id": "libvirt-39"
+ "id": "libvirt-40"
}
{
@@ -32983,12 +33137,12 @@
"option": "drive"
}
],
- "id": "libvirt-39"
+ "id": "libvirt-40"
}
{
"execute": "query-migrate-capabilities",
- "id": "libvirt-40"
+ "id": "libvirt-41"
}
{
@@ -33082,7 +33236,7 @@
"capability": "mapped-ram"
}
],
- "id": "libvirt-40"
+ "id": "libvirt-41"
}
{
@@ -33093,7 +33247,7 @@
"name": "host"
}
},
- "id": "libvirt-41"
+ "id": "libvirt-42"
}
{
@@ -33165,7 +33319,7 @@
}
}
},
- "id": "libvirt-41"
+ "id": "libvirt-42"
}
{
@@ -33176,7 +33330,7 @@
"name": "host"
}
},
- "id": "libvirt-42"
+ "id": "libvirt-43"
}
{
@@ -33332,7 +33486,7 @@
}
}
},
- "id": "libvirt-42"
+ "id": "libvirt-43"
}
{
--
2.49.0