Plans for the next release
by Jiri Denemark
We are getting close to the next release of libvirt. To aim for the
release on Oct 03, I suggest entering the freeze on Tuesday Sep 27 and
tagging RC2 on Friday Sep 30.
I hope this works for everyone.
Jirka
Re: [PATCH v2 08/11] vfio/migration: Remove VFIO migration protocol v1
by Philippe Mathieu-Daudé
On Mon, May 30, 2022 at 7:56 PM Avihai Horon <avihaih(a)nvidia.com> wrote:
>
> Now that v2 protocol implementation has been added, remove the
> deprecated v1 implementation.
Worth a note in docs/about/deprecated.rst?
> Signed-off-by: Avihai Horon <avihaih(a)nvidia.com>
> ---
> hw/vfio/common.c | 19 +-
> hw/vfio/migration.c | 698 +---------------------------------
> hw/vfio/trace-events | 5 -
> include/hw/vfio/vfio-common.h | 5 -
> 4 files changed, 24 insertions(+), 703 deletions(-)
>
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index 5541133ec9..00c6cb0ffe 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -355,14 +355,7 @@ static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
> return false;
> }
>
> - if (!migration->v2 &&
> - (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF) &&
> - (migration->device_state_v1 & VFIO_DEVICE_STATE_V1_RUNNING)) {
> - return false;
> - }
> -
> - if (migration->v2 &&
> - (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF) &&
> + if ((vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF) &&
> (migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
> migration->device_state == VFIO_DEVICE_STATE_RUNNING_P2P)) {
> return false;
> @@ -393,14 +386,8 @@ static bool vfio_devices_all_running_and_mig_active(VFIOContainer *container)
> return false;
> }
>
> - if (!migration->v2 &&
> - migration->device_state_v1 & VFIO_DEVICE_STATE_V1_RUNNING) {
> - continue;
> - }
> -
> - if (migration->v2 &&
> - (migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
> - migration->device_state == VFIO_DEVICE_STATE_RUNNING_P2P)) {
> + if (migration->device_state == VFIO_DEVICE_STATE_RUNNING ||
> + migration->device_state == VFIO_DEVICE_STATE_RUNNING_P2P) {
> continue;
> } else {
> return false;
> diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
> index de68eadb09..852759e6ca 100644
> --- a/hw/vfio/migration.c
> +++ b/hw/vfio/migration.c
> @@ -121,220 +121,6 @@ static int vfio_migration_set_state(VFIODevice *vbasedev,
> return 0;
> }
>
> -static inline int vfio_mig_access(VFIODevice *vbasedev, void *val, int count,
> - off_t off, bool iswrite)
> -{
> - int ret;
> -
> - ret = iswrite ? pwrite(vbasedev->fd, val, count, off) :
> - pread(vbasedev->fd, val, count, off);
> - if (ret < count) {
> - error_report("vfio_mig_%s %d byte %s: failed at offset 0x%"
> - HWADDR_PRIx", err: %s", iswrite ? "write" : "read", count,
> - vbasedev->name, off, strerror(errno));
> - return (ret < 0) ? ret : -EINVAL;
> - }
> - return 0;
> -}
> -
> -static int vfio_mig_rw(VFIODevice *vbasedev, __u8 *buf, size_t count,
> - off_t off, bool iswrite)
> -{
> - int ret, done = 0;
> - __u8 *tbuf = buf;
> -
> - while (count) {
> - int bytes = 0;
> -
> - if (count >= 8 && !(off % 8)) {
> - bytes = 8;
> - } else if (count >= 4 && !(off % 4)) {
> - bytes = 4;
> - } else if (count >= 2 && !(off % 2)) {
> - bytes = 2;
> - } else {
> - bytes = 1;
> - }
> -
> - ret = vfio_mig_access(vbasedev, tbuf, bytes, off, iswrite);
> - if (ret) {
> - return ret;
> - }
> -
> - count -= bytes;
> - done += bytes;
> - off += bytes;
> - tbuf += bytes;
> - }
> - return done;
> -}
> -
> -#define vfio_mig_read(f, v, c, o) vfio_mig_rw(f, (__u8 *)v, c, o, false)
> -#define vfio_mig_write(f, v, c, o) vfio_mig_rw(f, (__u8 *)v, c, o, true)
> -
> -#define VFIO_MIG_STRUCT_OFFSET(f) \
> - offsetof(struct vfio_device_migration_info, f)
> -/*
> - * Change the device_state register for device @vbasedev. Bits set in @mask
> - * are preserved, bits set in @value are set, and bits not set in either @mask
> - * or @value are cleared in device_state. If the register cannot be accessed,
> - * the resulting state would be invalid, or the device enters an error state,
> - * an error is returned.
> - */
> -
> -static int vfio_migration_v1_set_state(VFIODevice *vbasedev, uint32_t mask,
> - uint32_t value)
> -{
> - VFIOMigration *migration = vbasedev->migration;
> - VFIORegion *region = &migration->region;
> - off_t dev_state_off = region->fd_offset +
> - VFIO_MIG_STRUCT_OFFSET(device_state);
> - uint32_t device_state;
> - int ret;
> -
> - ret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state),
> - dev_state_off);
> - if (ret < 0) {
> - return ret;
> - }
> -
> - device_state = (device_state & mask) | value;
> -
> - if (!VFIO_DEVICE_STATE_VALID(device_state)) {
> - return -EINVAL;
> - }
> -
> - ret = vfio_mig_write(vbasedev, &device_state, sizeof(device_state),
> - dev_state_off);
> - if (ret < 0) {
> - int rret;
> -
> - rret = vfio_mig_read(vbasedev, &device_state, sizeof(device_state),
> - dev_state_off);
> -
> - if ((rret < 0) || (VFIO_DEVICE_STATE_IS_ERROR(device_state))) {
> - hw_error("%s: Device in error state 0x%x", vbasedev->name,
> - device_state);
> - return rret ? rret : -EIO;
> - }
> - return ret;
> - }
> -
> - migration->device_state_v1 = device_state;
> - trace_vfio_migration_set_state(vbasedev->name, device_state);
> - return 0;
> -}
> -
> -static void *get_data_section_size(VFIORegion *region, uint64_t data_offset,
> - uint64_t data_size, uint64_t *size)
> -{
> - void *ptr = NULL;
> - uint64_t limit = 0;
> - int i;
> -
> - if (!region->mmaps) {
> - if (size) {
> - *size = MIN(data_size, region->size - data_offset);
> - }
> - return ptr;
> - }
> -
> - for (i = 0; i < region->nr_mmaps; i++) {
> - VFIOMmap *map = region->mmaps + i;
> -
> - if ((data_offset >= map->offset) &&
> - (data_offset < map->offset + map->size)) {
> -
> - /* check if data_offset is within sparse mmap areas */
> - ptr = map->mmap + data_offset - map->offset;
> - if (size) {
> - *size = MIN(data_size, map->offset + map->size - data_offset);
> - }
> - break;
> - } else if ((data_offset < map->offset) &&
> - (!limit || limit > map->offset)) {
> - /*
> - * data_offset is not within sparse mmap areas, find size of
> - * non-mapped area. Check through all list since region->mmaps list
> - * is not sorted.
> - */
> - limit = map->offset;
> - }
> - }
> -
> - if (!ptr && size) {
> - *size = limit ? MIN(data_size, limit - data_offset) : data_size;
> - }
> - return ptr;
> -}
> -
> -static int vfio_save_buffer(QEMUFile *f, VFIODevice *vbasedev, uint64_t *size)
> -{
> - VFIOMigration *migration = vbasedev->migration;
> - VFIORegion *region = &migration->region;
> - uint64_t data_offset = 0, data_size = 0, sz;
> - int ret;
> -
> - ret = vfio_mig_read(vbasedev, &data_offset, sizeof(data_offset),
> - region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_offset));
> - if (ret < 0) {
> - return ret;
> - }
> -
> - ret = vfio_mig_read(vbasedev, &data_size, sizeof(data_size),
> - region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_size));
> - if (ret < 0) {
> - return ret;
> - }
> -
> - trace_vfio_save_buffer(vbasedev->name, data_offset, data_size,
> - migration->pending_bytes);
> -
> - qemu_put_be64(f, data_size);
> - sz = data_size;
> -
> - while (sz) {
> - void *buf;
> - uint64_t sec_size;
> - bool buf_allocated = false;
> -
> - buf = get_data_section_size(region, data_offset, sz, &sec_size);
> -
> - if (!buf) {
> - buf = g_try_malloc(sec_size);
> - if (!buf) {
> - error_report("%s: Error allocating buffer ", __func__);
> - return -ENOMEM;
> - }
> - buf_allocated = true;
> -
> - ret = vfio_mig_read(vbasedev, buf, sec_size,
> - region->fd_offset + data_offset);
> - if (ret < 0) {
> - g_free(buf);
> - return ret;
> - }
> - }
> -
> - qemu_put_buffer(f, buf, sec_size);
> -
> - if (buf_allocated) {
> - g_free(buf);
> - }
> - sz -= sec_size;
> - data_offset += sec_size;
> - }
> -
> - ret = qemu_file_get_error(f);
> -
> - if (!ret && size) {
> - *size = data_size;
> - }
> -
> - bytes_transferred += data_size;
> - return ret;
> -}
> -
> static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev,
> uint64_t data_size)
> {
> @@ -351,96 +137,6 @@ static int vfio_load_buffer(QEMUFile *f, VFIODevice *vbasedev,
> return 0;
> }
>
> -static int vfio_v1_load_buffer(QEMUFile *f, VFIODevice *vbasedev,
> - uint64_t data_size)
> -{
> - VFIORegion *region = &vbasedev->migration->region;
> - uint64_t data_offset = 0, size, report_size;
> - int ret;
> -
> - do {
> - ret = vfio_mig_read(vbasedev, &data_offset, sizeof(data_offset),
> - region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_offset));
> - if (ret < 0) {
> - return ret;
> - }
> -
> - if (data_offset + data_size > region->size) {
> - /*
> - * If data_size is greater than the data section of migration region
> - * then iterate the write buffer operation. This case can occur if
> - * size of migration region at destination is smaller than size of
> - * migration region at source.
> - */
> - report_size = size = region->size - data_offset;
> - data_size -= size;
> - } else {
> - report_size = size = data_size;
> - data_size = 0;
> - }
> -
> - trace_vfio_v1_load_state_device_data(vbasedev->name, data_offset, size);
> -
> - while (size) {
> - void *buf;
> - uint64_t sec_size;
> - bool buf_alloc = false;
> -
> - buf = get_data_section_size(region, data_offset, size, &sec_size);
> -
> - if (!buf) {
> - buf = g_try_malloc(sec_size);
> - if (!buf) {
> - error_report("%s: Error allocating buffer ", __func__);
> - return -ENOMEM;
> - }
> - buf_alloc = true;
> - }
> -
> - qemu_get_buffer(f, buf, sec_size);
> -
> - if (buf_alloc) {
> - ret = vfio_mig_write(vbasedev, buf, sec_size,
> - region->fd_offset + data_offset);
> - g_free(buf);
> -
> - if (ret < 0) {
> - return ret;
> - }
> - }
> - size -= sec_size;
> - data_offset += sec_size;
> - }
> -
> - ret = vfio_mig_write(vbasedev, &report_size, sizeof(report_size),
> - region->fd_offset + VFIO_MIG_STRUCT_OFFSET(data_size));
> - if (ret < 0) {
> - return ret;
> - }
> - } while (data_size);
> -
> - return 0;
> -}
> -
> -static int vfio_update_pending(VFIODevice *vbasedev)
> -{
> - VFIOMigration *migration = vbasedev->migration;
> - VFIORegion *region = &migration->region;
> - uint64_t pending_bytes = 0;
> - int ret;
> -
> - ret = vfio_mig_read(vbasedev, &pending_bytes, sizeof(pending_bytes),
> - region->fd_offset + VFIO_MIG_STRUCT_OFFSET(pending_bytes));
> - if (ret < 0) {
> - migration->pending_bytes = 0;
> - return ret;
> - }
> -
> - migration->pending_bytes = pending_bytes;
> - trace_vfio_update_pending(vbasedev->name, pending_bytes);
> - return 0;
> -}
> -
> static int vfio_save_device_config_state(QEMUFile *f, void *opaque)
> {
> VFIODevice *vbasedev = opaque;
> @@ -493,15 +189,6 @@ static void vfio_migration_cleanup(VFIODevice *vbasedev)
> migration->data_fd = -1;
> }
>
> -static void vfio_migration_v1_cleanup(VFIODevice *vbasedev)
> -{
> - VFIOMigration *migration = vbasedev->migration;
> -
> - if (migration->region.mmaps) {
> - vfio_region_unmap(&migration->region);
> - }
> -}
> -
> /* ---------------------------------------------------------------------- */
>
> static int vfio_save_setup(QEMUFile *f, void *opaque)
> @@ -516,49 +203,6 @@ static int vfio_save_setup(QEMUFile *f, void *opaque)
> return qemu_file_get_error(f);
> }
>
> -static int vfio_v1_save_setup(QEMUFile *f, void *opaque)
> -{
> - VFIODevice *vbasedev = opaque;
> - VFIOMigration *migration = vbasedev->migration;
> - int ret;
> -
> - trace_vfio_save_setup(vbasedev->name);
> -
> - qemu_put_be64(f, VFIO_MIG_FLAG_DEV_SETUP_STATE);
> -
> - if (migration->region.mmaps) {
> - /*
> - * Calling vfio_region_mmap() from migration thread. Memory API called
> - * from this function require locking the iothread when called from
> - * outside the main loop thread.
> - */
> - qemu_mutex_lock_iothread();
> - ret = vfio_region_mmap(&migration->region);
> - qemu_mutex_unlock_iothread();
> - if (ret) {
> - error_report("%s: Failed to mmap VFIO migration region: %s",
> - vbasedev->name, strerror(-ret));
> - error_report("%s: Falling back to slow path", vbasedev->name);
> - }
> - }
> -
> - ret = vfio_migration_v1_set_state(vbasedev, VFIO_DEVICE_STATE_MASK,
> - VFIO_DEVICE_STATE_V1_SAVING);
> - if (ret) {
> - error_report("%s: Failed to set state SAVING", vbasedev->name);
> - return ret;
> - }
> -
> - qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
> -
> - ret = qemu_file_get_error(f);
> - if (ret) {
> - return ret;
> - }
> -
> - return 0;
> -}
> -
> static void vfio_save_cleanup(void *opaque)
> {
> VFIODevice *vbasedev = opaque;
> @@ -567,82 +211,6 @@ static void vfio_save_cleanup(void *opaque)
> trace_vfio_save_cleanup(vbasedev->name);
> }
>
> -static void vfio_v1_save_cleanup(void *opaque)
> -{
> - VFIODevice *vbasedev = opaque;
> -
> - vfio_migration_v1_cleanup(vbasedev);
> - trace_vfio_save_cleanup(vbasedev->name);
> -}
> -
> -static void vfio_save_pending(QEMUFile *f, void *opaque,
> - uint64_t threshold_size,
> - uint64_t *res_precopy_only,
> - uint64_t *res_compatible,
> - uint64_t *res_postcopy_only)
> -{
> - VFIODevice *vbasedev = opaque;
> - VFIOMigration *migration = vbasedev->migration;
> - int ret;
> -
> - ret = vfio_update_pending(vbasedev);
> - if (ret) {
> - return;
> - }
> -
> - *res_precopy_only += migration->pending_bytes;
> -
> - trace_vfio_save_pending(vbasedev->name, *res_precopy_only,
> - *res_postcopy_only, *res_compatible);
> -}
> -
> -static int vfio_save_iterate(QEMUFile *f, void *opaque)
> -{
> - VFIODevice *vbasedev = opaque;
> - VFIOMigration *migration = vbasedev->migration;
> - uint64_t data_size;
> - int ret;
> -
> - qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);
> -
> - if (migration->pending_bytes == 0) {
> - ret = vfio_update_pending(vbasedev);
> - if (ret) {
> - return ret;
> - }
> -
> - if (migration->pending_bytes == 0) {
> - qemu_put_be64(f, 0);
> - qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
> - /* indicates data finished, goto complete phase */
> - return 1;
> - }
> - }
> -
> - ret = vfio_save_buffer(f, vbasedev, &data_size);
> - if (ret) {
> - error_report("%s: vfio_save_buffer failed %s", vbasedev->name,
> - strerror(errno));
> - return ret;
> - }
> -
> - qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
> -
> - ret = qemu_file_get_error(f);
> - if (ret) {
> - return ret;
> - }
> -
> - /*
> - * Reset pending_bytes as .save_live_pending is not called during savevm or
> - * snapshot case, in such case vfio_update_pending() at the start of this
> - * function updates pending_bytes.
> - */
> - migration->pending_bytes = 0;
> - trace_vfio_save_iterate(vbasedev->name, data_size);
> - return 0;
> -}
> -
> /* Returns 1 if end-of-stream is reached, 0 if more data and -1 if error */
> static int vfio_save_block(QEMUFile *f, VFIOMigration *migration)
> {
> @@ -706,62 +274,6 @@ static int vfio_save_complete_precopy(QEMUFile *f, void *opaque)
> return 0;
> }
>
> -static int vfio_v1_save_complete_precopy(QEMUFile *f, void *opaque)
> -{
> - VFIODevice *vbasedev = opaque;
> - VFIOMigration *migration = vbasedev->migration;
> - uint64_t data_size;
> - int ret;
> -
> - ret = vfio_migration_v1_set_state(vbasedev, ~VFIO_DEVICE_STATE_V1_RUNNING,
> - VFIO_DEVICE_STATE_V1_SAVING);
> - if (ret) {
> - error_report("%s: Failed to set state STOP and SAVING",
> - vbasedev->name);
> - return ret;
> - }
> -
> - ret = vfio_update_pending(vbasedev);
> - if (ret) {
> - return ret;
> - }
> -
> - while (migration->pending_bytes > 0) {
> - qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE);
> - ret = vfio_save_buffer(f, vbasedev, &data_size);
> - if (ret < 0) {
> - error_report("%s: Failed to save buffer", vbasedev->name);
> - return ret;
> - }
> -
> - if (data_size == 0) {
> - break;
> - }
> -
> - ret = vfio_update_pending(vbasedev);
> - if (ret) {
> - return ret;
> - }
> - }
> -
> - qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE);
> -
> - ret = qemu_file_get_error(f);
> - if (ret) {
> - return ret;
> - }
> -
> - ret = vfio_migration_v1_set_state(vbasedev, ~VFIO_DEVICE_STATE_V1_SAVING,
> - 0);
> - if (ret) {
> - error_report("%s: Failed to set state STOPPED", vbasedev->name);
> - return ret;
> - }
> -
> - trace_vfio_save_complete_precopy(vbasedev->name);
> - return ret;
> -}
> -
> static void vfio_save_state(QEMUFile *f, void *opaque)
> {
> VFIODevice *vbasedev = opaque;
> @@ -783,33 +295,6 @@ static int vfio_load_setup(QEMUFile *f, void *opaque)
> vbasedev->migration->device_state);
> }
>
> -static int vfio_v1_load_setup(QEMUFile *f, void *opaque)
> -{
> - VFIODevice *vbasedev = opaque;
> - VFIOMigration *migration = vbasedev->migration;
> - int ret = 0;
> -
> - if (migration->region.mmaps) {
> - ret = vfio_region_mmap(&migration->region);
> - if (ret) {
> - error_report("%s: Failed to mmap VFIO migration region %d: %s",
> - vbasedev->name, migration->region.nr,
> - strerror(-ret));
> - error_report("%s: Falling back to slow path", vbasedev->name);
> - }
> - }
> -
> - ret = vfio_migration_v1_set_state(vbasedev, ~VFIO_DEVICE_STATE_MASK,
> - VFIO_DEVICE_STATE_V1_RESUMING);
> - if (ret) {
> - error_report("%s: Failed to set state RESUMING", vbasedev->name);
> - if (migration->region.mmaps) {
> - vfio_region_unmap(&migration->region);
> - }
> - }
> - return ret;
> -}
> -
> static int vfio_load_cleanup(void *opaque)
> {
> VFIODevice *vbasedev = opaque;
> @@ -819,15 +304,6 @@ static int vfio_load_cleanup(void *opaque)
> return 0;
> }
>
> -static int vfio_v1_load_cleanup(void *opaque)
> -{
> - VFIODevice *vbasedev = opaque;
> -
> - vfio_migration_v1_cleanup(vbasedev);
> - trace_vfio_load_cleanup(vbasedev->name);
> - return 0;
> -}
> -
> static int vfio_load_state(QEMUFile *f, void *opaque, int version_id)
> {
> VFIODevice *vbasedev = opaque;
> @@ -861,11 +337,7 @@ static int vfio_load_state(QEMUFile *f, void *opaque, int version_id)
> uint64_t data_size = qemu_get_be64(f);
>
> if (data_size) {
> - if (vbasedev->migration->v2) {
> - ret = vfio_load_buffer(f, vbasedev, data_size);
> - } else {
> - ret = vfio_v1_load_buffer(f, vbasedev, data_size);
> - }
> + ret = vfio_load_buffer(f, vbasedev, data_size);
> if (ret < 0) {
> return ret;
> }
> @@ -896,18 +368,6 @@ static SaveVMHandlers savevm_vfio_handlers = {
> .load_state = vfio_load_state,
> };
>
> -static SaveVMHandlers savevm_vfio_v1_handlers = {
> - .save_setup = vfio_v1_save_setup,
> - .save_cleanup = vfio_v1_save_cleanup,
> - .save_live_pending = vfio_save_pending,
> - .save_live_iterate = vfio_save_iterate,
> - .save_live_complete_precopy = vfio_v1_save_complete_precopy,
> - .save_state = vfio_save_state,
> - .load_setup = vfio_v1_load_setup,
> - .load_cleanup = vfio_v1_load_cleanup,
> - .load_state = vfio_load_state,
> -};
> -
> /* ---------------------------------------------------------------------- */
>
> static void vfio_vmstate_change(void *opaque, bool running, RunState state)
> @@ -938,70 +398,12 @@ static void vfio_vmstate_change(void *opaque, bool running, RunState state)
> new_state);
> }
>
> -static void vfio_v1_vmstate_change(void *opaque, bool running, RunState state)
> -{
> - VFIODevice *vbasedev = opaque;
> - VFIOMigration *migration = vbasedev->migration;
> - uint32_t value, mask;
> - int ret;
> -
> - if (vbasedev->migration->vm_running == running) {
> - return;
> - }
> -
> - if (running) {
> - /*
> - * Here device state can have one of _SAVING, _RESUMING or _STOP bit.
> - * Transition from _SAVING to _RUNNING can happen if there is migration
> - * failure, in that case clear _SAVING bit.
> - * Transition from _RESUMING to _RUNNING occurs during resuming
> - * phase, in that case clear _RESUMING bit.
> - * In both the above cases, set _RUNNING bit.
> - */
> - mask = ~VFIO_DEVICE_STATE_MASK;
> - value = VFIO_DEVICE_STATE_V1_RUNNING;
> - } else {
> - /*
> - * Here device state could be either _RUNNING or _SAVING|_RUNNING. Reset
> - * _RUNNING bit
> - */
> - mask = ~VFIO_DEVICE_STATE_V1_RUNNING;
> -
> - /*
> - * When VM state transition to stop for savevm command, device should
> - * start saving data.
> - */
> - if (state == RUN_STATE_SAVE_VM) {
> - value = VFIO_DEVICE_STATE_V1_SAVING;
> - } else {
> - value = 0;
> - }
> - }
> -
> - ret = vfio_migration_v1_set_state(vbasedev, mask, value);
> - if (ret) {
> - /*
> - * Migration should be aborted in this case, but vm_state_notify()
> - * currently does not support reporting failures.
> - */
> - error_report("%s: Failed to set device state 0x%x", vbasedev->name,
> - (migration->device_state_v1 & mask) | value);
> - if (migrate_get_current()->to_dst_file) {
> - qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
> - }
> - }
> - vbasedev->migration->vm_running = running;
> - trace_vfio_vmstate_change(vbasedev->name, running, RunState_str(state),
> - (migration->device_state_v1 & mask) | value);
> -}
> -
> static void vfio_migration_state_notifier(Notifier *notifier, void *data)
> {
> MigrationState *s = data;
> VFIOMigration *migration = container_of(notifier, VFIOMigration,
> migration_state);
> VFIODevice *vbasedev = migration->vbasedev;
> - int ret;
>
> trace_vfio_migration_state_notifier(vbasedev->name,
> MigrationStatus_str(s->state));
> @@ -1011,31 +413,14 @@ static void vfio_migration_state_notifier(Notifier *notifier, void *data)
> case MIGRATION_STATUS_CANCELLED:
> case MIGRATION_STATUS_FAILED:
> bytes_transferred = 0;
> - if (migration->v2) {
> - vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_RUNNING,
> - VFIO_DEVICE_STATE_ERROR);
> - } else {
> - ret = vfio_migration_v1_set_state(vbasedev,
> - ~(VFIO_DEVICE_STATE_V1_SAVING |
> - VFIO_DEVICE_STATE_V1_RESUMING),
> - VFIO_DEVICE_STATE_V1_RUNNING);
> - if (ret) {
> - error_report("%s: Failed to set state RUNNING", vbasedev->name);
> - }
> - }
> + vfio_migration_set_state(vbasedev, VFIO_DEVICE_STATE_RUNNING,
> + VFIO_DEVICE_STATE_ERROR);
> }
> }
>
> static void vfio_migration_exit(VFIODevice *vbasedev)
> {
> - VFIOMigration *migration = vbasedev->migration;
> -
> - if (migration->v2) {
> - g_free(migration->data_buffer);
> - } else {
> - vfio_region_exit(&migration->region);
> - vfio_region_finalize(&migration->region);
> - }
> + g_free(vbasedev->migration->data_buffer);
> g_free(vbasedev->migration);
> vbasedev->migration = NULL;
> }
> @@ -1066,7 +451,6 @@ static int vfio_migration_init(VFIODevice *vbasedev)
> VFIOMigration *migration;
> char id[256] = "";
> g_autofree char *path = NULL, *oid = NULL;
> - struct vfio_region_info *info = NULL;
> uint64_t mig_flags;
>
> if (!vbasedev->ops->vfio_get_object) {
> @@ -1079,48 +463,20 @@ static int vfio_migration_init(VFIODevice *vbasedev)
> }
>
> ret = vfio_migration_query_flags(vbasedev, &mig_flags);
> - if (!ret) {
> - /* Migration v2 */
> - /* Basic migration functionality must be supported */
> - if (!(mig_flags & VFIO_MIGRATION_STOP_COPY)) {
> - return -EOPNOTSUPP;
> - }
> - vbasedev->migration = g_new0(VFIOMigration, 1);
> - vbasedev->migration->data_buffer_size = VFIO_MIG_DATA_BUFFER_SIZE;
> - vbasedev->migration->data_buffer =
> - g_malloc0(vbasedev->migration->data_buffer_size);
> - vbasedev->migration->data_fd = -1;
> - vbasedev->migration->v2 = true;
> - } else {
> - /* Migration v1 */
> - ret = vfio_get_dev_region_info(vbasedev,
> - VFIO_REGION_TYPE_MIGRATION_DEPRECATED,
> - VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED,
> - &info);
> - if (ret) {
> - return ret;
> - }
> -
> - vbasedev->migration = g_new0(VFIOMigration, 1);
> -
> - ret = vfio_region_setup(obj, vbasedev, &vbasedev->migration->region,
> - info->index, "migration");
> - if (ret) {
> - error_report("%s: Failed to setup VFIO migration region %d: %s",
> - vbasedev->name, info->index, strerror(-ret));
> - goto err;
> - }
> -
> - if (!vbasedev->migration->region.size) {
> - error_report("%s: Invalid zero-sized VFIO migration region %d",
> - vbasedev->name, info->index);
> - ret = -EINVAL;
> - goto err;
> - }
> + if (ret) {
> + return ret;
> + }
>
> - g_free(info);
> + /* Basic migration functionality must be supported */
> + if (!(mig_flags & VFIO_MIGRATION_STOP_COPY)) {
> + return -EOPNOTSUPP;
> }
>
> + vbasedev->migration = g_new0(VFIOMigration, 1);
> + vbasedev->migration->data_buffer_size = VFIO_MIG_DATA_BUFFER_SIZE;
> + vbasedev->migration->data_buffer =
> + g_malloc0(vbasedev->migration->data_buffer_size);
> + vbasedev->migration->data_fd = -1;
> migration = vbasedev->migration;
> migration->vbasedev = vbasedev;
>
> @@ -1132,28 +488,16 @@ static int vfio_migration_init(VFIODevice *vbasedev)
> }
> strpadcpy(id, sizeof(id), path, '\0');
>
> - if (migration->v2) {
> - register_savevm_live(id, VMSTATE_INSTANCE_ID_ANY, 1,
> - &savevm_vfio_handlers, vbasedev);
> -
> - migration->vm_state = qdev_add_vm_change_state_handler(
> - vbasedev->dev, vfio_vmstate_change, vbasedev);
> - } else {
> - register_savevm_live(id, VMSTATE_INSTANCE_ID_ANY, 1,
> - &savevm_vfio_v1_handlers, vbasedev);
> -
> - migration->vm_state = qdev_add_vm_change_state_handler(
> - vbasedev->dev, vfio_v1_vmstate_change, vbasedev);
> - }
> + register_savevm_live(id, VMSTATE_INSTANCE_ID_ANY, 1, &savevm_vfio_handlers,
> + vbasedev);
>
> + migration->vm_state = qdev_add_vm_change_state_handler(vbasedev->dev,
> + vfio_vmstate_change,
> + vbasedev);
> migration->migration_state.notify = vfio_migration_state_notifier;
> add_migration_state_change_notifier(&migration->migration_state);
> - return 0;
>
> -err:
> - g_free(info);
> - vfio_migration_exit(vbasedev);
> - return ret;
> + return 0;
> }
>
> /* ---------------------------------------------------------------------- */
> diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
> index 6e8c5958b9..a24ea7d8b0 100644
> --- a/hw/vfio/trace-events
> +++ b/hw/vfio/trace-events
> @@ -154,15 +154,10 @@ vfio_vmstate_change(const char *name, int running, const char *reason, uint32_t
> vfio_migration_state_notifier(const char *name, const char *state) " (%s) state %s"
> vfio_save_setup(const char *name) " (%s)"
> vfio_save_cleanup(const char *name) " (%s)"
> -vfio_save_buffer(const char *name, uint64_t data_offset, uint64_t data_size, uint64_t pending) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64" pending 0x%"PRIx64
> -vfio_update_pending(const char *name, uint64_t pending) " (%s) pending 0x%"PRIx64
> vfio_save_device_config_state(const char *name) " (%s)"
> -vfio_save_pending(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t compatible) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" compatible 0x%"PRIx64
> -vfio_save_iterate(const char *name, int data_size) " (%s) data_size %d"
> vfio_save_complete_precopy(const char *name) " (%s)"
> vfio_load_device_config_state(const char *name) " (%s)"
> vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
> -vfio_v1_load_state_device_data(const char *name, uint64_t data_offset, uint64_t data_size) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64
> vfio_load_state_device_data(const char *name, uint64_t data_size) " (%s) size 0x%"PRIx64
> vfio_load_cleanup(const char *name) " (%s)"
> vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index 2ec3346fea..76d470178f 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -61,16 +61,11 @@ typedef struct VFIORegion {
> typedef struct VFIOMigration {
> struct VFIODevice *vbasedev;
> VMChangeStateEntry *vm_state;
> - VFIORegion region;
> - uint32_t device_state_v1;
> - int vm_running;
> Notifier migration_state;
> - uint64_t pending_bytes;
> enum vfio_device_mig_state device_state;
> int data_fd;
> void *data_buffer;
> size_t data_buffer_size;
> - bool v2;
> } VFIOMigration;
>
> typedef struct VFIOAddressSpace {
> --
> 2.21.3
>
>
[PATCH v2 00/17] jobs: finish generalization
by Kristina Hanicova
v1 here: https://listman.redhat.com/archives/libvir-list/2022-August/233908.html
diff to v1:
* changes in the first commit: typo, currentAPI in warning I forgot to
move and renamed VIR_FROM_HYPERV to VIR_FROM_NONE as Jano suggested
Kristina Hanicova (17):
qemu & hypervisor: move qemuDomainObjBeginJobInternal() into
hypervisor
libxl: remove usage of virDomainJobData
move files: hypervisor/domain_job -> conf/virdomainjob
virdomainjob: add check for callbacks
conf: extend xmlopt with job config & add job object into domain
object
virdomainjob: make drivers use job object in the domain object
qemu: use virDomainObjBeginJob()
libxl: use virDomainObjBeginJob()
LXC: use virDomainObjBeginJob()
CH: use virDomainObjBeginJob()
qemu: use virDomainObjEndJob()
libxl: use virDomainObjEndJob()
LXC: use virDomainObjEndJob()
CH: use virDomainObjEndJob()
qemu & conf: move BeginAgentJob & EndAgentJob into
src/conf/virdomainjob
qemu & conf: move BeginAsyncJob & EndAsyncJob into src/conf
qemu & conf: move BeginNestedJob & BeginJobNowait into src/conf
docs/kbase/internals/qemu-threads.rst | 34 +-
po/POTFILES | 1 +
src/bhyve/bhyve_domain.c | 2 +-
src/ch/ch_conf.c | 2 +-
src/ch/ch_domain.c | 76 --
src/ch/ch_domain.h | 11 +-
src/ch/ch_driver.c | 40 +-
src/conf/domain_conf.c | 14 +-
src/conf/domain_conf.h | 16 +-
src/conf/meson.build | 1 +
src/conf/virconftypes.h | 2 +
src/conf/virdomainjob.c | 665 ++++++++++++++++++
.../domain_job.h => conf/virdomainjob.h} | 37 +-
src/hyperv/hyperv_driver.c | 2 +-
src/hypervisor/domain_job.c | 249 -------
src/hypervisor/meson.build | 1 -
src/libvirt_private.syms | 52 +-
src/libxl/libxl_conf.c | 2 +-
src/libxl/libxl_domain.c | 113 +--
src/libxl/libxl_domain.h | 18 +-
src/libxl/libxl_driver.c | 123 ++--
src/libxl/libxl_migration.c | 20 +-
src/lxc/lxc_conf.c | 2 +-
src/lxc/lxc_domain.c | 86 ---
src/lxc/lxc_domain.h | 14 +-
src/lxc/lxc_driver.c | 103 ++-
src/openvz/openvz_conf.c | 2 +-
src/qemu/qemu_backup.c | 24 +-
src/qemu/qemu_checkpoint.c | 12 +-
src/qemu/qemu_conf.c | 7 +-
src/qemu/qemu_domain.c | 83 ++-
src/qemu/qemu_domain.h | 3 +-
src/qemu/qemu_domainjob.c | 560 ++-------------
src/qemu/qemu_domainjob.h | 25 +-
src/qemu/qemu_driver.c | 482 +++++++------
src/qemu/qemu_migration.c | 201 +++---
src/qemu/qemu_migration_cookie.c | 17 +-
src/qemu/qemu_migration_cookie.h | 3 +-
src/qemu/qemu_migration_params.c | 8 +-
src/qemu/qemu_process.c | 95 ++-
src/qemu/qemu_snapshot.c | 20 +-
src/security/virt-aa-helper.c | 2 +-
src/test/test_driver.c | 2 +-
src/vbox/vbox_common.c | 2 +-
src/vmware/vmware_driver.c | 2 +-
src/vmx/vmx.c | 2 +-
src/vz/vz_driver.c | 2 +-
tests/bhyveargv2xmltest.c | 2 +-
tests/qemumigrationcookiexmltest.c | 3 +-
tests/testutils.c | 2 +-
50 files changed, 1476 insertions(+), 1771 deletions(-)
create mode 100644 src/conf/virdomainjob.c
rename src/{hypervisor/domain_job.h => conf/virdomainjob.h} (86%)
delete mode 100644 src/hypervisor/domain_job.c
--
2.37.2
[PATCH] Log alarms should be generated even if the QEMU process is not stopped
by dinglimin
Signed-off-by: dinglimin <dinglimin(a)cmss.chinamobile.com>
---
src/qemu/qemu_process.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index cbfdd3bda5..e025e10873 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -8006,9 +8006,11 @@ qemuProcessKill(virDomainObj *vm, unsigned int flags)
}
if (flags & VIR_QEMU_PROCESS_KILL_NOWAIT) {
- virProcessKill(vm->pid,
+ if (virProcessKill(vm->pid,
(flags & VIR_QEMU_PROCESS_KILL_FORCE) ?
- SIGKILL : SIGTERM);
+ SIGKILL : SIGTERM) != 0) {
+ VIR_DEBUG("Failed to terminate qemu process");
+ }
return 0;
}
--
2.30.0.windows.2
[PATCH] Two branches in a conditional structure should not have exactly the same implementation
by dinglimin
Signed-off-by: dinglimin <dinglimin(a)cmss.chinamobile.com>
---
scripts/apibuild.py | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/scripts/apibuild.py b/scripts/apibuild.py
index c232b4e2c8..5993318490 100755
--- a/scripts/apibuild.py
+++ b/scripts/apibuild.py
@@ -1248,10 +1248,8 @@ class CParser:
token[1] == "," or token[1] == "="):
self.index_add_ref(oldtok[1], self.filename,
0, "type")
- elif oldtok[0] == "name" and oldtok[1][0:4] == "XEN_":
- self.index_add_ref(oldtok[1], self.filename,
- 0, "typedef")
- elif oldtok[0] == "name" and oldtok[1][0:7] == "LIBXEN_":
+ elif oldtok[0] == "name" and (oldtok[1][0:4] == "XEN_" or
+ oldtok[1][0:7] == "LIBXEN_"):
self.index_add_ref(oldtok[1], self.filename,
0, "typedef")
--
2.30.0.windows.2
[PATCH 0/3] domain capabilities improvements
by Jim Fehlig
The first two patches add USB redirect and channel devices to
domcapabilities. Patch3 fixes reporting of spice support.
Patch1 was sent previously, before I noticed the others were needed
https://listman.redhat.com/archives/libvir-list/2022-August/234045.html
Jim Fehlig (3):
conf: Add USB redirect devices to domain capabilities
conf: Add channel devices to domain capabilities
qemu: Use command line to properly check for spice support
docs/formatdomaincaps.rst | 46 +++++++++++++++++++
src/conf/domain_capabilities.c | 26 +++++++++++
src/conf/domain_capabilities.h | 16 +++++++
src/conf/schemas/domaincaps.rng | 20 ++++++++
src/qemu/qemu_capabilities.c | 35 +++++++++++++-
src/qemu/qemu_capabilities.h | 6 +++
.../domaincapsdata/qemu_4.2.0-q35.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_4.2.0-tcg.x86_64.xml | 12 +++++
.../qemu_4.2.0-virt.aarch64.xml | 7 +++
tests/domaincapsdata/qemu_4.2.0.aarch64.xml | 7 +++
tests/domaincapsdata/qemu_4.2.0.ppc64.xml | 7 +++
tests/domaincapsdata/qemu_4.2.0.s390x.xml | 11 +++++
tests/domaincapsdata/qemu_4.2.0.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_5.0.0-q35.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_5.0.0-tcg.x86_64.xml | 12 +++++
.../qemu_5.0.0-virt.aarch64.xml | 7 +++
tests/domaincapsdata/qemu_5.0.0.aarch64.xml | 7 +++
tests/domaincapsdata/qemu_5.0.0.ppc64.xml | 7 +++
tests/domaincapsdata/qemu_5.0.0.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_5.1.0-q35.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_5.1.0-tcg.x86_64.xml | 12 +++++
tests/domaincapsdata/qemu_5.1.0.sparc.xml | 12 +++++
tests/domaincapsdata/qemu_5.1.0.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_5.2.0-q35.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_5.2.0-tcg.x86_64.xml | 12 +++++
.../qemu_5.2.0-virt.aarch64.xml | 7 +++
tests/domaincapsdata/qemu_5.2.0.aarch64.xml | 7 +++
tests/domaincapsdata/qemu_5.2.0.ppc64.xml | 7 +++
tests/domaincapsdata/qemu_5.2.0.s390x.xml | 11 +++++
tests/domaincapsdata/qemu_5.2.0.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_6.0.0-q35.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_6.0.0-tcg.x86_64.xml | 12 +++++
.../qemu_6.0.0-virt.aarch64.xml | 7 +++
tests/domaincapsdata/qemu_6.0.0.aarch64.xml | 7 +++
tests/domaincapsdata/qemu_6.0.0.s390x.xml | 11 +++++
tests/domaincapsdata/qemu_6.0.0.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_6.1.0-q35.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_6.1.0-tcg.x86_64.xml | 12 +++++
tests/domaincapsdata/qemu_6.1.0.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_6.2.0-q35.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_6.2.0-tcg.x86_64.xml | 12 +++++
.../qemu_6.2.0-virt.aarch64.xml | 12 +++++
tests/domaincapsdata/qemu_6.2.0.aarch64.xml | 12 +++++
tests/domaincapsdata/qemu_6.2.0.ppc64.xml | 7 +++
tests/domaincapsdata/qemu_6.2.0.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_7.0.0-q35.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_7.0.0-tcg.x86_64.xml | 12 +++++
.../qemu_7.0.0-virt.aarch64.xml | 12 +++++
tests/domaincapsdata/qemu_7.0.0.aarch64.xml | 12 +++++
tests/domaincapsdata/qemu_7.0.0.ppc64.xml | 11 +++++
tests/domaincapsdata/qemu_7.0.0.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_7.1.0-q35.x86_64.xml | 12 +++++
.../domaincapsdata/qemu_7.1.0-tcg.x86_64.xml | 12 +++++
tests/domaincapsdata/qemu_7.1.0.x86_64.xml | 12 +++++
.../caps_4.2.0.x86_64.xml | 1 +
.../caps_5.0.0.riscv64.xml | 1 +
.../caps_5.0.0.x86_64.xml | 1 +
.../qemucapabilitiesdata/caps_5.1.0.sparc.xml | 1 +
.../caps_5.1.0.x86_64.xml | 1 +
.../caps_5.2.0.riscv64.xml | 1 +
.../caps_5.2.0.x86_64.xml | 1 +
.../caps_6.0.0.x86_64.xml | 1 +
.../caps_6.1.0.x86_64.xml | 1 +
.../caps_6.2.0.aarch64.xml | 1 +
.../caps_6.2.0.x86_64.xml | 1 +
.../caps_7.0.0.aarch64.xml | 1 +
.../caps_7.0.0.x86_64.xml | 1 +
.../caps_7.1.0.x86_64.xml | 1 +
68 files changed, 674 insertions(+), 1 deletion(-)
--
2.37.1
Adding support for multiple PCI domains (multiple root controllers)
by Richard Hansen
I would like to add support for multiple PCI domains to libvirt. My main motivation is bug #360 [1], though it would also make pSeries machine configurations more intuitive (they can have multiple pci-root controllers).
Currently, every PCI controller is in PCI domain 0, and the controller's index number equals its bus ID. To support multiple domains, my plan is to map domain:bus ID pairs to/from controller index numbers as follows:
* A root controller with index N establishes domain N, and provides bus 0 in that domain.
* A non-root controller with index N, plugged into a PCI controller in domain D, provides bus N in domain D.
Furthermore, each non-root controller must have a higher index number than the controller it plugs in to, so the controller with index 0 is guaranteed to be a root controller. This means that in the common case of only a single domain, the behavior would be the same as it is now (domain == 0, controller index == bus ID).
Example domain:bus pairs:
* 0000:00 would identify the bus provided by the root controller with index 0 (as it does now).
* 000f:1b would identify the bus provided by the non-root controller with index 27 (0x1b), and that controller is plugged into domain 15 (0xf).
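In rough C, the mapping I have in mind looks something like the sketch below; the struct and function names (virPCIDomainBus, virPCIControllerIndexToAddress, parentDomain) are made up purely for illustration and don't correspond to anything in the current code:

#include <stdbool.h>

/* Illustrative sketch only: map a controller's index number to the
 * domain:bus pair it provides, following the rules above. */
typedef struct {
    unsigned int domain;
    unsigned int bus;
} virPCIDomainBus;

static virPCIDomainBus
virPCIControllerIndexToAddress(unsigned int index,
                               bool isRoot,
                               unsigned int parentDomain)
{
    virPCIDomainBus addr;

    if (isRoot) {
        /* A root controller with index N establishes domain N and
         * provides bus 0 in that domain. */
        addr.domain = index;
        addr.bus = 0;
    } else {
        /* A non-root controller with index N provides bus N in the
         * domain of the controller it is plugged into. */
        addr.domain = parentDomain;
        addr.bus = index;
    }
    return addr;
}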
Implications of this scheme:
* Bus IDs never match any domain IDs (exception: bus 0 in domain 0).
* Bus IDs never appear in more than one domain (exception: bus 0 appears in every domain).
* There can be gaps between domain IDs.
* There can be gaps between bus IDs in a domain.
* Each bus's ID is greater than its domain's ID (except for bus 0 in each domain).
The gaps between IDs mean that the current PCI address set buses array [2] would have to be replaced with a different data structure. I would probably use nested GHashTables, where the first dimension is the domain and the second dimension is the bus.
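To make that concrete, I'm imagining something along the lines of the sketch below, keyed with GUINT_TO_POINTER at both levels; the helper names and the virDomainPCIAddressBus placeholder are purely illustrative, not the real types:

#include <glib.h>

/* Placeholder for whatever per-bus state the address set keeps today. */
typedef struct _virDomainPCIAddressBus virDomainPCIAddressBus;

static GHashTable *
pciDomainsNew(void)
{
    /* Outer table: domain number -> inner table of buses.
     * Removing a domain entry also drops its inner table. */
    return g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                 NULL,
                                 (GDestroyNotify) g_hash_table_unref);
}

static virDomainPCIAddressBus *
pciBusLookup(GHashTable *domains, unsigned int domain, unsigned int bus)
{
    GHashTable *buses = g_hash_table_lookup(domains,
                                            GUINT_TO_POINTER(domain));

    if (!buses)
        return NULL;
    return g_hash_table_lookup(buses, GUINT_TO_POINTER(bus));
}

static void
pciBusAdd(GHashTable *domains, unsigned int domain, unsigned int bus,
          virDomainPCIAddressBus *busState)
{
    GHashTable *buses = g_hash_table_lookup(domains,
                                            GUINT_TO_POINTER(domain));

    if (!buses) {
        /* First bus seen in this domain: create the inner table.
         * Inner values are freed with g_free when removed. */
        buses = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                      NULL, g_free);
        g_hash_table_insert(domains, GUINT_TO_POINTER(domain), buses);
    }
    g_hash_table_insert(buses, GUINT_TO_POINTER(bus), busState);
}

This keeps lookups cheap and tolerates the gaps in both domain and bus IDs described in the implications below.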
The default names generated for QEMU would continue to be "pci.N" (or "pcie.N") where N is the PCI controller's index number.
The PCI specification says that bus IDs are 8 bits, but libvirt bus IDs are independent of the bus IDs seen by the guest, so this limitation doesn't need to exist in libvirt configs. As long as each domain has no more than 256 buses, it should be possible to support arbitrarily large bus IDs. (I doubt this will ever be an issue in practice, but I thought it would be worth mentioning.)
Thoughts?
Thanks,
Richard
[1] https://gitlab.com/libvirt/libvirt/-/issues/360
[2] https://gitlab.com/libvirt/libvirt/-/blob/v8.7.0/src/conf/domain_addr.h#L...
[PATCH v2 21/30] Deprecate 32 bit big-endian MIPS
by Alex Bennée
It's becoming harder to maintain a cross-compiler to test this host
architecture as the old stable Debian 10 ("Buster") moved into LTS
which supports fewer architectures. For now:
- mark it's deprecation in the docs
- downgrade the containers to build TCG tests only
- drop the cross builds from our CI
Users with an appropriate toolchain and user-space can still take
their chances building it.
Signed-off-by: Alex Bennée <alex.bennee(a)linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug(a)amsat.org>
Reviewed-by: Huacai Chen <chenhuacai(a)kernel.org>
Message-Id: <20220826172128.353798-16-alex.bennee(a)linaro.org>
---
v2
- explicit little endian instead of LE
- s/A/As/
- restore mips to dockerfile
---
docs/about/build-platforms.rst | 2 +-
docs/about/deprecated.rst | 13 +++++++
.gitlab-ci.d/container-cross.yml | 1 -
.gitlab-ci.d/crossbuilds.yml | 14 -------
tests/docker/Makefile.include | 5 +--
.../dockerfiles/debian-mips-cross.docker | 38 +++++--------------
6 files changed, 26 insertions(+), 47 deletions(-)
diff --git a/docs/about/build-platforms.rst b/docs/about/build-platforms.rst
index a2fee53248..1c1e7b9e11 100644
--- a/docs/about/build-platforms.rst
+++ b/docs/about/build-platforms.rst
@@ -41,7 +41,7 @@ Those hosts are officially supported, with various accelerators:
- Accelerators
* - Arm
- kvm (64 bit only), tcg, xen
- * - MIPS
+ * - MIPS (little endian only)
- kvm, tcg
* - PPC
- kvm, tcg
diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst
index c75a25daad..0d1fd4469b 100644
--- a/docs/about/deprecated.rst
+++ b/docs/about/deprecated.rst
@@ -213,6 +213,19 @@ MIPS ``Trap-and-Emul`` KVM support (since 6.0)
The MIPS ``Trap-and-Emul`` KVM host and guest support has been removed
from Linux upstream kernel, declare it deprecated.
+Host Architectures
+------------------
+
+BE MIPS (since 7.2)
+'''''''''''''''''''
+
+As Debian 10 ("Buster") moved into LTS the big endian 32 bit version of
+MIPS moved out of support making it hard to maintain our
+cross-compilation CI tests of the architecture. As we no longer have
+CI coverage support may bitrot away before the deprecation process
+completes. The little endian variants of MIPS (both 32 and 64 bit) are
+still a supported host architecture.
+
QEMU API (QAPI) events
----------------------
diff --git a/.gitlab-ci.d/container-cross.yml b/.gitlab-ci.d/container-cross.yml
index 611c6c0b39..95d57e1c5d 100644
--- a/.gitlab-ci.d/container-cross.yml
+++ b/.gitlab-ci.d/container-cross.yml
@@ -89,7 +89,6 @@ mips64el-debian-cross-container:
mips-debian-cross-container:
extends: .container_job_template
stage: containers
- needs: ['amd64-debian10-container']
variables:
NAME: debian-mips-cross
diff --git a/.gitlab-ci.d/crossbuilds.yml b/.gitlab-ci.d/crossbuilds.yml
index 4a5fb6ea2a..c4cd96433d 100644
--- a/.gitlab-ci.d/crossbuilds.yml
+++ b/.gitlab-ci.d/crossbuilds.yml
@@ -70,20 +70,6 @@ cross-i386-tci:
EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user
MAKE_CHECK_ARGS: check check-tcg
-cross-mips-system:
- extends: .cross_system_build_job
- needs:
- job: mips-debian-cross-container
- variables:
- IMAGE: debian-mips-cross
-
-cross-mips-user:
- extends: .cross_user_build_job
- needs:
- job: mips-debian-cross-container
- variables:
- IMAGE: debian-mips-cross
-
cross-mipsel-system:
extends: .cross_system_build_job
needs:
diff --git a/tests/docker/Makefile.include b/tests/docker/Makefile.include
index c3375f89c5..b1bf56434f 100644
--- a/tests/docker/Makefile.include
+++ b/tests/docker/Makefile.include
@@ -81,14 +81,12 @@ endif
# For non-x86 hosts not all cross-compilers have been packaged
ifneq ($(HOST_ARCH),x86_64)
-DOCKER_PARTIAL_IMAGES += debian-mips-cross debian-mipsel-cross debian-mips64el-cross
+DOCKER_PARTIAL_IMAGES += debian-mipsel-cross debian-mips64el-cross
DOCKER_PARTIAL_IMAGES += debian-ppc64el-cross
DOCKER_PARTIAL_IMAGES += debian-s390x-cross
DOCKER_PARTIAL_IMAGES += fedora
endif
-docker-image-debian-mips-cross: docker-image-debian10
-
# The native build should never use the registry
docker-image-debian-native: DOCKER_REGISTRY=
@@ -144,6 +142,7 @@ DOCKER_PARTIAL_IMAGES += debian-hppa-cross
DOCKER_PARTIAL_IMAGES += debian-loongarch-cross
DOCKER_PARTIAL_IMAGES += debian-m68k-cross debian-mips64-cross
DOCKER_PARTIAL_IMAGES += debian-microblaze-cross
+DOCKER_PARTIAL_IMAGES += debian-mips-cross
DOCKER_PARTIAL_IMAGES += debian-nios2-cross
DOCKER_PARTIAL_IMAGES += debian-riscv64-test-cross
DOCKER_PARTIAL_IMAGES += debian-sh4-cross debian-sparc64-cross
diff --git a/tests/docker/dockerfiles/debian-mips-cross.docker b/tests/docker/dockerfiles/debian-mips-cross.docker
index 26c154014d..7b55f0f3b2 100644
--- a/tests/docker/dockerfiles/debian-mips-cross.docker
+++ b/tests/docker/dockerfiles/debian-mips-cross.docker
@@ -1,32 +1,14 @@
#
# Docker mips cross-compiler target
#
-# This docker target builds on the debian Buster base image.
+# This docker target builds on the Debian Bullseye base image.
#
-FROM qemu/debian10
-
-MAINTAINER Philippe Mathieu-Daudé <f4bug(a)amsat.org>
-
-# Add the foreign architecture we want and install dependencies
-RUN dpkg --add-architecture mips
-RUN apt update && \
- DEBIAN_FRONTEND=noninteractive eatmydata \
- apt install -y --no-install-recommends \
- gcc-mips-linux-gnu
-
-RUN apt update && \
- DEBIAN_FRONTEND=noninteractive eatmydata \
- apt build-dep -yy -a mips --arch-only qemu
-
-# Specify the cross prefix for this image (see tests/docker/common.rc)
-ENV QEMU_CONFIGURE_OPTS --cross-prefix=mips-linux-gnu-
-ENV DEF_TARGET_LIST mips-softmmu,mipsel-linux-user
-
-# Install extra libraries to increase code coverage
-RUN apt update && \
- DEBIAN_FRONTEND=noninteractive eatmydata \
- apt install -y --no-install-recommends \
- libbz2-dev:mips \
- liblzo2-dev:mips \
- librdmacm-dev:mips \
- libsnappy-dev:mips
+FROM docker.io/library/debian:11-slim
+
+RUN export DEBIAN_FRONTEND=noninteractive && \
+ apt-get update && \
+ apt-get install -y eatmydata && \
+ eatmydata apt-get dist-upgrade -y && \
+ eatmydata apt-get install --no-install-recommends -y \
+ gcc-mips-linux-gnu \
+ libc6-dev-mips-cross
--
2.34.1
[PATCH] meson: Require libssh-0.8.0 or newer
by Michal Privoznik
According to repology.org:
RHEL-8: 0.9.4
RHEL-9: 0.9.6
Debian 11: 0.9.5
openSUSE Leap 15.3: 0.8.7
Ubuntu 20.04: 0.9.3
And the rest of the distros have something newer anyway. Requiring
0.8.0 or newer allows us to drop the terrible hack where we
rename functions at meson level using #define. Note, 0.8.0 is
the version of libssh where the rename happened.
Signed-off-by: Michal Privoznik <mprivozn(a)redhat.com>
---
libvirt.spec.in | 2 +-
meson.build | 15 +--------------
2 files changed, 2 insertions(+), 15 deletions(-)
diff --git a/libvirt.spec.in b/libvirt.spec.in
index b199c624b8..a46389c78f 100644
--- a/libvirt.spec.in
+++ b/libvirt.spec.in
@@ -378,7 +378,7 @@ BuildRequires: wireshark-devel
%endif
%if %{with_libssh}
-BuildRequires: libssh-devel >= 0.7.0
+BuildRequires: libssh-devel >= 0.8.0
%endif
BuildRequires: rpcgen
diff --git a/meson.build b/meson.build
index ed9f4b3f70..11490b8980 100644
--- a/meson.build
+++ b/meson.build
@@ -1025,24 +1025,11 @@ else
libpcap_dep = dependency('', required: false)
endif
-libssh_version = '0.7'
+libssh_version = '0.8.0'
if conf.has('WITH_REMOTE')
libssh_dep = dependency('libssh', version: '>=' + libssh_version, required: get_option('libssh'))
if libssh_dep.found()
conf.set('WITH_LIBSSH', 1)
-
- # Check if new functions exists, if not redefine them with old deprecated ones.
- # List of [ new_function, deprecated_function ].
- functions = [
- [ 'ssh_get_server_publickey', 'ssh_get_publickey' ],
- [ 'ssh_session_is_known_server', 'ssh_is_server_known' ],
- [ 'ssh_session_update_known_hosts', 'ssh_write_knownhost' ],
- ]
- foreach name : functions
- if not cc.has_function(name[0], dependencies: libssh_dep)
- conf.set(name[0], name[1])
- endif
- endforeach
endif
else
libssh_dep = dependency('', required: false)
--
2.35.1