On 2014/9/19 20:02, Michal Privoznik wrote:
> diff --git a/src/qemu/qemu_capabilities.c
b/src/qemu/qemu_capabilities.c
> index 360cc67..146d67c 100644
> --- a/src/qemu/qemu_capabilities.c
> +++ b/src/qemu/qemu_capabilities.c
> @@ -265,6 +265,10 @@ VIR_ENUM_IMPL(virQEMUCaps, QEMU_CAPS_LAST,
> "numa",
> "memory-backend-file",
> "usb-audio",
> + "vga-vgamem",
> +
> + "qxl-vgamem", /* 175 */
> + "vmware-vgamem",
> );
>
>
> @@ -1073,6 +1077,14 @@ virQEMUCapsComputeCmdFlags(const char *help,
> virQEMUCapsSet(qemuCaps, QEMU_CAPS_VGA_QXL);
> if ((p = strstr(p, "|none")) && p < nl)
> virQEMUCapsSet(qemuCaps, QEMU_CAPS_VGA_NONE);
> + /* It seems that QEMU supports to be communicated with
> + * qmp command since 1.2.0. When qemuCaps->usedQMP is
> + * true, these logical code will be invalid. Does it need here? */
> + if (version >= 1002000) {
> + virQEMUCapsSet(qemuCaps, QEMU_CAPS_VGA_VGAMEM_MB);
> + virQEMUCapsSet(qemuCaps, QEMU_CAPS_QXL_VGAMEM_MB);
> + virQEMUCapsSet(qemuCaps, QEMU_CAPS_VMWARE_VGAMEM_MB);
At least for cirrus vga this is not true. The vgamem_mb property of cirrus vga was
introduced in 19403a68 (v1.3.0). However, I'd expect these to be set by something other
than a version-based test. For instance, aren't the properties listed in the device
property query command?
Thanks for your review.
I use "-global VGA.vgamem_mb=16" to support vgamem_mb in the case that QEMU
supports "-global". From the output of "qemu-kvm --help", I can't get enough
information about the valid arguments for "-global", so I don't know how to set
this capability based on a query command.
I searched for other "-global" command line arguments in qemu_command.c. They are
used without checking their capabilities. So should I use "-global VGA.vgamem_mb"
without checking the capability, like the other "-global" command arguments? Or
should I find a query command to set the capability and check it?
What's your opinion?
> + }
> }
> if (strstr(help, "-spice"))
> virQEMUCapsSet(qemuCaps, QEMU_CAPS_SPICE);
> @@ -3034,6 +3046,9 @@ virQEMUCapsInitQMPBasic(virQEMUCapsPtr qemuCaps)
> virQEMUCapsSet(qemuCaps, QEMU_CAPS_DUMP_GUEST_CORE);
> virQEMUCapsSet(qemuCaps, QEMU_CAPS_VNC_SHARE_POLICY);
> virQEMUCapsSet(qemuCaps, QEMU_CAPS_HOST_PCI_MULTIDOMAIN);
> + virQEMUCapsSet(qemuCaps, QEMU_CAPS_VGA_VGAMEM_MB);
> + virQEMUCapsSet(qemuCaps, QEMU_CAPS_QXL_VGAMEM_MB);
> + virQEMUCapsSet(qemuCaps, QEMU_CAPS_VMWARE_VGAMEM_MB);
No.
> }
>
> /* Capabilities that are architecture depending
> diff --git a/src/qemu/qemu_capabilities.h b/src/qemu/qemu_capabilities.h
> index 2911759..cdf6920 100644
> --- a/src/qemu/qemu_capabilities.h
> +++ b/src/qemu/qemu_capabilities.h
> @@ -213,6 +213,9 @@ typedef enum {
> QEMU_CAPS_NUMA = 171, /* newer -numa handling with disjoint cpu
ranges */
> QEMU_CAPS_OBJECT_MEMORY_FILE = 172, /* -object memory-backend-file */
> QEMU_CAPS_OBJECT_USB_AUDIO = 173, /* usb-audio device support */
> + QEMU_CAPS_VGA_VGAMEM_MB = 174, /* -global VGA.vgamem_mb */
> + QEMU_CAPS_QXL_VGAMEM_MB = 175, /* -global qxl-vga.vgamem_mb */
> + QEMU_CAPS_VMWARE_VGAMEM_MB = 176, /* -global vmware-svga.vgamem_mb */
>
> QEMU_CAPS_LAST, /* this must always be the last item */
> } virQEMUCapsFlags;
> diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
> index c3f860e..c15099a 100644
> --- a/src/qemu/qemu_command.c
> +++ b/src/qemu/qemu_command.c
> @@ -4827,6 +4827,14 @@ qemuBuildDeviceVideoStr(virDomainDefPtr def,
> virBufferAsprintf(&buf, ",vram_size=%u", video->vram *
1024);
> }
>
> + /* 1. Ignore cirrus-vga as guests would not use it anyway.
> + * 2. QEMU accepts MByte for vgamem_mb and ensure its value
> + * a power of two and range: 1 MB -> 256 MB */
> + if (video->type != VIR_DOMAIN_VIDEO_TYPE_CIRRUS &&
> + video->vgamem > 1024) {
> + virBufferAsprintf(&buf, ",vgamem_mb=%u", video->vgamem /
1024);
> + }
> +
> if (qemuBuildDeviceAddressStr(&buf, def, &video->info, qemuCaps)
< 0)
> goto error;
>
> @@ -8766,36 +8774,86 @@ qemuBuildCommandLine(virConnectPtr conn,
>
> virCommandAddArgList(cmd, "-vga", vgastr, NULL);
>
> - if (def->videos[0]->type == VIR_DOMAIN_VIDEO_TYPE_QXL
&&
> - (def->videos[0]->vram || def->videos[0]->ram)
&&
> - virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE)) {
> - const char *dev = (virQEMUCapsGet(qemuCaps,
QEMU_CAPS_DEVICE_QXL_VGA)
> - ? "qxl-vga" : "qxl");
> + if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE)) {
> + const char *dev = NULL;
> + bool vgamemSupport = true;
> int ram = def->videos[0]->ram;
> int vram = def->videos[0]->vram;
> + int vgamem = def->videos[0]->vgamem;
> + switch (primaryVideoType) {
> + case VIR_DOMAIN_VIDEO_TYPE_VGA:
> + if (virQEMUCapsGet(qemuCaps, QEMU_CAPS_DEVICE_VGA))
> + dev = "VGA";
> + if (dev && vgamem &&
> + !virQEMUCapsGet(qemuCaps, QEMU_CAPS_VGA_VGAMEM_MB)) {
> + vgamemSupport = false;
> + VIR_WARN("This QEMU does not support vgamem "
> + "attribute, ignore it");
Why? If I deliberately set vgamem, the domain startup process must fail if qemu
doesn't support it.
Yes, domain startup should fail if the user configures vgamem in the XML but qemu
doesn't support it.
My intention was to avoid a regression, because I set vgamem to a default value if it
is not configured in the XML. Following your suggestion, I'll remove setting the
default value and make startup fail if qemu doesn't support vgamem. Qemu can provide
a default value if libvirt does not.
Is this better?