On 09/16/2016 05:14 PM, Marek Marczykowski-Górecki wrote:
On Fri, Sep 16, 2016 at 04:39:23PM -0600, Jim Fehlig wrote:
> On 08/05/2016 12:05 PM, Marek Marczykowski-Górecki wrote:
>> Since this is something between PV and HVM, it makes sense to put the
>> setting in place where domain type is specified.
>> To enable it, use <os><type machine="xenpvh">...</type></os>. It is
>> also included in capabilities.xml, for every supported HVM guest type - there
>> doesn't seem to be any other requirement (besides a new enough Xen).
>>
>> Signed-off-by: Marek Marczykowski-Górecki <marmarek(a)invisiblethingslab.com>
>> ---
>> src/libxl/libxl_capabilities.c | 40 +++++++++++++++++++++++++++++++---------
>> src/libxl/libxl_conf.c | 2 ++
>> src/libxl/libxl_driver.c | 6 ++++--
>> 3 files changed, 37 insertions(+), 11 deletions(-)
> I didn't investigate, but this patch did not apply cleanly.
>
> Does 'xenpvh' need to be added to docs/schemas/domaincommon.rng? The schema looks
> dated anyhow since it currently contains 'xenpv' and 'xenner'. And perhaps this
> value should be added to docs/formatdomain.html.in, along with a sentence about
> the possible values for Xen machines.
After further evaluation[1], PVHv1 is not the thing I wanted here. And
PVHv2 is going to be significantly different. While this patch does work
for me, I'm not going to spend more time on PVHv1.
Ok. I'm not a fan of supporting PVHv1 in libvirt anyhow. It came and went before
anyone even used it :-).
>> diff --git a/src/libxl/libxl_capabilities.c b/src/libxl/libxl_capabilities.c
>> index 0145116..c443353 100644
>> --- a/src/libxl/libxl_capabilities.c
>> +++ b/src/libxl/libxl_capabilities.c
>> @@ -45,11 +45,16 @@ VIR_LOG_INIT("libxl.libxl_capabilities");
>> /* see xen-unstable.hg/xen/include/asm-x86/cpufeature.h */
>> #define LIBXL_X86_FEATURE_PAE_MASK 0x40
>>
>> +enum machine_type {
>> + machine_hvm,
>> + machine_pvh,
>> + machine_pv,
>> +};
>>
>> struct guest_arch {
>> virArch arch;
>> int bits;
>> - int hvm;
>> + enum machine_type machine;
>> int pae;
>> int nonpae;
>> int ia64_be;
>> @@ -296,7 +301,7 @@ libxlCapsInitGuests(libxl_ctx *ctx, virCapsPtr caps)
>> /* Search for existing matching (model,hvm) tuple */
>> for (i = 0; i < nr_guest_archs; i++) {
>> if ((guest_archs[i].arch == arch) &&
>> - guest_archs[i].hvm == hvm)
>> + guest_archs[i].machine == (hvm ? machine_hvm : machine_pv))
>> break;
>> }
>>
>> @@ -308,7 +313,7 @@ libxlCapsInitGuests(libxl_ctx *ctx, virCapsPtr caps)
>> nr_guest_archs++;
>>
>> guest_archs[i].arch = arch;
>> - guest_archs[i].hvm = hvm;
>> + guest_archs[i].machine = hvm ? machine_hvm : machine_pv;
>>
>> /* Careful not to overwrite a previous positive
>> setting with a negative one here - some archs
>> @@ -320,23 +325,40 @@ libxlCapsInitGuests(libxl_ctx *ctx, virCapsPtr caps)
>> guest_archs[i].nonpae = nonpae;
>> if (ia64_be)
>> guest_archs[i].ia64_be = ia64_be;
>> +
>> + /* On Xen >= 4.4 add PVH for each HVM guest, and do it only once */
>> + if ((ver_info->xen_version_major > 4 ||
>> + (ver_info->xen_version_major == 4 &&
>> + ver_info->xen_version_minor >= 4)) &&
>> + hvm && i == nr_guest_archs-1) {
>> + i = nr_guest_archs;
>> + /* Too many arch flavours - highly unlikely ! */
>> + if (i >= ARRAY_CARDINALITY(guest_archs))
>> + continue;
>> + nr_guest_archs++;
>> + guest_archs[i].arch = arch;
>> + guest_archs[i].machine = machine_pvh;
>> + }
>> }
>> }
>> regfree(&regex);
>>
>> for (i = 0; i < nr_guest_archs; ++i) {
>> virCapsGuestPtr guest;
>> - char const *const xen_machines[] = {guest_archs[i].hvm ? "xenfv" : "xenpv"};
>> + char const *const xen_machines[] = {
>> + guest_archs[i].machine == machine_hvm ? "xenfv" :
>> + (guest_archs[i].machine == machine_pvh ? "xenpvh" : "xenpv")};
>> virCapsGuestMachinePtr *machines;
>>
>> if ((machines = virCapabilitiesAllocMachines(xen_machines, 1)) == NULL)
>> return -1;
>>
>> if ((guest = virCapabilitiesAddGuest(caps,
>> - guest_archs[i].hvm ? VIR_DOMAIN_OSTYPE_HVM : VIR_DOMAIN_OSTYPE_XEN,
>> + guest_archs[i].machine == machine_hvm ?
>> + VIR_DOMAIN_OSTYPE_HVM : VIR_DOMAIN_OSTYPE_XEN,
> Is a new VIR_DOMAIN_OSTYPE_XENPVH needed?
Not sure about this. Wouldn't that require adding `os.type ==
VIR_DOMAIN_OSTYPE_XEN || os.type == VIR_DOMAIN_OSTYPE_XENPVH` in a lot
of places? If the actual settings are mostly the same, I don't see any
reason for introducing such a value.
As long as PVHv2/HVMlite is just another impl of an existing machine type, I agree.
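For reference, my reading of the patch is that a guest definition keeps
<type>xen</type> and only switches the machine attribute, roughly like this
(just a sketch - the arch, kernel path and cmdline are made-up placeholders):

  <os>
    <type arch='x86_64' machine='xenpvh'>xen</type>
    <kernel>/path/to/vmlinuz</kernel>
    <cmdline>root=/dev/xvda1 ro</cmdline>
  </os>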
>> guest_archs[i].arch,
>> LIBXL_EXECBIN_DIR "/qemu-system-i386",
>> - (guest_archs[i].hvm ?
>> + (guest_archs[i].machine == machine_hvm ?
>> LIBXL_FIRMWARE_DIR "/hvmloader" :
>> NULL),
>> 1,
>> @@ -375,7 +397,7 @@ libxlCapsInitGuests(libxl_ctx *ctx, virCapsPtr caps)
>> 0) == NULL)
>> return -1;
>>
>> - if (guest_archs[i].hvm) {
>> + if (guest_archs[i].machine != machine_pv) {
>> if (virCapabilitiesAddGuestFeature(guest,
>> "acpi",
>> 1,
>> @@ -390,7 +412,7 @@ libxlCapsInitGuests(libxl_ctx *ctx, virCapsPtr caps)
>> if (virCapabilitiesAddGuestFeature(guest,
>> "hap",
>> 1,
>> - 1) == NULL)
>> + guest_archs[i].machine == machine_hvm) == NULL)
>> return -1;
>> }
>> }
>> @@ -409,7 +431,7 @@ libxlMakeDomainOSCaps(const char *machine,
>>
>> os->supported = true;
>>
>> - if (STREQ(machine, "xenpv"))
>> + if (STREQ(machine, "xenpv") || STREQ(machine, "xenpvh"))
>> return 0;
>>
>> capsLoader->supported = true;
>> diff --git a/src/libxl/libxl_conf.c b/src/libxl/libxl_conf.c
>> index 5202ca1..aa06586 100644
>> --- a/src/libxl/libxl_conf.c
>> +++ b/src/libxl/libxl_conf.c
>> @@ -173,6 +173,8 @@ libxlMakeDomCreateInfo(libxl_ctx *ctx,
>> }
>> } else {
>> c_info->type = LIBXL_DOMAIN_TYPE_PV;
>> + if (STREQ(def->os.machine, "xenpvh"))
>> + libxl_defbool_set(&c_info->pvh, true);
> I assume this won't change with HVMlite, aka pvh2?
It will, unfortunately. HVMlite is enabled by setting device model to
none.
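Ah, I see. So instead of the pvh defbool, the PVHv2 path on the libxl side
would presumably look more like the sketch below (from memory only - the
exact field/enum names depend on the Xen version):

  /* hypothetical PVHv2/HVMlite setup: HVM container, no device model */
  c_info->type = LIBXL_DOMAIN_TYPE_HVM;
  b_info->device_model_version = LIBXL_DEVICE_MODEL_VERSION_NONE;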
>> }
>>
>> if (VIR_STRDUP(c_info->name, def->name) < 0)
>> diff --git a/src/libxl/libxl_driver.c b/src/libxl/libxl_driver.c
>> index 4957072..fa58346 100644
>> --- a/src/libxl/libxl_driver.c
>> +++ b/src/libxl/libxl_driver.c
>> @@ -6321,9 +6321,11 @@ libxlConnectGetDomainCapabilities(virConnectPtr conn,
>> emulatorbin = "/usr/bin/qemu-system-x86_64";
>>
>> if (machine) {
>> - if (STRNEQ(machine, "xenpv") && STRNEQ(machine, "xenfv")) {
>> + if (STRNEQ(machine, "xenpv") &&
>> + STRNEQ(machine, "xenpvh") &&
>> + STRNEQ(machine, "xenfv")) {
>> virReportError(VIR_ERR_INVALID_ARG, "%s",
>> - _("Xen only supports 'xenpv' and
'xenfv' machines"));
>> + _("Xen only supports 'xenpv',
'xenpvh' and 'xenfv' machines"));
>> goto cleanup;
>> }
>> } else {
> WRT domain capabilities, should pvh be treated like pv? I.e. do they both have
> the same max vcpus, etc?
Yes, PVH behaves like PV, but PVHv2 will behave like HVM.
Ok, that matches my understanding of PVHv2. It is an alternative HVM impl.
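So with this patch applied I'd expect a query along these lines to report the
PV-style limits (untested, just to illustrate how the machine value is used):

  virsh domcapabilities --virttype xen --arch x86_64 --machine xenpvh \
        --emulatorbin /usr/bin/qemu-system-x86_64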
Regards,
Jim
> Also, supporting a new knob in the XML usually means supporting conversion of
> that knob to xl.cfg. Can you add domXML <-> xl.cfg conversion for pvh? And a
> test case for the conversion too please?
I'll add this for PVHv2...
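FWIW, for the test data I'd imagine something along these lines once the
PVHv2 options settle (file and option names below are purely illustrative):

  # tests/xlconfigdata/test-paravirt-pvh.cfg (hypothetical)
  name = "XenGuest1"
  memory = 512
  vcpus = 2
  kernel = "/boot/vmlinuz"
  pvh = 1

paired with a domain XML using machine='xenpvh' in <os><type>, and a
DO_TEST() entry in tests/xlconfigtest.c.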
[1]
http://markmail.org/message/c7o7qsc3chkigdzv