On 12/20/2011 08:59 PM, Wayne Xia wrote:
It seems the function is chosen at compile time, which I guess results
in two RPMs with different configurations of libvirt-cim. Could we
compile the code against the latest libvirt, but choose the function at
runtime? That would be easier to maintain and better for users who are
not developers.
The current (default) approach has been to do a build-time check and link
certain APIs that way. This does mean that we need to have different
libvirt-cim RPMs for different libvirt versions, which is problematic.
I have a few ideas and I think others might too. I believe Gareth is going
to put down some initial thoughts and we can discuss on a dedicated thread.
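Purely as an illustration of the runtime alternative (not code from the
patch), something like the following could gate the new behavior on the
libvirt found at runtime, using virGetVersion(); the helper name is made up
here, and the 9000 threshold just mirrors the patch's
LIBVIR_VERSION_NUMBER >= 9000 (0.9.0) check:

    #include <libvirt/libvirt.h>

    /* Returns nonzero if the installed libvirt is new enough to save
     * the CPU cgroup setting itself (the <cputune> section used by the
     * patch corresponds to libvirt 0.9.0 and later). */
    static int libvirt_has_cputune(void)
    {
            unsigned long libver = 0;

            /* libver is encoded as major * 1000000 + minor * 1000 + micro */
            if (virGetVersion(&libver, NULL, NULL) < 0)
                    return 0;

            return libver >= 9000;
    }

Call sites could then branch on a check like this instead of on
LIBVIR_VERSION_NUMBER, so one binary adapts to whichever library it finds.
Any calls into APIs newer than the oldest libvirt we still support would, of
course, need to be resolved via dlsym() or similar so the binary does not
depend on symbols an older libvirt.so cannot provide.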
On 2011-12-20 20:43, Gareth S. Bestor wrote:
> Signed-off-by: Gareth S. Bestor <bestor(a)us.ibm.com>
> ---
> libxkutil/xmlgen.c | 45 +++++++++++++++++++++++++++++
> src/Virt_ComputerSystem.c | 6 ++++
> src/Virt_RASD.c | 6 +++-
> src/Virt_VirtualSystemManagementService.c | 30 +++++++++++++++++++
> 4 files changed, 86 insertions(+), 1 deletions(-)
>
> diff --git a/libxkutil/xmlgen.c b/libxkutil/xmlgen.c
> index 4cca75b..2fe41bf 100644
> --- a/libxkutil/xmlgen.c
> +++ b/libxkutil/xmlgen.c
> @@ -365,6 +365,44 @@ static const char *vcpu_xml(xmlNodePtr root, struct domain *dominfo)
> return NULL;
> }
>
> +#if LIBVIR_VERSION_NUMBER >= 9000
> +static const char *cputune_xml(xmlNodePtr root, struct domain *dominfo)
> +{
> + struct vcpu_device *vcpu;
> + xmlNodePtr cputune, tmp;
> + int ret;
> + char *string = NULL;
> +
> + if (dominfo->dev_vcpu == NULL)
> + return NULL;
> +
> + vcpu = &dominfo->dev_vcpu[0].dev.vcpu;
> +
> + /* CPU cgroup setting saved by libvirt under <cputune> XML section */
> + cputune = xmlNewChild(root, NULL, BAD_CAST "cputune", NULL);
> + if (cputune == NULL)
> + return XML_ERROR;
> +
> + /* Get the CPU cgroup setting from the VCPU RASD.Weight property */
> + ret = asprintf(&string,
> + "%d",
> + vcpu->weight);
> + if (ret == -1)
> + return XML_ERROR;
> +
> + tmp = xmlNewChild(cputune,
> + NULL,
> + BAD_CAST "shares",
> + BAD_CAST string);
> + free(string);
> +
> + if (tmp == NULL)
> + return XML_ERROR;
> + else
> + return NULL;
> +}
> +#endif
> +
> static const char *mem_xml(xmlNodePtr root, struct domain *dominfo)
> {
> struct mem_device *mem;
> @@ -941,6 +979,13 @@ char *system_to_xml(struct domain *dominfo)
> if (msg != NULL)
> goto out;
>
> +#if LIBVIR_VERSION_NUMBER >= 9000
> + /* Recent libvirt versions add new <cputune> section to XML */
> + msg = cputune_xml(root, dominfo);
> + if (msg != NULL)
> + goto out;
> +#endif
> +
> devices = xmlNewChild(root, NULL, BAD_CAST "devices", NULL);
> if (devices == NULL) {
> msg = XML_ERROR;
> diff --git a/src/Virt_ComputerSystem.c b/src/Virt_ComputerSystem.c
> index e1f1cec..098b07a 100644
> --- a/src/Virt_ComputerSystem.c
> +++ b/src/Virt_ComputerSystem.c
> @@ -861,6 +861,11 @@ static int lxc_scheduler_params(struct infostore_ctx *ctx,
> static int kvm_scheduler_params(struct infostore_ctx *ctx,
> virSchedParameter **params)
> {
> +#if LIBVIR_VERSION_NUMBER < 9000
> + /* Old versions of libvirt only support CPU cgroups for running guests */
> + /* so instead read cpu cgroup setting for inactive guest from infostore. */
> + /* New versions there is nothing to do because libvirt takes care of it. */
> +
> unsigned long long value;
>
> *params = calloc(1, sizeof(virSchedParameter));
> @@ -878,6 +883,7 @@ static int kvm_scheduler_params(struct infostore_ctx *ctx,
>
> return 1;
> }
> +#endif
>
> return 0;
> }
> diff --git a/src/Virt_RASD.c b/src/Virt_RASD.c
> index 9305c8d..0a2de7f 100644
> --- a/src/Virt_RASD.c
> +++ b/src/Virt_RASD.c
> @@ -157,8 +157,12 @@ static CMPIStatus set_proc_rasd_params(const CMPIBroker *broker,
> goto out;
> }
>
> - /* Currently only support CPU cgroups for running KVM guests */
> + /* Early versions of libvirt only support CPU cgroups for *running* KVM guests */
> +#if LIBVIR_VERSION_NUMBER < 9000
> if (domain_online(dom) && STREQC(virConnectGetType(conn), "QEMU")) {
> +#else
> + if (STREQC(virConnectGetType(conn), "QEMU")) {
> +#endif
> char *sched;
> int nparams;
> unsigned int i;
> diff --git a/src/Virt_VirtualSystemManagementService.c b/src/Virt_VirtualSystemManagementService.c
> index 21979c3..f6b191e 100644
> --- a/src/Virt_VirtualSystemManagementService.c
> +++ b/src/Virt_VirtualSystemManagementService.c
> @@ -1783,7 +1783,37 @@ static CMPIStatus update_dominfo(const struct domain *dominfo,
> goto out;
> }
>
> +#if LIBVIR_VERSION_NUMBER < 9000
> + /* Old libvirt versions don't save cpu cgroup setting for inactive */
> + /* guests, so save in infostore instead */
> infostore_set_u64(ctx, "weight", dev->dev.vcpu.weight);
> +#else
> + /* New libvirt versions save cpu cgroup setting in KVM guest config */
> + if (STREQC(virConnectGetType(conn), "QEMU")) {
> + int ret;
> + virSchedParameter params;
> + strncpy(params.field,
> + "cpu_shares",
> + VIR_DOMAIN_SCHED_FIELD_LENGTH);
> + params.type = VIR_DOMAIN_SCHED_FIELD_ULLONG;
> + params.value.ul = dev->dev.vcpu.weight;
> +
> + CU_DEBUG("setting %s scheduler param cpu_shares=%d",
> + dominfo->name,
> + dev->dev.vcpu.weight);
> + ret = virDomainSetSchedulerParametersFlags(dom, &params, 1,
> + VIR_DOMAIN_AFFECT_CONFIG);
> + if (ret != 0) {
> + CU_DEBUG("Failed to set config scheduler
> param");
> + cu_statusf(_BROKER,&s,
> + CMPI_RC_ERR_FAILED,
> + "Failed to set config scheduler
> param");
> + goto out;
> + }
> + }
> + else
> + infostore_set_u64(ctx, "weight",
dev->dev.vcpu.weight);
> +#endif
> infostore_set_u64(ctx, "limit", dev->dev.vcpu.limit);
>
> out:
--
Chip Vincent
Open Virtualization
IBM Linux Technology Center
cvincent(a)linux.vnet.ibm.com