[libvirt] [PULL 10/11] i386: Implement query-cpu-model-expansion QMP command
by Eduardo Habkost
Implement query-cpu-model-expansion for target-i386.
This should meet all the requirements while being simple. In the
case of static expansion, it will use the new "base" CPU model,
and in the case of full expansion, it will keep the original CPU
model name+props, and append extra properties.
A future follow-up should improve the implementation of
type=full, so that it returns more detailed data, including every
writable QOM property in the CPU object.
Cc: libvir-list(a)redhat.com
Cc: Jiri Denemark <jdenemar(a)redhat.com>
Message-Id: <20170222190029.17243-3-ehabkost(a)redhat.com>
Tested-by: Jiri Denemark <jdenemar(a)redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost(a)redhat.com>
---
monitor.c | 4 +-
target/i386/cpu.c | 191 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 193 insertions(+), 2 deletions(-)
diff --git a/monitor.c b/monitor.c
index f8f4a07cfb..b68944d93c 100644
--- a/monitor.c
+++ b/monitor.c
@@ -984,8 +984,10 @@ static void qmp_unregister_commands_hack(void)
#ifndef TARGET_ARM
qmp_unregister_command("query-gic-capabilities");
#endif
-#if !defined(TARGET_S390X)
+#if !defined(TARGET_S390X) && !defined(TARGET_I386)
qmp_unregister_command("query-cpu-model-expansion");
+#endif
+#if !defined(TARGET_S390X)
qmp_unregister_command("query-cpu-model-baseline");
qmp_unregister_command("query-cpu-model-comparison");
#endif
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 0a71594445..139b7ea12e 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -29,10 +29,16 @@
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/qmp/qerror.h"
+#include "qapi/qmp/qstring.h"
+#include "qapi/qmp/qdict.h"
+#include "qapi/qmp/qbool.h"
+#include "qapi/qmp/qint.h"
+#include "qapi/qmp/qfloat.h"
#include "qapi-types.h"
#include "qapi-visit.h"
#include "qapi/visitor.h"
+#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"
#if defined(CONFIG_KVM)
@@ -2288,7 +2294,7 @@ static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
}
}
-/* Load data from X86CPUDefinition
+/* Load data from X86CPUDefinition into a X86CPU object
*/
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
@@ -2297,6 +2303,11 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
char host_vendor[CPUID_VENDOR_SZ + 1];
FeatureWord w;
+ /*NOTE: any property set by this function should be returned by
+ * x86_cpu_static_props(), so static expansion of
+ * query-cpu-model-expansion is always complete.
+ */
+
/* CPU models only set _minimum_ values for level/xlevel: */
object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
@@ -2341,6 +2352,184 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
}
+/* Return a QDict containing keys for all properties that can be included
+ * in static expansion of CPU models. All properties set by x86_cpu_load_def()
+ * must be included in the dictionary.
+ */
+static QDict *x86_cpu_static_props(void)
+{
+ FeatureWord w;
+ int i;
+ static const char *props[] = {
+ "min-level",
+ "min-xlevel",
+ "family",
+ "model",
+ "stepping",
+ "model-id",
+ "vendor",
+ "lmce",
+ NULL,
+ };
+ static QDict *d;
+
+ if (d) {
+ return d;
+ }
+
+ d = qdict_new();
+ for (i = 0; props[i]; i++) {
+ qdict_put_obj(d, props[i], qnull());
+ }
+
+ for (w = 0; w < FEATURE_WORDS; w++) {
+ FeatureWordInfo *fi = &feature_word_info[w];
+ int bit;
+ for (bit = 0; bit < 32; bit++) {
+ if (!fi->feat_names[bit]) {
+ continue;
+ }
+ qdict_put_obj(d, fi->feat_names[bit], qnull());
+ }
+ }
+
+ return d;
+}
+
+/* Add an entry to @props dict, with the value for property. */
+static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
+{
+ QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
+ &error_abort);
+
+ qdict_put_obj(props, prop, value);
+}
+
+/* Convert CPU model data from X86CPU object to a property dictionary
+ * that can recreate exactly the same CPU model.
+ */
+static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
+{
+ QDict *sprops = x86_cpu_static_props();
+ const QDictEntry *e;
+
+ for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
+ const char *prop = qdict_entry_key(e);
+ x86_cpu_expand_prop(cpu, props, prop);
+ }
+}
+
+static void object_apply_props(Object *obj, QDict *props, Error **errp)
+{
+ const QDictEntry *prop;
+ Error *err = NULL;
+
+ for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
+ object_property_set_qobject(obj, qdict_entry_value(prop),
+ qdict_entry_key(prop), &err);
+ if (err) {
+ break;
+ }
+ }
+
+ error_propagate(errp, err);
+}
+
+/* Create X86CPU object according to model+props specification */
+static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
+{
+ X86CPU *xc = NULL;
+ X86CPUClass *xcc;
+ Error *err = NULL;
+
+ xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
+ if (xcc == NULL) {
+ error_setg(&err, "CPU model '%s' not found", model);
+ goto out;
+ }
+
+ xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
+ if (props) {
+ object_apply_props(OBJECT(xc), props, &err);
+ if (err) {
+ goto out;
+ }
+ }
+
+ x86_cpu_expand_features(xc, &err);
+ if (err) {
+ goto out;
+ }
+
+out:
+ if (err) {
+ error_propagate(errp, err);
+ object_unref(OBJECT(xc));
+ xc = NULL;
+ }
+ return xc;
+}
+
+CpuModelExpansionInfo *
+arch_query_cpu_model_expansion(CpuModelExpansionType type,
+ CpuModelInfo *model,
+ Error **errp)
+{
+ X86CPU *xc = NULL;
+ Error *err = NULL;
+ CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
+ QDict *props = NULL;
+ const char *base_name;
+
+ xc = x86_cpu_from_model(model->name,
+ model->has_props ?
+ qobject_to_qdict(model->props) :
+ NULL, &err);
+ if (err) {
+ goto out;
+ }
+
+
+ switch (type) {
+ case CPU_MODEL_EXPANSION_TYPE_STATIC:
+ /* Static expansion will be based on "base" only */
+ base_name = "base";
+ break;
+ case CPU_MODEL_EXPANSION_TYPE_FULL:
+ /* As we don't return every single property, full expansion needs
+ * to keep the original model name+props, and add extra
+ * properties on top of that.
+ */
+ base_name = model->name;
+ if (model->has_props && model->props) {
+ props = qdict_clone_shallow(qobject_to_qdict(model->props));
+ }
+ break;
+ default:
+ error_setg(&err, "Unsupportted expansion type");
+ goto out;
+ }
+
+ if (!props) {
+ props = qdict_new();
+ }
+ x86_cpu_to_dict(xc, props);
+
+ ret->model = g_new0(CpuModelInfo, 1);
+ ret->model->name = g_strdup(base_name);
+ ret->model->props = QOBJECT(props);
+ ret->model->has_props = true;
+
+out:
+ object_unref(OBJECT(xc));
+ if (err) {
+ error_propagate(errp, err);
+ qapi_free_CpuModelExpansionInfo(ret);
+ ret = NULL;
+ }
+ return ret;
+}
+
X86CPU *cpu_x86_init(const char *cpu_model)
{
return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
--
2.11.0.259.g40922b1
7 years, 10 months
Re: [libvirt] Redhat 7: cgroup CPUACCT controller is not mounted
by youssef.elfathi@orange.com
Sorry , please ignore my message I used the wrong mailing-list!
-----Original Message-----
From: EL FATHI Youssef OBS/OINIS
Sent: Monday, February 27, 2017 14:40
To: 'libvir-list(a)redhat.com'
Subject: Redhat 7: cgroup CPUACCT controller is not mounted
Hi,
With a non-root user account, I am launching virtual machines and would like to get CPU stats for each Core (using python API or not) but face the following problem:
- When I issue the command "virsh --readonly cpu-stats MY_DOMAIN" I got the following error:
error: Failed to retrieve CPU statistics for domain 'MY_DOMAIN'
error: Requested operation is not valid: cgroup CPUACCT controller is not mounted
- I checked that cgroup is well mounted:
$ cat /proc/mounts | grep cgroup
tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0 cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0 cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0 cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0 cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0 cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0 cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0 cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpuacct,cpu 0 0 cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_prio,net_cls 0 0 cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0 cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0 cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
$ cat /proc/cgroups
#subsys_name hierarchy num_cgroups enabled
cpuset 6 1 1
cpu 7 1 1
cpuacct 7 1 1
memory 11 1 1
devices 2 1 1
freezer 5 1 1
net_cls 8 1 1
blkio 10 1 1
perf_event 9 1 1
hugetlb 3 1 1
pids 4 1 1
net_prio 8 1 1
- I checked systemd-cgtop but don't have CPU info for my VMs (first line starting with /):
Path Tasks %CPU Memory Input/s Output/s
/ 332 808.0 21.3G - -
/system.slice/auditd.service 1 - - - -
/system.slice/crond.service 1 - - - -
/system.slice/dbus.service 1 - - - -
/system.slice/gssproxy.service 1 - - - -
/system.slice/irqbalance.service 1 - - - -
/system.slice/ksmtuned.service 2 - - - -
/system.slice/libvirtd.service 1 - - - -
/system.slice/lvm2-lvmetad.service 1 - - - -
/system.slice/polkit.service 1 - - - -
/system.slice/rhnsd.service 1 - - - -
/system.slice/rhsmcertd.service 1 - - - -
/system.slice/rsyslog.service 1 - - - -
/system.slice/sshd.service 1 - - - -
/system.slice/system-getty.slice/getty(a)tty1.service 1 - - - -
/system.slice/systemd-journald.service 1 - - - -
/system.slice/systemd-logind.service 1 - - - -
/system.slice/systemd-udevd.service 1 - - - -
/system.slice/tuned.service 1 - - - -
/user.slice/user-3972.slice/session-15191.scope 3 - - - -
/user.slice/user-3972.slice/session-16005.scope 4 - - - -
/user.slice/user-3972.slice/session-16019.scope 10 - - - -
Thanks by advance for your help!
Regards,
Youssef
_________________________________________________________________________________________________________________________
Ce message et ses pieces jointes peuvent contenir des informations confidentielles ou privilegiees et ne doivent donc
pas etre diffuses, exploites ou copies sans autorisation. Si vous avez recu ce message par erreur, veuillez le signaler
a l'expediteur et le detruire ainsi que les pieces jointes. Les messages electroniques etant susceptibles d'alteration,
Orange decline toute responsabilite si ce message a ete altere, deforme ou falsifie. Merci.
This message and its attachments may contain confidential or privileged information that may be protected by law;
they should not be distributed, used or copied without authorisation.
If you have received this email in error, please notify the sender and delete this message and its attachments.
As emails may be altered, Orange is not liable for messages that have been modified, changed or falsified.
Thank you.
7 years, 10 months
[libvirt] Redhat 7: cgroup CPUACCT controller is not mounted
by youssef.elfathi@orange.com
Hi,
With a non-root user account, I am launching virtual machines and would like to get CPU stats for each Core (using python API or not) but face the following problem:
- When I issue the command "virsh --readonly cpu-stats MY_DOMAIN" I got the following error:
error: Failed to retrieve CPU statistics for domain 'MY_DOMAIN'
error: Requested operation is not valid: cgroup CPUACCT controller is not mounted
- I checked that cgroup is well mounted:
$ cat /proc/mounts | grep cgroup
tmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0
cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0
cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0
cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0
cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0
cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpuacct,cpu 0 0
cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_prio,net_cls 0 0
cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0
cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0
cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0
$ cat /proc/cgroups
#subsys_name hierarchy num_cgroups enabled
cpuset 6 1 1
cpu 7 1 1
cpuacct 7 1 1
memory 11 1 1
devices 2 1 1
freezer 5 1 1
net_cls 8 1 1
blkio 10 1 1
perf_event 9 1 1
hugetlb 3 1 1
pids 4 1 1
net_prio 8 1 1
- I checked systemd-cgtop but don't have CPU info for my VMs (first line starting with /):
Path Tasks %CPU Memory Input/s Output/s
/ 332 808.0 21.3G - -
/system.slice/auditd.service 1 - - - -
/system.slice/crond.service 1 - - - -
/system.slice/dbus.service 1 - - - -
/system.slice/gssproxy.service 1 - - - -
/system.slice/irqbalance.service 1 - - - -
/system.slice/ksmtuned.service 2 - - - -
/system.slice/libvirtd.service 1 - - - -
/system.slice/lvm2-lvmetad.service 1 - - - -
/system.slice/polkit.service 1 - - - -
/system.slice/rhnsd.service 1 - - - -
/system.slice/rhsmcertd.service 1 - - - -
/system.slice/rsyslog.service 1 - - - -
/system.slice/sshd.service 1 - - - -
/system.slice/system-getty.slice/getty(a)tty1.service 1 - - - -
/system.slice/systemd-journald.service 1 - - - -
/system.slice/systemd-logind.service 1 - - - -
/system.slice/systemd-udevd.service 1 - - - -
/system.slice/tuned.service 1 - - - -
/user.slice/user-3972.slice/session-15191.scope 3 - - - -
/user.slice/user-3972.slice/session-16005.scope 4 - - - -
/user.slice/user-3972.slice/session-16019.scope 10 - - - -
Thanks by advance for your help!
Regards,
Youssef
_________________________________________________________________________________________________________________________
Ce message et ses pieces jointes peuvent contenir des informations confidentielles ou privilegiees et ne doivent donc
pas etre diffuses, exploites ou copies sans autorisation. Si vous avez recu ce message par erreur, veuillez le signaler
a l'expediteur et le detruire ainsi que les pieces jointes. Les messages electroniques etant susceptibles d'alteration,
Orange decline toute responsabilite si ce message a ete altere, deforme ou falsifie. Merci.
This message and its attachments may contain confidential or privileged information that may be protected by law;
they should not be distributed, used or copied without authorisation.
If you have received this email in error, please notify the sender and delete this message and its attachments.
As emails may be altered, Orange is not liable for messages that have been modified, changed or falsified.
Thank you.
7 years, 10 months
Re: [libvirt] Fail to start 2nd guest
by Stefan Hajnoczi
On Mon, Feb 27, 2017 at 08:50:03PM +0800, Xiong Zhou wrote:
> On Mon, Feb 27, 2017 at 10:11:04AM +0000, Stefan Hajnoczi wrote:
> > On Mon, Feb 27, 2017 at 05:40:50PM +0800, Xiong Zhou wrote:
> > > It worked fine on Linus tree commit:
> > > 7bb0338 Merge tag 'rodata-v4.11-rc1' of git://git.kernel.org/pub/scm/..
> > >
> > > failed to start 2nd domain on this commit:
> > > 37c8596 Merge tag 'tty-4.11-rc1' of git://git.kernel.org/pub/scm/..
> > > (this commit probably is not the first bad, i didn't do the bisecting)
> > >
> > >
> > > sh-4.2# uname -r
> > > 4.10.0-master-37c8596+
> > > sh-4.2# rpm -qv qemu
> > > qemu-2.0.0-1.el7.6.x86_64
> > > sh-4.2# ps ajxf | grep qemu
> > > ...
> > > sh-4.2# virsh list
> > > Id Name State
> > > ----------------------------------------------------
> > > 1 73h running
> > >
> > > sh-4.2# virsh start 73us
> > > error: Failed to start domain 73us
> > > error: internal error: qemu unexpectedly closed the monitor: ((null):11497): Spice-Warning **: reds.c:2499:reds_init_socket: listen: Address already in use
> > > 2017-02-27T09:33:42.335708Z qemu-kvm: failed to initialize spice server
> >
> > The error message says that the spice remote desktop cannot listen on
> > -spice port=5900,addr=127.0.0.1.
> >
> > Did you hardcode port 5900 in the domain XML? That could explain why
> No.
> > the second guest fails to launch - you need to use unique port numbers
> > or let libvirt automatically assign them. Check the domain XML:
> >
> > <graphics type='spice' port='-1' tlsPort='-1' autoport='yes'>
>
> It looks like:
>
> <graphics type='spice' autoport='yes'>
> <listen type='address'/>
> <image compression='off'/>
> </graphics>
>
>
> >
> > Another possibility is that a process running on the host is already
> > using port 5900. Perhaps a guest or VNC server that was launched
> > outside of libvirt? You can check this with:
> >
> > netstat -alpn | grep 5900
> # netstat -alpn | grep 5900
> tcp 0 0 127.0.0.1:5900 0.0.0.0:* LISTEN 11065/qemu-kvm
Please check that 11065/qemu-kvm was launched by the same libvirtd and
its domain XML also uses autoport='yes'.
I have CCed the libvirt mailing list because they may be able to explain
why there is a collision on TCP port 5900.
Stefan
7 years, 10 months
[libvirt] [PATCH 00/14] Introduce NVDIMM support
by Michal Privoznik
NVDIMMs are new type of ultra fast storage that's plugged into DIMM slot and
can hold the stored info throughout reboots. After all, NV stands for
non-volatile. In virtualization world, this has an awesome advantage - less
VM_EXITs on 'disk' IO.
https://nvdimm.wiki.kernel.org/
Currently, there is no NVDIMM namespace support. I think qemu does not support
it either.
Michal Privoznik (14):
qemu_cgroup: Only try to allow devices if devices CGroup's available
qemuBuildMemoryBackendStr: Reorder args and update comment
Introduce NVDIMM memory model
qemu: Introduce QEMU_CAPS_DEVICE_NVDIMM
qemu: Implement NVDIMM
conf: Introduce @access to <memory/>
qemu: Implement @access for <memory/> banks
security_dac: Label host side of NVDIMM
security_selinux: Label host side of NVDIMM
security: Introduce internal APIs for memdev labelling
secdrivers: Implement memdev relabel APIs
qemu_hotplug: Relabel memdev
qemu: Allow nvdimm in devices CGroups
qemu: Namespaces for NVDIMM
docs/formatdomain.html.in | 70 +++++++---
docs/schemas/domaincommon.rng | 40 ++++--
src/conf/domain_conf.c | 112 +++++++++++----
src/conf/domain_conf.h | 4 +
src/libvirt_private.syms | 2 +
src/qemu/qemu_alias.c | 10 +-
src/qemu/qemu_capabilities.c | 2 +
src/qemu/qemu_capabilities.h | 1 +
src/qemu/qemu_cgroup.c | 70 ++++++++++
src/qemu/qemu_cgroup.h | 4 +
src/qemu/qemu_command.c | 152 ++++++++++++++-------
src/qemu/qemu_command.h | 16 ++-
src/qemu/qemu_domain.c | 105 +++++++++++++-
src/qemu/qemu_domain.h | 8 ++
src/qemu/qemu_hotplug.c | 42 +++++-
src/qemu/qemu_security.c | 56 ++++++++
src/qemu/qemu_security.h | 8 ++
src/security/security_dac.c | 76 +++++++++++
src/security/security_driver.h | 9 ++
src/security/security_manager.c | 56 ++++++++
src/security/security_manager.h | 7 +
src/security/security_nop.c | 19 +++
src/security/security_selinux.c | 69 ++++++++++
src/security/security_stack.c | 38 ++++++
tests/qemucapabilitiesdata/caps_2.6.0.x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_2.7.0.x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_2.8.0.x86_64.xml | 1 +
tests/qemucapabilitiesdata/caps_2.9.0.x86_64.xml | 1 +
.../qemuxml2argv-memory-hotplug-nvdimm-access.args | 26 ++++
.../qemuxml2argv-memory-hotplug-nvdimm-access.xml | 49 +++++++
.../qemuxml2argv-memory-hotplug-nvdimm.args | 26 ++++
.../qemuxml2argv-memory-hotplug-nvdimm.xml | 56 ++++++++
tests/qemuxml2argvtest.c | 6 +-
.../qemuxml2xmlout-memory-hotplug-nvdimm.xml | 1 +
tests/qemuxml2xmltest.c | 1 +
35 files changed, 1025 insertions(+), 120 deletions(-)
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-memory-hotplug-nvdimm-access.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-memory-hotplug-nvdimm-access.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-memory-hotplug-nvdimm.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-memory-hotplug-nvdimm.xml
create mode 120000 tests/qemuxml2xmloutdata/qemuxml2xmlout-memory-hotplug-nvdimm.xml
--
2.11.0
7 years, 10 months
[libvirt] [PATCH RESEND] vz: make more accurate closing connection to sdk
by Nikolay Shirokovskiy
Current code, for example, can call unsubscribe if connecting
succeeds but subscribing fails. This will probably lead
only to spurious error messages without any actual inconsistencies
but nevertheless.
---
src/vz/vz_driver.c | 9 ++------
src/vz/vz_sdk.c | 65 +++++++++++++++++++++++++++---------------------------
src/vz/vz_sdk.h | 2 --
3 files changed, 34 insertions(+), 42 deletions(-)
diff --git a/src/vz/vz_driver.c b/src/vz/vz_driver.c
index 6aade90..4408591 100644
--- a/src/vz/vz_driver.c
+++ b/src/vz/vz_driver.c
@@ -163,11 +163,7 @@ static void vzDriverDispose(void * obj)
{
vzDriverPtr driver = obj;
- if (driver->server) {
- prlsdkUnsubscribeFromPCSEvents(driver);
- prlsdkDisconnect(driver);
- }
-
+ prlsdkDisconnect(driver);
virObjectUnref(driver->domains);
virObjectUnref(driver->caps);
virObjectUnref(driver->xmlopt);
@@ -348,8 +344,7 @@ vzDriverObjNew(void)
!(driver->domains = virDomainObjListNew()) ||
!(driver->domainEventState = virObjectEventStateNew()) ||
(vzInitVersion(driver) < 0) ||
- (prlsdkConnect(driver) < 0) ||
- (prlsdkSubscribeToPCSEvents(driver) < 0)) {
+ (prlsdkConnect(driver) < 0)) {
virObjectUnref(driver);
return NULL;
}
diff --git a/src/vz/vz_sdk.c b/src/vz/vz_sdk.c
index 3f46de7..4c6c3a2 100644
--- a/src/vz/vz_sdk.c
+++ b/src/vz/vz_sdk.c
@@ -43,6 +43,8 @@ static int
prlsdkUUIDParse(const char *uuidstr, unsigned char *uuid);
static void
prlsdkConvertError(PRL_RESULT pret);
+static PRL_RESULT
+prlsdkEventsHandler(PRL_HANDLE prlEvent, PRL_VOID_PTR opaque);
VIR_LOG_INIT("parallels.sdk");
@@ -363,41 +365,62 @@ prlsdkConnect(vzDriverPtr driver)
job = PrlSrv_LoginLocalEx(driver->server, NULL, 0,
PSL_HIGH_SECURITY, PACF_NON_INTERACTIVE_MODE);
if (PRL_FAILED(getJobResult(job, &result)))
- goto cleanup;
+ goto destroy;
pret = PrlResult_GetParam(result, &response);
- prlsdkCheckRetGoto(pret, cleanup);
+ prlsdkCheckRetGoto(pret, logoff);
pret = prlsdkGetStringParamBuf(PrlLoginResponse_GetSessionUuid,
response, session_uuid, sizeof(session_uuid));
- prlsdkCheckRetGoto(pret, cleanup);
+ prlsdkCheckRetGoto(pret, logoff);
if (prlsdkUUIDParse(session_uuid, driver->session_uuid) < 0)
- goto cleanup;
+ goto logoff;
+
+ pret = PrlSrv_RegEventHandler(driver->server,
+ prlsdkEventsHandler,
+ driver);
+ prlsdkCheckRetGoto(pret, logoff);
ret = 0;
cleanup:
- if (ret < 0) {
- PrlHandle_Free(driver->server);
- driver->server = PRL_INVALID_HANDLE;
- }
-
PrlHandle_Free(result);
PrlHandle_Free(response);
return ret;
+
+ logoff:
+ job = PrlSrv_Logoff(driver->server);
+ waitJob(job);
+
+ destroy:
+ PrlHandle_Free(driver->server);
+ driver->server = PRL_INVALID_HANDLE;
+
+ goto cleanup;
}
void
prlsdkDisconnect(vzDriverPtr driver)
{
PRL_HANDLE job;
+ PRL_RESULT ret;
+
+ if (driver->server == PRL_INVALID_HANDLE)
+ return;
+
+ ret = PrlSrv_UnregEventHandler(driver->server,
+ prlsdkEventsHandler,
+ driver);
+ if (PRL_FAILED(ret))
+ logPrlError(ret);
job = PrlSrv_Logoff(driver->server);
waitJob(job);
PrlHandle_Free(driver->server);
+ driver->server = PRL_INVALID_HANDLE;
}
static int
@@ -2334,30 +2357,6 @@ prlsdkEventsHandler(PRL_HANDLE prlEvent, PRL_VOID_PTR opaque)
return PRL_ERR_SUCCESS;
}
-int prlsdkSubscribeToPCSEvents(vzDriverPtr driver)
-{
- PRL_RESULT pret = PRL_ERR_UNINITIALIZED;
-
- pret = PrlSrv_RegEventHandler(driver->server,
- prlsdkEventsHandler,
- driver);
- prlsdkCheckRetGoto(pret, error);
- return 0;
-
- error:
- return -1;
-}
-
-void prlsdkUnsubscribeFromPCSEvents(vzDriverPtr driver)
-{
- PRL_RESULT ret = PRL_ERR_UNINITIALIZED;
- ret = PrlSrv_UnregEventHandler(driver->server,
- prlsdkEventsHandler,
- driver);
- if (PRL_FAILED(ret))
- logPrlError(ret);
-}
-
int prlsdkStart(virDomainObjPtr dom)
{
PRL_HANDLE job = PRL_INVALID_HANDLE;
diff --git a/src/vz/vz_sdk.h b/src/vz/vz_sdk.h
index 7e34cbf..f8da2ad 100644
--- a/src/vz/vz_sdk.h
+++ b/src/vz/vz_sdk.h
@@ -35,8 +35,6 @@ prlsdkAddDomainByUUID(vzDriverPtr driver, const unsigned char *uuid);
virDomainObjPtr
prlsdkAddDomainByName(vzDriverPtr driver, const char *name);
int prlsdkUpdateDomain(vzDriverPtr driver, virDomainObjPtr dom);
-int prlsdkSubscribeToPCSEvents(vzDriverPtr driver);
-void prlsdkUnsubscribeFromPCSEvents(vzDriverPtr driver);
int prlsdkStart(virDomainObjPtr dom);
int prlsdkKill(virDomainObjPtr dom);
--
1.8.3.1
7 years, 10 months
[libvirt] [PATCH v2 0/2] tests: Reduce QEMU_CAPS_DEVICE_{DMI_TO_, }PCI_BRIDGE usage
by Andrea Bolognani
Changes from v1:
* don't drop capabilities for a bunch of test cases,
so that potential regressions will be still caught.
Andrea Bolognani (2):
tests: Sync tests between qemuxml2argv and qemuxml2xml
tests: Reduce QEMU_CAPS_DEVICE_{DMI_TO_, }PCI_BRIDGE usage
.../qemuxml2argvdata/qemuxml2argv-pci-bridge.args | 90 +++++++++
tests/qemuxml2argvdata/qemuxml2argv-pci-bridge.xml | 97 +++-------
.../qemuxml2argv-vcpu-placement-static.args | 20 ++
tests/qemuxml2argvtest.c | 109 +++++------
.../qemuxml2xmlout-boot-floppy-q35.xml | 32 +++
.../qemuxml2xmlout-bootindex-floppy-q35.xml | 32 +++
.../qemuxml2xmlout-intel-iommu-machine.xml | 26 +++
.../qemuxml2xmlout-pci-bridge.xml | 98 ++++------
.../qemuxml2xmloutdata/qemuxml2xmlout-pci-many.xml | 214 +++++++++++++++++++++
...qemuxml2xmlout-video-device-pciaddr-default.xml | 51 +++++
tests/qemuxml2xmltest.c | 130 +++++++++----
11 files changed, 665 insertions(+), 234 deletions(-)
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-pci-bridge.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-vcpu-placement-static.args
create mode 100644 tests/qemuxml2xmloutdata/qemuxml2xmlout-boot-floppy-q35.xml
create mode 100644 tests/qemuxml2xmloutdata/qemuxml2xmlout-bootindex-floppy-q35.xml
create mode 100644 tests/qemuxml2xmloutdata/qemuxml2xmlout-intel-iommu-machine.xml
create mode 100644 tests/qemuxml2xmloutdata/qemuxml2xmlout-pci-many.xml
create mode 100644 tests/qemuxml2xmloutdata/qemuxml2xmlout-video-device-pciaddr-default.xml
--
2.7.4
7 years, 10 months
[libvirt] RFC: Making storage vol upload/download more useful
by Daniel P. Berrange
The virStorageVolUpload/Download APIs currently require that you upload
the physical disk image content, rather than the virtual disk image
content. ie the entire qcow2 file, not just its payload.
While this is certainly useful in some scenarios, it is rather unhelpful,
if not dangerous, in others.
For example, a qcow2 file can contain a backing file that points to
anything on the host filesystem and libvirt happily accepts it. The
best you can do is to query the volume XML again after upload and see
if it now reports a backing file. It would be highly desirable to
block this kind of thing before upload, because you don't want the
act of uploading to cause libvirt to open these arbitrary files.
In fact no matter what format your storage volume currently is,
when you're uploading new content at a physical level, it can change
to any other format.
If uploading non-raw files, there's also no way to restrict the size
of the files uploaded. ie you can upload a 256 KB sized qcow2 file
whose logical disk size is 4 exabytes. Again you can only detect
this after you've uploaded it, unless you're willing to run qemu-img
manually before upload, or write your own parser to validate upfront.
It would probably be ok to just ignore this problem - checking after the
fact doesn't cause security problems, as the large logical size is
an issue until the disk is given to a guest for use.
Finally, you might not even have the file in the desired format. For
example, someone might have given you a raw file,but you want to store
everything as qcow2 files. Currently you would have to create a new
volume, upload the content to a new volume, delete the existing volume
and use virStorageVolCreateFrom to re-create it again with a format
conversion, before finally deleting the temporary upload. It would be
very nice to be able to upload straight into the payload of an existing
file in one go - even if libvirt has to do a qemu-img conversion in the
process.
Some of these problems can be fixed with adding flags. For example I
would suggest
- VIR_STORAGE_VOL_UPLOAD_VIRTUAL => the stream provides the
virtual disk image payload, not a physical disk image container
- VIR_STORAGE_VOL_UPLOAD_FIXED_FORMAT => when VIRTUAL is not set,
this indicates that the physical source format must match the
current storage vol format. ie don't allow the upload to change
the image format by uploading a raw file into a qcow2 file or
vice versa
- VIR_STORAGE_VOL_UPLOAD_NO_BACKING_FILE => when VIRTUAL is not
set this indicates that the uploaded content must not be
permitted to have any backing file set.
To allow on the fly conversion of data when using the VIRTUAL flag
though, we need a new API to expose the stream data format, otherwise
libvirt would have to probe stream format to know what to convert
from, and that is unsafe from a security POV:
virStorageVolUploadConvert(virStorageVolPtr vol,
virStreamPtr stream,
int srcFormat,
unsigned long long offset,
unsigned long long length,
unsigned int flags);
The srcFormat parameter would map to virStorageFileFormat enum, which
would need to become public. It says what format the stream is providing
data in. In terms of implementation we would have to write the data into
a temporary file, and then use qemu-img convert to get it into the actual
file we want. We can't feed straight into qemu-img convert, since it
needs a seekable file as input & output.
If we had this functionality, then I think it would be possible for apps
to deal with storage uploads from end users, without having to run
qemu-img manually to validate data outside libvirt.
Regards,
Daniel
--
|: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org -o- http://virt-manager.org :|
|: http://entangle-photo.org -o- http://search.cpan.org/~danberr/ :|
7 years, 10 months