[libvirt] [RFC][PATCHv1 0/5] libvirt - show per cpu accounting information

When running 'virt-top -1' , "show percpu statistics mode", virt-top shows a fake value. When running 'while true; do echo hello;done' on a 4vcpu guest, == 0 0.0 1 0.0 2 0.0 3 0.0 4 25.0 25.0= 5 25.0 25.0=# 6 25.0 25.0= 7 25.0 25.0= == All cpus just used equally ;) This is because there is no interface to get per-cpu usage of domain. This patch adds an interface virDomainPcpuStats() to get per-cpu statistics, cpuTime in nanoseconds. Here is a test result with a python script using new interface. == [root@bluextal python]# ./virt-cpuacct.py ({'cpuTime': 0L}, {'cpuTime': 0L}, {'cpuTime': 0L}, {'cpuTime': 0L}, {'cpuTime': 4679204346L}, {'cpuTime': 2103820380L}, {'cpuTime': 8904513019L}, {'cpuTime': 7424701195L}) [root@bluextal python]# ./virt-cpuacct.py ({'cpuTime': 0L}, {'cpuTime': 0L}, {'cpuTime': 0L}, {'cpuTime': 0L}, {'cpuTime': 57010689139L}, {'cpuTime': 26152907202L}, {'cpuTime': 53759693931L}, {'cpuTime': 43074348218L}) == Although I added a new interface, I still wonder what interface is better... any comments are welcome. Thanks, -Kame

cpuacct cgroup provides per cpu cputime. Add an interface for that. Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> --- src/libvirt_private.syms | 1 + src/util/cgroup.c | 27 +++++++++++++++++++++++++++ src/util/cgroup.h | 2 ++ 3 files changed, 30 insertions(+), 0 deletions(-) diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms index ba7739d..f7a1ee7 100644 --- a/src/libvirt_private.syms +++ b/src/libvirt_private.syms @@ -70,6 +70,7 @@ virCgroupFree; virCgroupGetBlkioWeight; virCgroupGetCpuShares; virCgroupGetCpuacctUsage; +virCgroupGetCpuacctUsagePercpu; virCgroupGetFreezerState; virCgroupGetMemoryHardLimit; virCgroupGetMemorySoftLimit; diff --git a/src/util/cgroup.c b/src/util/cgroup.c index afe8731..58ea805 100644 --- a/src/util/cgroup.c +++ b/src/util/cgroup.c @@ -1329,6 +1329,33 @@ int virCgroupGetCpuacctUsage(virCgroupPtr group, unsigned long long *usage) "cpuacct.usage", usage); } +int virCgroupGetCpuacctUsagePercpu(virCgroupPtr group, + int nr, unsigned long long *usage) +{ + int i, rc; + char *strval = NULL; + char *startp, *endp; + unsigned long long int value; + + memset(usage, 0, sizeof(unsigned long long) * nr); + + rc = virCgroupGetValueStr(group, VIR_CGROUP_CONTROLLER_CPUACCT, + "cpuacct.usage_percpu", &strval); + if (rc) + goto out; + + for (i = 0, startp = strval; i < nr; i++) { + if (virStrToLong_ull(startp, &endp, 10, &value)) + break; + startp = endp; + usage[i] = value; + } + rc = i; +out: + VIR_FREE(strval); + return rc; +} + int virCgroupSetFreezerState(virCgroupPtr group, const char *state) { return virCgroupSetValueStr(group, diff --git a/src/util/cgroup.h b/src/util/cgroup.h index 8ae756d..31cbcba 100644 --- a/src/util/cgroup.h +++ b/src/util/cgroup.h @@ -100,6 +100,8 @@ int virCgroupSetCpuShares(virCgroupPtr group, unsigned long long shares); int virCgroupGetCpuShares(virCgroupPtr group, unsigned long long *shares); int virCgroupGetCpuacctUsage(virCgroupPtr group, unsigned long long *usage); +int 
virCgroupGetCpuacctUsagePercpu(virCgroupPtr group, + int nr, unsigned long long *usage); int virCgroupSetFreezerState(virCgroupPtr group, const char *state); int virCgroupGetFreezerState(virCgroupPtr group, char **state); -- 1.7.4.1

Per (host) cpu activity of VMs are very insterested numbers when running VMs on large SMPs. virt-top -1 mode tries to provide the information. (But it's not implemented.) This patch adds a libvirt interface to get per cpu statistics of each nodes. This patch just adds an interface and driver entry points. So, - doesn't include patches for python. - doesn't include any driver. Following patches will add some drivers and codes for python. Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> --- include/libvirt/libvirt.h.in | 13 ++++++++++ src/driver.h | 6 ++++ src/esx/esx_driver.c | 1 + src/libvirt.c | 55 ++++++++++++++++++++++++++++++++++++++++++ src/libvirt_public.syms | 4 +++ src/libxl/libxl_driver.c | 1 + src/lxc/lxc_driver.c | 1 + src/openvz/openvz_driver.c | 1 + src/phyp/phyp_driver.c | 1 + src/qemu/qemu_driver.c | 1 + src/remote/remote_driver.c | 1 + src/test/test_driver.c | 1 + src/uml/uml_driver.c | 1 + src/vbox/vbox_tmpl.c | 1 + src/vmware/vmware_driver.c | 1 + src/xen/xen_driver.c | 1 + src/xenapi/xenapi_driver.c | 1 + 17 files changed, 91 insertions(+), 0 deletions(-) diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 5783303..6b9292c 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -400,6 +400,13 @@ struct _virDomainMemoryStat { typedef virDomainMemoryStatStruct *virDomainMemoryStatPtr; +typedef struct _virDomainPcpuStat virDomainPcpuStatStruct; + +struct _virDomainPcpuStat { + unsigned long long cpuTime; +}; + +typedef virDomainPcpuStatStruct *virDomainPcpuStatPtr; /* Domain core dump flags. 
*/ typedef enum { @@ -923,6 +930,12 @@ int virDomainMemoryStats (virDomainPtr dom, virDomainMemoryStatPtr stats, unsigned int nr_stats, unsigned int flags); + +int virDomainPcpuStats (virDomainPtr dom, + virDomainPcpuStatPtr stats, + unsigned int nr_stats, + unsigned int flags); + int virDomainBlockPeek (virDomainPtr dom, const char *path, unsigned long long offset, diff --git a/src/driver.h b/src/driver.h index a8b79e6..ee1dac7 100644 --- a/src/driver.h +++ b/src/driver.h @@ -291,6 +291,11 @@ typedef int (virDomainPtr domain, struct _virDomainMemoryStat *stats, unsigned int nr_stats); +typedef int + (*virDrvDomainPcpuStats) + (virDomainPtr domain, + struct _virDomainPcpuStat *stats, + unsigned int nr_stats); typedef int (*virDrvDomainBlockPeek) @@ -599,6 +604,7 @@ struct _virDriver { virDrvDomainBlockStats domainBlockStats; virDrvDomainInterfaceStats domainInterfaceStats; virDrvDomainMemoryStats domainMemoryStats; + virDrvDomainPcpuStats domainPcpuStats; virDrvDomainBlockPeek domainBlockPeek; virDrvDomainMemoryPeek domainMemoryPeek; virDrvDomainGetBlockInfo domainGetBlockInfo; diff --git a/src/esx/esx_driver.c b/src/esx/esx_driver.c index 50c631b..34a31f0 100644 --- a/src/esx/esx_driver.c +++ b/src/esx/esx_driver.c @@ -4632,6 +4632,7 @@ static virDriver esxDriver = { NULL, /* domainBlockStats */ NULL, /* domainInterfaceStats */ NULL, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ NULL, /* domainBlockPeek */ NULL, /* domainMemoryPeek */ NULL, /* domainGetBlockInfo */ diff --git a/src/libvirt.c b/src/libvirt.c index 0da9885..24ef621 100644 --- a/src/libvirt.c +++ b/src/libvirt.c @@ -4595,6 +4595,61 @@ error: } /** + * virDomainPcpuStats: + * @dom: pointer to the domain object + * @stats: nr_stats-sized array of stat structures (returned) + * @nr_stats: number of cpu statistics requested + * @flags: unused, always pass 0 + * + * This function provides per-cpu statistics for the domain. 'cpu' here means + * not vcpu. 
+ * + * Up to 'nr_stats' elements of 'stats' will be populated with cpu statistics + * from the domain. Only statistics supported by the domain, the driver, and + * this version of libvirt will be returned. + * + * Now, only cpuTime per cpu is reported in nanoseconds. + * + * Returns: The number of stats provided or -1 in case of failure. + */ +int virDomainPcpuStats (virDomainPtr dom, virDomainPcpuStatPtr stats, + unsigned int nr_stats, unsigned int flags) +{ + virConnectPtr conn; + unsigned long nr_stats_ret = 0; + + virResetLastError(); + + if (!VIR_IS_CONNECTED_DOMAIN (dom)) { + virLibDomainError(VIR_ERR_INVALID_DOMAIN, __FUNCTION__); + virDispatchError(NULL); + return -1; + } + if (flags != 0) { + virLibDomainError(VIR_ERR_INVALID_ARG, + _("flags must be zero")); + goto error; + } + + if (!stats || nr_stats == 0) + return 0; + + conn = dom->conn; + if (conn->driver->domainPcpuStats) { + nr_stats_ret = conn->driver->domainPcpuStats (dom, stats, nr_stats); + if (nr_stats_ret == -1) + goto error; + return nr_stats_ret; + } + + virLibDomainError(VIR_ERR_NO_SUPPORT, __FUNCTION__); + +error: + virDispatchError(dom->conn); + return -1; +} + +/** * virDomainBlockPeek: * @dom: pointer to the domain object * @path: path to the block device diff --git a/src/libvirt_public.syms b/src/libvirt_public.syms index b4aed41..312ba66 100644 --- a/src/libvirt_public.syms +++ b/src/libvirt_public.syms @@ -436,4 +436,8 @@ LIBVIRT_0.9.0 { virStorageVolUpload; } LIBVIRT_0.8.8; +LIBVIRT_0.9.1 { + global: + virDomainPcpuStats; +} LIBVIRT_0.9.0; # .... define new API here using predicted next version number .... 
diff --git a/src/libxl/libxl_driver.c b/src/libxl/libxl_driver.c index 3040914..285dcaa 100644 --- a/src/libxl/libxl_driver.c +++ b/src/libxl/libxl_driver.c @@ -2623,6 +2623,7 @@ static virDriver libxlDriver = { NULL, /* domainBlockStats */ NULL, /* domainInterfaceStats */ NULL, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ NULL, /* domainBlockPeek */ NULL, /* domainMemoryPeek */ NULL, /* domainGetBlockInfo */ diff --git a/src/lxc/lxc_driver.c b/src/lxc/lxc_driver.c index e905302..9046a58 100644 --- a/src/lxc/lxc_driver.c +++ b/src/lxc/lxc_driver.c @@ -2866,6 +2866,7 @@ static virDriver lxcDriver = { NULL, /* domainBlockStats */ lxcDomainInterfaceStats, /* domainInterfaceStats */ NULL, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ NULL, /* domainBlockPeek */ NULL, /* domainMemoryPeek */ NULL, /* domainGetBlockInfo */ diff --git a/src/openvz/openvz_driver.c b/src/openvz/openvz_driver.c index 4af28e9..0c0004d 100644 --- a/src/openvz/openvz_driver.c +++ b/src/openvz/openvz_driver.c @@ -1627,6 +1627,7 @@ static virDriver openvzDriver = { NULL, /* domainBlockStats */ NULL, /* domainInterfaceStats */ NULL, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ NULL, /* domainBlockPeek */ NULL, /* domainMemoryPeek */ NULL, /* domainGetBlockInfo */ diff --git a/src/phyp/phyp_driver.c b/src/phyp/phyp_driver.c index 3862c9c..ac62d62 100644 --- a/src/phyp/phyp_driver.c +++ b/src/phyp/phyp_driver.c @@ -4579,6 +4579,7 @@ static virDriver phypDriver = { NULL, /* domainBlockStats */ NULL, /* domainInterfaceStats */ NULL, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ NULL, /* domainBlockPeek */ NULL, /* domainMemoryPeek */ NULL, /* domainGetBlockInfo */ diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index c1a44c9..0a78a70 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -6981,6 +6981,7 @@ static virDriver qemuDriver = { qemudDomainBlockStats, /* domainBlockStats */ qemudDomainInterfaceStats, /* domainInterfaceStats */ 
qemudDomainMemoryStats, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ qemudDomainBlockPeek, /* domainBlockPeek */ qemudDomainMemoryPeek, /* domainMemoryPeek */ qemuDomainGetBlockInfo, /* domainGetBlockInfo */ diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c index b979f71..d00b9ee 100644 --- a/src/remote/remote_driver.c +++ b/src/remote/remote_driver.c @@ -11261,6 +11261,7 @@ static virDriver remote_driver = { remoteDomainBlockStats, /* domainBlockStats */ remoteDomainInterfaceStats, /* domainInterfaceStats */ remoteDomainMemoryStats, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ remoteDomainBlockPeek, /* domainBlockPeek */ remoteDomainMemoryPeek, /* domainMemoryPeek */ remoteDomainGetBlockInfo, /* domainGetBlockInfo */ diff --git a/src/test/test_driver.c b/src/test/test_driver.c index 17f5ad9..0489492 100644 --- a/src/test/test_driver.c +++ b/src/test/test_driver.c @@ -5407,6 +5407,7 @@ static virDriver testDriver = { testDomainBlockStats, /* domainBlockStats */ testDomainInterfaceStats, /* domainInterfaceStats */ NULL, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ NULL, /* domainBlockPeek */ NULL, /* domainMemoryPeek */ NULL, /* domainGetBlockInfo */ diff --git a/src/uml/uml_driver.c b/src/uml/uml_driver.c index 33849a0..f3372fa 100644 --- a/src/uml/uml_driver.c +++ b/src/uml/uml_driver.c @@ -2213,6 +2213,7 @@ static virDriver umlDriver = { NULL, /* domainBlockStats */ NULL, /* domainInterfaceStats */ NULL, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ umlDomainBlockPeek, /* domainBlockPeek */ NULL, /* domainMemoryPeek */ NULL, /* domainGetBlockInfo */ diff --git a/src/vbox/vbox_tmpl.c b/src/vbox/vbox_tmpl.c index 3ca34dd..4467866 100644 --- a/src/vbox/vbox_tmpl.c +++ b/src/vbox/vbox_tmpl.c @@ -8602,6 +8602,7 @@ virDriver NAME(Driver) = { NULL, /* domainBlockStats */ NULL, /* domainInterfaceStats */ NULL, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ NULL, /* domainBlockPeek */ NULL, /* domainMemoryPeek 
*/ NULL, /* domainGetBlockInfo */ diff --git a/src/vmware/vmware_driver.c b/src/vmware/vmware_driver.c index bbfb1a4..d98ea45 100644 --- a/src/vmware/vmware_driver.c +++ b/src/vmware/vmware_driver.c @@ -967,6 +967,7 @@ static virDriver vmwareDriver = { NULL, /* domainBlockStats */ NULL, /* domainInterfaceStats */ NULL, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ NULL, /* domainBlockPeek */ NULL, /* domainMemoryPeek */ NULL, /* domainGetBlockInfo */ diff --git a/src/xen/xen_driver.c b/src/xen/xen_driver.c index 9f47722..1a76623 100644 --- a/src/xen/xen_driver.c +++ b/src/xen/xen_driver.c @@ -2101,6 +2101,7 @@ static virDriver xenUnifiedDriver = { xenUnifiedDomainBlockStats, /* domainBlockStats */ xenUnifiedDomainInterfaceStats, /* domainInterfaceStats */ NULL, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ xenUnifiedDomainBlockPeek, /* domainBlockPeek */ NULL, /* domainMemoryPeek */ NULL, /* domainGetBlockInfo */ diff --git a/src/xenapi/xenapi_driver.c b/src/xenapi/xenapi_driver.c index 60b23c7..62d83d8 100644 --- a/src/xenapi/xenapi_driver.c +++ b/src/xenapi/xenapi_driver.c @@ -1849,6 +1849,7 @@ static virDriver xenapiDriver = { NULL, /* domainBlockStats */ NULL, /* domainInterfaceStats */ NULL, /* domainMemoryStats */ + NULL, /* domainPcpuStats */ NULL, /* domainBlockPeek */ NULL, /* domainMemoryPeek */ NULL, /* domainGetBlockInfo */ -- 1.7.4.1

2011/4/15 KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>:
Per (host) cpu activity of VMs are very interesting numbers when running VMs on large SMPs. virt-top -1 mode tries to provide the information. (But it's not implemented.)
This patch adds a libvirt interface to get per cpu statistics of each nodes. This patch just adds an interface and driver entry points. So, - doesn't include patches for python. - doesn't include any driver.
Following patches will add some drivers and codes for python.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> --- include/libvirt/libvirt.h.in | 13 ++++++++++ src/driver.h | 6 ++++ src/esx/esx_driver.c | 1 + src/libvirt.c | 55 ++++++++++++++++++++++++++++++++++++++++++ src/libvirt_public.syms | 4 +++ src/libxl/libxl_driver.c | 1 + src/lxc/lxc_driver.c | 1 + src/openvz/openvz_driver.c | 1 + src/phyp/phyp_driver.c | 1 + src/qemu/qemu_driver.c | 1 + src/remote/remote_driver.c | 1 + src/test/test_driver.c | 1 + src/uml/uml_driver.c | 1 + src/vbox/vbox_tmpl.c | 1 + src/vmware/vmware_driver.c | 1 + src/xen/xen_driver.c | 1 + src/xenapi/xenapi_driver.c | 1 + 17 files changed, 91 insertions(+), 0 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 5783303..6b9292c 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -400,6 +400,13 @@ struct _virDomainMemoryStat {
typedef virDomainMemoryStatStruct *virDomainMemoryStatPtr;
+typedef struct _virDomainPcpuStat virDomainPcpuStatStruct; + +struct _virDomainPcpuStat { + unsigned long long cpuTime; +}; +
NACK to adding another public struct to the API. It's not extensible. As a stylistic nit please don't use the term PCPU as this looks like "Physical CPU". Just call it virDomainPerVcpuStat at least. Also do you really need the absolute CPU time? As noted elsewhere, this is in fact not implementable for ESX. But ESX can provide the virtual CPU utilization in MHz and percent. See the virNodeGetCpuTime series [1] for a better approach. [1] https://www.redhat.com/archives/libvir-list/2011-April/msg00702.html Matthias

On Fri, 15 Apr 2011 09:43:15 +0200 Matthias Bolte <matthias.bolte@googlemail.com> wrote:
2011/4/15 KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>:
Per (host) cpu activity of VMs are very interesting numbers when running VMs on large SMPs. virt-top -1 mode tries to provide the information. (But it's not implemented.)
This patch adds a libvirt interface to get per cpu statistics of each nodes. This patch just adds an interface and driver entry points. So, - doesn't include patches for python. - doesn't include any driver.
Following patches will add some drivers and codes for python.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> --- include/libvirt/libvirt.h.in | 13 ++++++++++ src/driver.h | 6 ++++ src/esx/esx_driver.c | 1 + src/libvirt.c | 55 ++++++++++++++++++++++++++++++++++++++++++ src/libvirt_public.syms | 4 +++ src/libxl/libxl_driver.c | 1 + src/lxc/lxc_driver.c | 1 + src/openvz/openvz_driver.c | 1 + src/phyp/phyp_driver.c | 1 + src/qemu/qemu_driver.c | 1 + src/remote/remote_driver.c | 1 + src/test/test_driver.c | 1 + src/uml/uml_driver.c | 1 + src/vbox/vbox_tmpl.c | 1 + src/vmware/vmware_driver.c | 1 + src/xen/xen_driver.c | 1 + src/xenapi/xenapi_driver.c | 1 + 17 files changed, 91 insertions(+), 0 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 5783303..6b9292c 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -400,6 +400,13 @@ struct _virDomainMemoryStat {
typedef virDomainMemoryStatStruct *virDomainMemoryStatPtr;
+typedef struct _virDomainPcpuStat virDomainPcpuStatStruct; + +struct _virDomainPcpuStat { + unsigned long long cpuTime; +}; +
NACK to adding another public struct to the API.
Oh, yes. I searched a suitable existing API but could not find one. Maybe adding new enum to Usui's work will be good but I need an array. Hmm, returning an array of unsigned long long is ok ?
It's not extensible. As a stylistic nit please don't use the term PCPU as this looks like "Physical CPU". Just call it virDomainPerVcpuStat at least.
Ah, no, this is PerPhysicalStat
Also do you really need the absolute CPU time? yes, for virt-top -1.
As noted elsewhere, this is in fact not implementable for ESX. But ESX can provide the virtual CPU utilization in MHz and percent.
I need physical cpu utilization by domain.
See the virNodeGetCpuTime series [1] for a better approach.
[1] https://www.redhat.com/archives/libvir-list/2011-April/msg00702.html
Thank you. -Kame

On Fri, Apr 15, 2011 at 04:43:03PM +0900, KAMEZAWA Hiroyuki wrote:
On Fri, 15 Apr 2011 09:43:15 +0200 Matthias Bolte <matthias.bolte@googlemail.com> wrote:
2011/4/15 KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>:
Per (host) cpu activity of VMs are very interesting numbers when running VMs on large SMPs. virt-top -1 mode tries to provide the information. (But it's not implemented.)
This patch adds a libvirt interface to get per cpu statistics of each nodes. This patch just adds an interface and driver entry points. So, - doesn't include patches for python. - doesn't include any driver.
Following patches will add some drivers and codes for python.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> --- include/libvirt/libvirt.h.in | 13 ++++++++++ src/driver.h | 6 ++++ src/esx/esx_driver.c | 1 + src/libvirt.c | 55 ++++++++++++++++++++++++++++++++++++++++++ src/libvirt_public.syms | 4 +++ src/libxl/libxl_driver.c | 1 + src/lxc/lxc_driver.c | 1 + src/openvz/openvz_driver.c | 1 + src/phyp/phyp_driver.c | 1 + src/qemu/qemu_driver.c | 1 + src/remote/remote_driver.c | 1 + src/test/test_driver.c | 1 + src/uml/uml_driver.c | 1 + src/vbox/vbox_tmpl.c | 1 + src/vmware/vmware_driver.c | 1 + src/xen/xen_driver.c | 1 + src/xenapi/xenapi_driver.c | 1 + 17 files changed, 91 insertions(+), 0 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in index 5783303..6b9292c 100644 --- a/include/libvirt/libvirt.h.in +++ b/include/libvirt/libvirt.h.in @@ -400,6 +400,13 @@ struct _virDomainMemoryStat {
typedef virDomainMemoryStatStruct *virDomainMemoryStatPtr;
+typedef struct _virDomainPcpuStat virDomainPcpuStatStruct; + +struct _virDomainPcpuStat { + unsigned long long cpuTime; +}; +
NACK to adding another public struct to the API.
Oh, yes. I searched a suitable existing API but could not find one.
Maybe adding new enum to Usui's work will be good but I need an array. Hmm, returning an array of unsigned long long is ok ?
It's not extensible. As a stylistic nit please don't use the term PCPU as this looks like "Physical CPU". Just call it virDomainPerVcpuStat at least.
Ah, no, this is PerPhysicalStat
Also do you really need the absolute CPU time? yes, for virt-top -1.
I don't think that is correct. What virt-top ultimately displays to the end user is a % utilization. So it would be happy to get data in either % util & display that directly, or get absolute CPU time and calculate deltas to display % util. I wonder if we could provide an API that could be used to provide either per-VCPU or per-PCPU statistics in one go, so that we can finally have an API for cpu time that works well for VMWare. eg typedef struct _virDomainCPUTime virDomainCPUTime; typedef virDomainCPUTime *virDomainCPUTimePtr; #define VIR_DOMAIN_CPUTIME_ABS "abs" #define VIR_DOMAIN_CPUTIME_UTIL "util" ...define more stats later if desired... struct _virDomainCPUTime { char field[VIR_DOMAIN_CPU_TIME_FIELD_LENGTH]; unsigned long long value; }; typedef enum { VIR_DOMAIN_CPU_TIME_VIRTUAL = 0, VIR_DOMAIN_CPU_TIME_PHYSICAL = 1, } virDomainCPUTimeFlags; int virDomainGetCPUTime(virDomainPtr dom, virDomainCPUTimePtr stats, int *nstats, unsigned int flags); When querying per-virtual CPU time, nstats would be the number of virtual CPUs in the guest. eg virDomainInfo info; virDomainGetInfo(dom, &info); virDomainCPUTimePtr stats; stats = malloc(sizeof(*stats)*info.nrVirtCPU); virDomainGetCPUTime(dom, stats, info.nrVirtCPU, VIR_DOMAIN_CPU_TIME_VIRTUAL); Or to get the break down per physical CPU, use the VIR_NODEINFO_MAX_CPUS macro virNodeInfo info; virNodeGetInfo(conn, &info); virDomainCPUTimePtr stats; stats = malloc(sizeof(*stats)*VIR_NODEINFO_MAX_CPUS(info)) virDomainGetCPUTime(dom, stats, VIR_NODEINFO_MAX_CPUS(info), VIR_DOMAIN_CPU_TIME_PHYSICAL); We could also allow 'nstats' to be 0, in which case the API would simply populate 'nstats' with the required number and return. The caller can then re-invoke with correct nstats and allocated memory. 
Daniel -- |: http://berrange.com -o- http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :|

On Tue, 19 Apr 2011 12:29:05 +0100 "Daniel P. Berrange" <berrange@redhat.com> wrote:
On Fri, Apr 15, 2011 at 04:43:03PM +0900, KAMEZAWA Hiroyuki wrote:
On Fri, 15 Apr 2011 09:43:15 +0200 Matthias Bolte <matthias.bolte@googlemail.com> wrote:
2011/4/15 KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>: Also do you really need the absolute CPU time? yes, for virt-top -1.
I don't think that is correct. What virt-top ultimately displays to the end user is a % utilization. So it would be happy to get data in either % util & display that directly, or get absolute CPU time and calculate deltas to display % util.
virt-top calculate delta by itself.
I wonder if we could provide an API that could be used to provide either per-VCPU or per-PCPU statistics in one go, so that we can finally have an API for cpu time that works well for VMWare.
VMWare ?
eg
typedef struct _virDomainCPUTime virDomainCPUTime; typedef virDomainCPUTime *virDomainCPUTimePtr;
#define VIR_DOMAIN_CPUTIME_ABS "abs" #define VIR_DOMAIN_CPUTIME_UTIL "util" ...define more stats later if desired...
To calculate 'util', we need 'interval' for calculating it. What interval do we use ? The libvirt need to calculate it periodically by making a thread ? "util" can be very easily calculated by util = cpuTime-delta / (interval(sec) * 1000000000) * 100. So, it depends on interval. For example, when we make fair-share-scheduler's balancing interval to 1sec, we never want an interval smaller than 1 sec. IIUC, virt-top users can specify 'interval'. So, virt-top needs to use absolute cpuTime delta even if libvirt provides 'util'. Because to show what users want, virt-top needs to use the interval users want.
struct _virDomainCPUTime { char field[VIR_DOMAIN_CPU_TIME_FIELD_LENGTH]; unsigned long long value; };
typedef enum { VIR_DOMAIN_CPU_TIME_VIRTUAL = 0, VIR_DOMAIN_CPU_TIME_PHYSICAL = 1, } virDomainCPUTimeFlags;
int virDomainGetCPUTime(virDomainPtr dom, virDomainCPUTimePtr stats, int *nstats, unsigned int flags);
When querying per-virtual CPU time, nstats would be the number of virtual CPUs in the guest. eg
Why we take care of vcpus ?
virDomainInfo info; virDomainGetInfo(dom, &info); virDomainCPUTimePtr stats;
stats = malloc(sizeof(*stats)*info.nrVirtCPU);
virDomainGetCPUTime(dom, stats, info.nrVirtCPU, VIR_DOMAIN_CPU_TIME_VIRTUAL);
Or to get the break down per physical CPU, use the VIR_NODEINFO_MAX_CPUS macro
per vcpu pcpu time is not what I want. Hmm, do others need such information ? And virt-top needs to re-calculate per-vm pcpu time by making a sum of cputime per vcpu on pcpus ? Anyway, cpuacct cgroup/ Linux kernel cannot provide per-vcpu-per-pcpu time and I cannot implement a qemu driver. Thanks, -Kame

2011/4/20 KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>:
On Tue, 19 Apr 2011 12:29:05 +0100 "Daniel P. Berrange" <berrange@redhat.com> wrote:
On Fri, Apr 15, 2011 at 04:43:03PM +0900, KAMEZAWA Hiroyuki wrote:
On Fri, 15 Apr 2011 09:43:15 +0200 Matthias Bolte <matthias.bolte@googlemail.com> wrote:
2011/4/15 KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>: Also do you really need the absolute CPU time? yes, for virt-top -1.
I don't think that is correct. What virt-top ultimately displays to the end user is a % utilization. So it would be happy to get data in either % util & display that directly, or get absolute CPU time and calculate deltas to display % util.
virt-top calculate delta by itself.
I wonder if we could provide an API that could be used to provide either per-VCPU or per-PCPU statistics in one go, so that we can finally have an API for cpu time that works well for VMWare.
VMWare ?
VMware can provide various statistics about a virtual machine, but most (actually all, I think) are not provided as absolute values but in 20 sec slots with 1h of history (a vCenter can store longer history). Therefore, it's nearly impossible to get the absolute CPU time of a virtual machine since bootup. In most cases the enduser is not interested in the absolute CPU time but in the current CPU utilization in percent. VMware already calculates the utilization, but libvirt currently has not API to expose that. http://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.... http://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/cpu_... Matthias

On Wed, 20 Apr 2011 09:32:07 +0200 Matthias Bolte <matthias.bolte@googlemail.com> wrote:
2011/4/20 KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>:
On Tue, 19 Apr 2011 12:29:05 +0100 "Daniel P. Berrange" <berrange@redhat.com> wrote:
On Fri, Apr 15, 2011 at 04:43:03PM +0900, KAMEZAWA Hiroyuki wrote:
On Fri, 15 Apr 2011 09:43:15 +0200 Matthias Bolte <matthias.bolte@googlemail.com> wrote:
2011/4/15 KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>: Also do you really need the absolute CPU time? yes, for virt-top -1.
I don't think that is correct. What virt-top ultimately displays to the end user is a % utilization. So it would be happy to get data in either % util & display that directly, or get absolute CPU time and calculate deltas to display % util.
virt-top calculate delta by itself.
I wonder if we could provide an API that could be used to provide either per-VCPU or per-PCPU statistics in one go, so that we can finally have an API for cpu time that works well for VMWare.
VMWare ?
VMware can provide various statistics about a virtual machine, but most (actually all, I think) are not provided as absolute values but in 20 sec slots with 1h of history (a vCenter can store longer history). Therefore, it's nearly impossible to get the absolute CPU time of a virtual machine since bootup.
In most cases the enduser is not interested in the absolute CPU time but in the current CPU utilization in percent. VMware already calculates the utilization, but libvirt currently has no API to expose that.
http://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.... http://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/cpu_...
Hmm, okay. thank you for clarification. Then, I'll prepare 2 options. int virDomainGetPhysCPUStatistics(domain, unsigned long long *stats, int nr_stats, enum PhysCPU_Stat); PhysCPU_ABS_CPUTIME : Per Physical cpu absolute usage time of domain. PhysCPU_UTIL : Per Physical cpu utilization of domain. The definition of 'utilization' depends on each VM. VMWare can provide its 20sec utilization. KVM will.....maybe 3sec interval is the best because it's 'top's default. But I myself don't take care of this interface and leave this as empty. To calculate it in 3sec interval, it seems to run a thread for doing that job. Maybe interval should be configurable by qemu.conf. If VMWare can provide PerVcpuPhysCPUUtilization, please add it. I have no idea. Thanks, -Kame

On Fri, Apr 15, 2011 at 04:04:52PM +0900, KAMEZAWA Hiroyuki wrote: [...] The idea and the implementation is sound. However I have a problem with this proposed API:
+typedef struct _virDomainPcpuStat virDomainPcpuStatStruct; + +struct _virDomainPcpuStat { + unsigned long long cpuTime; +}; + +typedef virDomainPcpuStatStruct *virDomainPcpuStatPtr;
[...]
/** + * virDomainPcpuStats: + * @dom: pointer to the domain object + * @stats: nr_stats-sized array of stat structures (returned) + * @nr_stats: number of cpu statistics requested + * @flags: unused, always pass 0 + * + * This function provides per-cpu statistics for the domain. 'cpu' here means + * not vcpu. + * + * Up to 'nr_stats' elements of 'stats' will be populated with cpu statistics + * from the domain. Only statistics supported by the domain, the driver, and + * this version of libvirt will be returned. + * + * Now, only cpuTime per cpu is reported in nanoseconds. + * + * Returns: The number of stats provided or -1 in case of failure. + */ +int virDomainPcpuStats (virDomainPtr dom, virDomainPcpuStatPtr stats, + unsigned int nr_stats, unsigned int flags)
Am I correct that the caller sets nr_stats == number of physical cores, and allocates an array of nr_stats * virDomainPcpuStats structs? We could never add more elements to the _virDomainPcpuStat structure, because that would break the ABI for existing callers. (Or I guess we could use the flags field). I think we should forget about the structure, and just use an array of unsigned long long's, but I'd like to hear what others think. Is it likely that we would return other per-physical-CPU stats in future? Rich. -- Richard Jones, Virtualization Group, Red Hat http://people.redhat.com/~rjones New in Fedora 11: Fedora Windows cross-compiler. Compile Windows programs, test, and build Windows installers. Over 70 libraries supprt'd http://fedoraproject.org/wiki/MinGW http://www.annexia.org/fedora_mingw

On Sat, 16 Apr 2011 14:52:51 +0100 "Richard W.M. Jones" <rjones@redhat.com> wrote:
On Fri, Apr 15, 2011 at 04:04:52PM +0900, KAMEZAWA Hiroyuki wrote: [...]
The idea and the implementation is sound. However I have a problem with this proposed API:
+typedef struct _virDomainPcpuStat virDomainPcpuStatStruct; + +struct _virDomainPcpuStat { + unsigned long long cpuTime; +}; + +typedef virDomainPcpuStatStruct *virDomainPcpuStatPtr;
[...]
/** + * virDomainPcpuStats: + * @dom: pointer to the domain object + * @stats: nr_stats-sized array of stat structures (returned) + * @nr_stats: number of cpu statistics requested + * @flags: unused, always pass 0 + * + * This function provides per-cpu statistics for the domain. 'cpu' here means + * not vcpu. + * + * Up to 'nr_stats' elements of 'stats' will be populated with cpu statistics + * from the domain. Only statistics supported by the domain, the driver, and + * this version of libvirt will be returned. + * + * Now, only cpuTime per cpu is reported in nanoseconds. + * + * Returns: The number of stats provided or -1 in case of failure. + */ +int virDomainPcpuStats (virDomainPtr dom, virDomainPcpuStatPtr stats, + unsigned int nr_stats, unsigned int flags)
Am I correct that the caller sets nr_stats == number of physical cores, and allocates an array of nr_stats * virDomainPcpuStats structs?
We could never add more elements to the _virDomainPcpuStat structure, because that would break the ABI for existing callers. (Or I guess we could use the flags field).
I think we should forget about the structure, and just use an array of unsigned long long's, but I'd like to hear what others think.
I received the same reply from others and I'd like to go that way.
Is it likely that we would return other per-physical-CPU stats in future?
Not sure. But if some guy adds more statistics to the cpuacct cgroup, such as # of vmenter/vmexit etc., we can count it. So, I'd like to pass a 'what you want' flag, as in virDomainPhysicalCpuStat(domain, unsigned long long *stats, int nr_stats, int what_I_want), and then pass VIR_DOMAIN_PHYSICAL_CPU_STAT_CPUTIME as the what_I_want argument. Thanks, -Kame

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> --- daemon/remote.c | 61 +++++++++++++++++++++++++++++++++++ daemon/remote_dispatch_args.h | 1 + daemon/remote_dispatch_prototypes.h | 8 ++++ daemon/remote_dispatch_ret.h | 1 + daemon/remote_dispatch_table.h | 5 +++ src/remote/remote_driver.c | 39 +++++++++++++++++++++- src/remote/remote_protocol.c | 31 ++++++++++++++++++ src/remote/remote_protocol.h | 27 +++++++++++++++ src/remote/remote_protocol.x | 18 ++++++++++- 9 files changed, 189 insertions(+), 2 deletions(-) diff --git a/daemon/remote.c b/daemon/remote.c index 5de1055..9757f13 100644 --- a/daemon/remote.c +++ b/daemon/remote.c @@ -1138,6 +1138,67 @@ remoteDispatchDomainMemoryStats(struct qemud_server *server ATTRIBUTE_UNUSED, } static int +remoteDispatchDomainPcpuStats(struct qemud_server *server ATTRIBUTE_UNUSED, + struct qemud_client *client ATTRIBUTE_UNUSED, + virConnectPtr conn, + remote_message_header *hdr ATTRIBUTE_UNUSED, + remote_error *rerr, + remote_domain_pcpu_stats_args *args, + remote_domain_pcpu_stats_ret *ret) +{ + virDomainPtr dom; + struct _virDomainPcpuStat *stats; + unsigned int nr_stats, i; + + if (!conn) { + remoteDispatchFormatError(rerr, "%s", _("connection not open")); + return -1; + } + + if (args->nr_stats > REMOTE_DOMAIN_PCPU_STATS_MAX) { + remoteDispatchFormatError(rerr, "%s", + _("nr_stats > REMOTE_DOMAIN_PCPU_STATS_MAX")); + return -1; + } + + dom = get_nonnull_domain(conn, args->dom); + if (dom == NULL) { + remoteDispatchConnError(rerr, conn); + return -1; + } + + /* Allocate stats array for making dispatch call */ + if (VIR_ALLOC_N(stats, args->nr_stats) < 0) { + virDomainFree(dom); + remoteDispatchOOMError(rerr); + return -1; + } + + nr_stats = virDomainPcpuStats(dom, stats, args->nr_stats, 0); + if (nr_stats == -1) { + VIR_FREE(stats); + remoteDispatchConnError(rerr, conn); + virDomainFree(dom); + return -1; + } + virDomainFree(dom); + + /* Allocate return buffer */ + if (VIR_ALLOC_N(ret->stats.stats_val, 
nr_stats) < 0) { + VIR_FREE(stats); + remoteDispatchOOMError(rerr); + return -1; + } + + /* Copy the stats into the xdr return structure */ + for (i = 0; i < nr_stats; i++) + ret->stats.stats_val[i].val = stats[i].cpuTime; + ret->stats.stats_len = nr_stats; + VIR_FREE(stats); + return 0; +} + +static int remoteDispatchDomainBlockPeek(struct qemud_server *server ATTRIBUTE_UNUSED, struct qemud_client *client ATTRIBUTE_UNUSED, virConnectPtr conn, diff --git a/daemon/remote_dispatch_args.h b/daemon/remote_dispatch_args.h index f9537d7..42fc7b2 100644 --- a/daemon/remote_dispatch_args.h +++ b/daemon/remote_dispatch_args.h @@ -178,3 +178,4 @@ remote_domain_migrate_set_max_speed_args val_remote_domain_migrate_set_max_speed_args; remote_storage_vol_upload_args val_remote_storage_vol_upload_args; remote_storage_vol_download_args val_remote_storage_vol_download_args; + remote_domain_pcpu_stats_args val_remote_domain_pcpu_stats_args; diff --git a/daemon/remote_dispatch_prototypes.h b/daemon/remote_dispatch_prototypes.h index 18bf41d..5dece04 100644 --- a/daemon/remote_dispatch_prototypes.h +++ b/daemon/remote_dispatch_prototypes.h @@ -498,6 +498,14 @@ static int remoteDispatchDomainOpenConsole( remote_error *err, remote_domain_open_console_args *args, void *ret); +static int remoteDispatchDomainPcpuStats( + struct qemud_server *server, + struct qemud_client *client, + virConnectPtr conn, + remote_message_header *hdr, + remote_error *err, + remote_domain_pcpu_stats_args *args, + remote_domain_pcpu_stats_ret *ret); static int remoteDispatchDomainPinVcpu( struct qemud_server *server, struct qemud_client *client, diff --git a/daemon/remote_dispatch_ret.h b/daemon/remote_dispatch_ret.h index 114e832..4c0b66d 100644 --- a/daemon/remote_dispatch_ret.h +++ b/daemon/remote_dispatch_ret.h @@ -140,3 +140,4 @@ remote_domain_is_updated_ret val_remote_domain_is_updated_ret; remote_get_sysinfo_ret val_remote_get_sysinfo_ret; remote_domain_get_blkio_parameters_ret 
val_remote_domain_get_blkio_parameters_ret; + remote_domain_pcpu_stats_ret val_remote_domain_pcpu_stats_ret; diff --git a/daemon/remote_dispatch_table.h b/daemon/remote_dispatch_table.h index b39f7c2..cb1fa05 100644 --- a/daemon/remote_dispatch_table.h +++ b/daemon/remote_dispatch_table.h @@ -1052,3 +1052,8 @@ .args_filter = (xdrproc_t) xdr_remote_storage_vol_download_args, .ret_filter = (xdrproc_t) xdr_void, }, +{ /* DomainPcpuStats => 210 */ + .fn = (dispatch_fn) remoteDispatchDomainPcpuStats, + .args_filter = (xdrproc_t) xdr_remote_domain_pcpu_stats_args, + .ret_filter = (xdrproc_t) xdr_remote_domain_pcpu_stats_ret, +}, diff --git a/src/remote/remote_driver.c b/src/remote/remote_driver.c index d00b9ee..6f7c2e7 100644 --- a/src/remote/remote_driver.c +++ b/src/remote/remote_driver.c @@ -4172,6 +4172,43 @@ done: return rv; } + +static int +remoteDomainPcpuStats (virDomainPtr domain, + struct _virDomainPcpuStat *stats, + unsigned int nr_stats) +{ + int rv = -1; + remote_domain_pcpu_stats_args args; + remote_domain_pcpu_stats_ret ret; + struct private_data *priv = domain->conn->privateData; + unsigned int i; + + remoteDriverLock(priv); + + make_nonnull_domain (&args.dom, domain); + args.nr_stats = nr_stats; + + memset (&ret, 0, sizeof ret); + + if (call (domain->conn, priv, 0, REMOTE_PROC_DOMAIN_PCPU_STATS, + (xdrproc_t) xdr_remote_domain_pcpu_stats_args, + (char *) &args, + (xdrproc_t) xdr_remote_domain_pcpu_stats_ret, + (char *) &ret) == -1) + goto done; + + for (i = 0; i < ret.stats.stats_len; i++) + stats[i].cpuTime = ret.stats.stats_val[i].val; + rv = ret.stats.stats_len; + xdr_free((xdrproc_t) xdr_remote_domain_pcpu_stats_ret, (char *) &ret); + + done: + remoteDriverUnlock(priv); + return rv; +} + + static int remoteDomainBlockPeek (virDomainPtr domain, const char *path, @@ -11261,7 +11298,7 @@ static virDriver remote_driver = { remoteDomainBlockStats, /* domainBlockStats */ remoteDomainInterfaceStats, /* domainInterfaceStats */ remoteDomainMemoryStats, /* 
domainMemoryStats */ - NULL, /* domainPcpuStats */ + remoteDomainPcpuStats, /* domainPcpuStats */ remoteDomainBlockPeek, /* domainBlockPeek */ remoteDomainMemoryPeek, /* domainMemoryPeek */ remoteDomainGetBlockInfo, /* domainGetBlockInfo */ diff --git a/src/remote/remote_protocol.c b/src/remote/remote_protocol.c index 5604371..53825ea 100644 --- a/src/remote/remote_protocol.c +++ b/src/remote/remote_protocol.c @@ -872,6 +872,37 @@ xdr_remote_domain_memory_stats_ret (XDR *xdrs, remote_domain_memory_stats_ret *o } bool_t +xdr_remote_domain_pcpu_stats_args (XDR *xdrs, remote_domain_pcpu_stats_args *objp) +{ + + if (!xdr_remote_nonnull_domain (xdrs, &objp->dom)) + return FALSE; + if (!xdr_u_int (xdrs, &objp->nr_stats)) + return FALSE; + return TRUE; +} + +bool_t +xdr_remote_domain_pcpu_stat (XDR *xdrs, remote_domain_pcpu_stat *objp) +{ + + if (!xdr_uint64_t (xdrs, &objp->val)) + return FALSE; + return TRUE; +} + +bool_t +xdr_remote_domain_pcpu_stats_ret (XDR *xdrs, remote_domain_pcpu_stats_ret *objp) +{ + char **objp_cpp0 = (char **) (void *) &objp->stats.stats_val; + + if (!xdr_array (xdrs, objp_cpp0, (u_int *) &objp->stats.stats_len, REMOTE_DOMAIN_PCPU_STATS_MAX, + sizeof (remote_domain_pcpu_stat), (xdrproc_t) xdr_remote_domain_pcpu_stat)) + return FALSE; + return TRUE; +} + +bool_t xdr_remote_domain_block_peek_args (XDR *xdrs, remote_domain_block_peek_args *objp) { diff --git a/src/remote/remote_protocol.h b/src/remote/remote_protocol.h index d9bf151..f220de9 100644 --- a/src/remote/remote_protocol.h +++ b/src/remote/remote_protocol.h @@ -59,6 +59,7 @@ typedef remote_nonnull_string *remote_string; #define REMOTE_AUTH_SASL_DATA_MAX 65536 #define REMOTE_AUTH_TYPE_LIST_MAX 20 #define REMOTE_DOMAIN_MEMORY_STATS_MAX 1024 +#define REMOTE_DOMAIN_PCPU_STATS_MAX 1024 #define REMOTE_DOMAIN_SNAPSHOT_LIST_NAMES_MAX 1024 #define REMOTE_DOMAIN_BLOCK_PEEK_BUFFER_MAX 65536 #define REMOTE_DOMAIN_MEMORY_PEEK_BUFFER_MAX 65536 @@ -463,6 +464,25 @@ struct remote_domain_memory_stats_ret 
{ }; typedef struct remote_domain_memory_stats_ret remote_domain_memory_stats_ret; +struct remote_domain_pcpu_stats_args { + remote_nonnull_domain dom; + u_int nr_stats; +}; +typedef struct remote_domain_pcpu_stats_args remote_domain_pcpu_stats_args; + +struct remote_domain_pcpu_stat { + uint64_t val; +}; +typedef struct remote_domain_pcpu_stat remote_domain_pcpu_stat; + +struct remote_domain_pcpu_stats_ret { + struct { + u_int stats_len; + remote_domain_pcpu_stat *stats_val; + } stats; +}; +typedef struct remote_domain_pcpu_stats_ret remote_domain_pcpu_stats_ret; + struct remote_domain_block_peek_args { remote_nonnull_domain dom; remote_nonnull_string path; @@ -2413,6 +2433,7 @@ enum remote_procedure { REMOTE_PROC_DOMAIN_MIGRATE_SET_MAX_SPEED = 207, REMOTE_PROC_STORAGE_VOL_UPLOAD = 208, REMOTE_PROC_STORAGE_VOL_DOWNLOAD = 209, + REMOTE_PROC_DOMAIN_PCPU_STATS = 210, }; typedef enum remote_procedure remote_procedure; @@ -2507,6 +2528,9 @@ extern bool_t xdr_remote_domain_interface_stats_ret (XDR *, remote_domain_inter extern bool_t xdr_remote_domain_memory_stats_args (XDR *, remote_domain_memory_stats_args*); extern bool_t xdr_remote_domain_memory_stat (XDR *, remote_domain_memory_stat*); extern bool_t xdr_remote_domain_memory_stats_ret (XDR *, remote_domain_memory_stats_ret*); +extern bool_t xdr_remote_domain_pcpu_stats_args (XDR *, remote_domain_pcpu_stats_args*); +extern bool_t xdr_remote_domain_pcpu_stat (XDR *, remote_domain_pcpu_stat*); +extern bool_t xdr_remote_domain_pcpu_stats_ret (XDR *, remote_domain_pcpu_stats_ret*); extern bool_t xdr_remote_domain_block_peek_args (XDR *, remote_domain_block_peek_args*); extern bool_t xdr_remote_domain_block_peek_ret (XDR *, remote_domain_block_peek_ret*); extern bool_t xdr_remote_domain_memory_peek_args (XDR *, remote_domain_memory_peek_args*); @@ -2864,6 +2888,9 @@ extern bool_t xdr_remote_domain_interface_stats_ret (); extern bool_t xdr_remote_domain_memory_stats_args (); extern bool_t xdr_remote_domain_memory_stat (); 
extern bool_t xdr_remote_domain_memory_stats_ret (); +extern bool_t xdr_remote_domain_pcpu_stats_args (); +extern bool_t xdr_remote_domain_pcpu_stat (); +extern bool_t xdr_remote_domain_pcpu_stats_ret (); extern bool_t xdr_remote_domain_block_peek_args (); extern bool_t xdr_remote_domain_block_peek_ret (); extern bool_t xdr_remote_domain_memory_peek_args (); diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x index 675eccd..7ab7704 100644 --- a/src/remote/remote_protocol.x +++ b/src/remote/remote_protocol.x @@ -146,6 +146,9 @@ const REMOTE_AUTH_TYPE_LIST_MAX = 20; /* Upper limit on list of memory stats */ const REMOTE_DOMAIN_MEMORY_STATS_MAX = 1024; +/* Upper limit on list of pcpu stats */ +const REMOTE_DOMAIN_PCPU_STATS_MAX = 1024; + /* Upper limit on lists of domain snapshots. */ const REMOTE_DOMAIN_SNAPSHOT_LIST_NAMES_MAX = 1024; @@ -554,6 +557,18 @@ struct remote_domain_memory_stats_ret { remote_domain_memory_stat stats<REMOTE_DOMAIN_MEMORY_STATS_MAX>; }; +struct remote_domain_pcpu_stats_args { + remote_nonnull_domain dom; + u_int nr_stats; +}; + +struct remote_domain_pcpu_stat { + unsigned hyper val; +}; +struct remote_domain_pcpu_stats_ret { + remote_domain_pcpu_stat stats<REMOTE_DOMAIN_PCPU_STATS_MAX>; +}; + struct remote_domain_block_peek_args { remote_nonnull_domain dom; remote_nonnull_string path; @@ -2176,7 +2191,8 @@ enum remote_procedure { REMOTE_PROC_DOMAIN_GET_BLKIO_PARAMETERS = 206, REMOTE_PROC_DOMAIN_MIGRATE_SET_MAX_SPEED = 207, REMOTE_PROC_STORAGE_VOL_UPLOAD = 208, - REMOTE_PROC_STORAGE_VOL_DOWNLOAD = 209 + REMOTE_PROC_STORAGE_VOL_DOWNLOAD = 209, + REMOTE_PROC_DOMAIN_PCPU_STATS = 210 /* * Notice how the entries are grouped in sets of 10 ? -- 1.7.4.1

Values of all cpus are returned by a tuple of Dict as: [{cpuTime: xxxx}, {cpuTime: yyyy}} # when the number of cpu is 2. Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> --- python/generator.py | 1 + python/libvirt-override-api.xml | 5 ++++ python/libvirt-override.c | 42 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 0 deletions(-) diff --git a/python/generator.py b/python/generator.py index 4fa4f65..d2695bb 100755 --- a/python/generator.py +++ b/python/generator.py @@ -307,6 +307,7 @@ skip_impl = ( 'virDomainBlockStats', 'virDomainInterfaceStats', 'virDomainMemoryStats', + 'virDomainPcpuStats', 'virNodeGetCellsFreeMemory', 'virDomainGetSchedulerType', 'virDomainGetSchedulerParameters', diff --git a/python/libvirt-override-api.xml b/python/libvirt-override-api.xml index 54deeb5..5ee3881 100644 --- a/python/libvirt-override-api.xml +++ b/python/libvirt-override-api.xml @@ -127,6 +127,11 @@ <return type='virDomainMemoryStats' info='a dictionary of statistics'/> <arg name='domain' type='virDomainPtr' info='a domain object'/> </function> + <function name='virDomainPcpuStats' file='python'> + <info>Extracts per cpu statistics for a domain </info> + <return type='virDomainPcpuStats *' info='a tuple of statistics' /> + <arg name='domain' type='virDomainPtr' info='a domain object'/> + </function> <function name="virNodeGetCellsFreeMemory" file='python'> <info>Returns the available memory for a list of cells</info> <arg name='conn' type='virConnectPtr' info='pointer to the hypervisor connection'/> diff --git a/python/libvirt-override.c b/python/libvirt-override.c index 4a9b432..c2ff0aa 100644 --- a/python/libvirt-override.c +++ b/python/libvirt-override.c @@ -164,6 +164,47 @@ libvirt_virDomainMemoryStats(PyObject *self ATTRIBUTE_UNUSED, PyObject *args) { return info; } +static PyObject* +libvirt_virDomainPcpuStats(PyObject *self ATTRIBUTE_UNUSED, PyObject *args) { + virDomainPtr domain; + virNodeInfo nodeinfo; + PyObject 
*pyobj_domain; + PyObject *info, *info2; + int nr_stats, i_retval, i; + virDomainPcpuStatPtr stats; + + if (!PyArg_ParseTuple(args, (char *)"O:virDomainPcpuStats", &pyobj_domain)) + return (NULL); + + domain = (virDomainPtr) PyvirDomain_Get(pyobj_domain); + + LIBVIRT_BEGIN_ALLOW_THREADS; + i_retval = virNodeGetInfo(virDomainGetConnect(domain), &nodeinfo); + LIBVIRT_END_ALLOW_THREADS; + if (i_retval < 0) + return VIR_PY_NONE; + + stats = malloc(sizeof(struct _virDomainPcpuStat) * nodeinfo.cpus); + + nr_stats = virDomainPcpuStats(domain, stats, nodeinfo.cpus, 0); + + if (nr_stats == -1) + return VIR_PY_NONE; + + if ((info = PyTuple_New(nodeinfo.cpus)) == NULL) + return VIR_PY_NONE; + + for (i = 0; i < nr_stats; i++) { + if ((info2 = PyDict_New()) == NULL) + return VIR_PY_NONE; + PyDict_SetItem(info2, libvirt_constcharPtrWrap("cpuTime"), + PyLong_FromLongLong(stats[i].cpuTime)); + PyTuple_SetItem(info, i, info2); + } + return (info); +} + + static PyObject * libvirt_virDomainGetSchedulerType(PyObject *self ATTRIBUTE_UNUSED, PyObject *args) { @@ -3545,6 +3586,7 @@ static PyMethodDef libvirtMethods[] = { {(char *) "virDomainBlockStats", libvirt_virDomainBlockStats, METH_VARARGS, NULL}, {(char *) "virDomainInterfaceStats", libvirt_virDomainInterfaceStats, METH_VARARGS, NULL}, {(char *) "virDomainMemoryStats", libvirt_virDomainMemoryStats, METH_VARARGS, NULL}, + {(char *) "virDomainPcpuStats", libvirt_virDomainPcpuStats, METH_VARARGS, NULL}, {(char *) "virNodeGetCellsFreeMemory", libvirt_virNodeGetCellsFreeMemory, METH_VARARGS, NULL}, {(char *) "virDomainGetSchedulerType", libvirt_virDomainGetSchedulerType, METH_VARARGS, NULL}, {(char *) "virDomainGetSchedulerParameters", libvirt_virDomainGetSchedulerParameters, METH_VARARGS, NULL}, -- 1.7.4.1

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> --- src/qemu/qemu_conf.c | 1 + src/qemu/qemu_driver.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 71 insertions(+), 1 deletions(-) diff --git a/src/qemu/qemu_conf.c b/src/qemu/qemu_conf.c index bb5421b..89c5b4c 100644 --- a/src/qemu/qemu_conf.c +++ b/src/qemu/qemu_conf.c @@ -302,6 +302,7 @@ int qemudLoadDriverConfig(struct qemud_driver *driver, } else { driver->cgroupControllers = (1 << VIR_CGROUP_CONTROLLER_CPU) | + (1 << VIR_CGROUP_CONTROLLER_CPUACCT) | (1 << VIR_CGROUP_CONTROLLER_DEVICES) | (1 << VIR_CGROUP_CONTROLLER_MEMORY) | (1 << VIR_CGROUP_CONTROLLER_BLKIO); diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 0a78a70..6db4f8a 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -5011,6 +5011,75 @@ cleanup: } static int +qemuDomainPcpuStats (virDomainPtr dom, + struct _virDomainPcpuStat *stats, + unsigned int nr_stats) +{ + struct qemud_driver *driver = dom->conn->privateData; + virDomainObjPtr vm = NULL; + virCgroupPtr group = NULL; + int ret = -1; + unsigned long long *array = NULL; + int i; + + + qemuDriverLock(driver); + if (!qemuCgroupControllerActive(driver, VIR_CGROUP_CONTROLLER_CPUACCT)) { + qemuDriverUnlock(driver); + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("cgroup CPUACCT controller is not mounted")); + goto cleanup; + } + + vm = virDomainFindByUUID(&driver->domains, dom->uuid); + qemuDriverUnlock(driver); + + if (!vm) { + char uuidstr[VIR_UUID_STRING_BUFLEN]; + virUUIDFormat(dom->uuid, uuidstr); + qemuReportError(VIR_ERR_NO_DOMAIN, + _("no domain with matching uuid '%s'"), uuidstr); + goto cleanup; + } + + if (VIR_ALLOC_N(array, nr_stats) < 0) { + virReportOOMError(); + goto cleanup; + } + + if (!virDomainObjIsActive(vm)) { + qemuReportError(VIR_ERR_OPERATION_INVALID, + "%s", _("domain is not running")); + goto cleanup; + } + + if (virCgroupForDomain(driver->cgroup, vm->def->name, &group, 0) != 0) { + 
qemuReportError(VIR_ERR_INTERNAL_ERROR, + _("cannot find cgroup for domain %s"), vm->def->name); + goto cleanup; + } + + ret = virCgroupGetCpuacctUsagePercpu(group, nr_stats, array); + if (ret <= 0) { + ret = -1; + goto cleanup; + } + + for (i = 0; i < ret; i++) + stats[i].cpuTime = array[i]; + +cleanup: + if (group) + virCgroupFree(&group); + VIR_FREE(array); + if (vm) + virDomainObjUnlock(vm); + return ret; +} + + + +static int qemudDomainBlockPeek (virDomainPtr dom, const char *path, unsigned long long offset, size_t size, @@ -6981,7 +7050,7 @@ static virDriver qemuDriver = { qemudDomainBlockStats, /* domainBlockStats */ qemudDomainInterfaceStats, /* domainInterfaceStats */ qemudDomainMemoryStats, /* domainMemoryStats */ - NULL, /* domainPcpuStats */ + qemuDomainPcpuStats, /* domainPcpuStats */ qemudDomainBlockPeek, /* domainBlockPeek */ qemudDomainMemoryPeek, /* domainMemoryPeek */ qemuDomainGetBlockInfo, /* domainGetBlockInfo */ -- 1.7.4.1
participants (4)
-
Daniel P. Berrange
-
KAMEZAWA Hiroyuki
-
Matthias Bolte
-
Richard W.M. Jones