[libvirt] Unattended guest installation fails
by ChandraShekar Shastri
Hi,
I am following the below link to install a Fedora19 guest on Fedora19 Host.
http://kashyapc.com/2011/08/18/unattended-guest-install-with-a-local-kick...
Version :
libvirtd (libvirt) 1.0.5.1,
QEMU emulator version 1.4.2, Copyright (c) 2003-2008 Fabrice Bellard
I have the iso which I loop mounted and created a http repo using python
module "python -m SimpleHTTPServer". This repo is properly accessible and
the guest is able to pull the initrd.img and vmlinuz but further it fails.
Please find the attached xml and console messages and let me know how to
proceed further.
Thanks,
Chandrashekar
10 years, 2 months
[libvirt] [PATCH 0/4] Couple of <cpu/> fixes
by Michal Privoznik
*** BLURB HERE ***
Michal Privoznik (4):
util: Introduce virBufferAddBuffer
virCPUDefFormatBufFull: Use our general error handling pattern
cpu: Format <cpu/> properly
qemu: Use correct flags for ABI stability check in SaveImageUpdateDef
src/conf/cpu_conf.c | 31 ++++--
src/libvirt_private.syms | 1 +
src/qemu/qemu_driver.c | 4 +-
src/util/virbuffer.c | 33 +++++-
src/util/virbuffer.h | 1 +
.../qemuxml2argv-cpu-host-kvmclock.xml | 3 +-
tests/virbuftest.c | 112 +++++++++++++++++++++
7 files changed, 169 insertions(+), 16 deletions(-)
--
2.0.5
10 years, 2 months
[libvirt] Libvirt Forwarding Incoming Connections
by kim
Hi Libvirt Developers
The link at http://wiki.libvirt.org/page/Networking under "Forwarding
Incoming Connections " for the advanced version of the script is not
working.
We experienced problems with the script in that there is no interface
specified and although the port forwarding worked, it was too global and
caused a loop when we forwarded port 80 to a guest machine that required
outbound port 80 connectivity.
The advanced script might (and probably does) resolve the problems that
we experienced but as it is not available here is our script in the hope
that it may be useful to others.
Start of script:
#!/bin/bash
# derived from script at "http://wiki.libvirt.org/page/Networking"
#
# libvirtd qemu hook: forward incoming connections arriving on selected
# host interface/port pairs to guest IP/port pairs using iptables DNAT.
#
# Fixes relative to the mailing-list posting: rejoined command and comment
# lines that were broken by email wrapping, and switched the shebang to
# bash — the script uses arrays, 'local' and indirect expansion ("${!4}"),
# none of which are POSIX sh.

debugfile=/dev/null
# set debugfile to desired path and uncomment to debug
#debugfile=/mydebugpath/libvirt_hooks_qemu.debug

# ${1} and ${2} are passed to this script by libvirtd:
#   ${1} = guest (domain) name, ${2} = action (start/stopped/reconnect/...)
VIR_DOMAIN=${1}
ACTION=${2}
printf 'VIR_DOMAIN=%s\nACTION=%s\n\n' "${VIR_DOMAIN}" "${ACTION}" > "${debugfile}"

# setiptables HOST_IFACE GUEST_NAME GUEST_IP HOST_PORTS[@] GUEST_PORTS[@]
# For the guest libvirtd is currently handling (VIR_DOMAIN), delete the
# forwarding rules on "stopped"/"reconnect" and (re)add them on
# "start"/"reconnect".  The port arrays are passed by name and expanded
# indirectly; both must contain an equal number of elements.
# Returns 0 in every case (hook scripts should not fail the domain).
setiptables ()
{
    local Host_interface=${1}
    local Guest_name=${2}
    local Guest_ipaddr=${3}
    local -a Host_port=("${!4}")
    local -a Guest_port=("${!5}")
    local length i PrerouteCmd ForwardCmd

    printf 'Host_interface=%s\nGuest_name=%s\nGuest_ipaddr=%s\nHost_port=%s\nGuest_port=%s\n\n' \
        "${Host_interface}" "${Guest_name}" "${Guest_ipaddr}" \
        "${Host_port[*]}" "${Guest_port[*]}" >> "${debugfile}"

    length=$(( ${#Host_port[@]} - 1 ))
    if [[ "${VIR_DOMAIN}" = "${Guest_name}" ]]; then
        # Remove old rules first ("reconnect" deletes then re-adds below).
        if [[ "${ACTION}" = "stopped" || "${ACTION}" = "reconnect" ]]; then
            for (( i = 0; i <= length; i++ )); do
                PrerouteCmd="iptables -t nat -D PREROUTING -p tcp --dport ${Host_port[$i]} -j DNAT -i ${Host_interface} --to ${Guest_ipaddr}:${Guest_port[$i]}"
                ForwardCmd="iptables -D FORWARD -d ${Guest_ipaddr}/32 -p tcp -m state --state NEW -m tcp --dport ${Guest_port[$i]} -j ACCEPT"
                sh -c "${PrerouteCmd}"
                sh -c "${ForwardCmd}"
                printf '%s\n%s\n\n' "${PrerouteCmd}" "${ForwardCmd}" >> "${debugfile}"
            done
        fi
        if [[ "${ACTION}" = "start" || "${ACTION}" = "reconnect" ]]; then
            for (( i = 0; i <= length; i++ )); do
                PrerouteCmd="iptables -t nat -A PREROUTING -p tcp --dport ${Host_port[$i]} -j DNAT -i ${Host_interface} --to ${Guest_ipaddr}:${Guest_port[$i]}"
                ForwardCmd="iptables -I FORWARD -d ${Guest_ipaddr}/32 -p tcp -m state --state NEW -m tcp --dport ${Guest_port[$i]} -j ACCEPT"
                sh -c "${PrerouteCmd}"
                sh -c "${ForwardCmd}"
                printf '%s\n%s\n\n' "${PrerouteCmd}" "${ForwardCmd}" >> "${debugfile}"
            done
        fi
        printf '\n' >> "${debugfile}"
    fi
    return 0
}

# **********************************
# Guest1                           *
# **********************************
Guest=Guest1
Guest_ip=192.168.122.xxx

# Forwarding from ethernet interface eno1:
#   host port 80  -> ${Guest_ip}:8080
#   host port 443 -> ${Guest_ip}:443
# use an equal number of guest and host ports
interface=eno1
Host_port_array=( '80' '443' )
Guest_port_array=( '8080' '443' )
setiptables "${interface}" "${Guest}" "${Guest_ip}" Host_port_array[@] Guest_port_array[@]

# Forwarding from vpn interface tun0 (example for openvpn connection):
#   host port 3395 -> ${Guest_ip}:3395
# use an equal number of guest and host ports
interface=tun0
Host_port_array=( '3395' )
Guest_port_array=( '3395' )
setiptables "${interface}" "${Guest}" "${Guest_ip}" Host_port_array[@] Guest_port_array[@]

# Multiple guest machines can be configured in a similar way
# ********* end of script ***********
10 years, 2 months
[libvirt] [PATCH] util: do not restore the VF that is in use by another active guest
by Zhang Bo
If we assign a VF, which has already been used by an active guest, to another
guest, and try to start the 2nd guest later on, the 2nd guest would not
start, and the VF won't work anymore.
Steps to reproduce the problem:
1 Assign a VF to guest A, and start the guest. The VF works fine.
2 Assign the VF to guest B, and try to start guest B. guest B can't start.
3 Guest A's network becomes unreachable, because its VF now doesn't work.
Reasons for this problem is:
1 When we start guest B, libvirtd checks whether the VF is already used
by another guest, if so, qemuPrepareHostDevices() returns with failure.
2 Then, libvirtd calls qemuProcessStop() to cleanup resources, which would
restore the VFs of guest B. Specifically, it reads
/var/run/libvirt/hostdevmgr/ethX_vfX to get the VF's original MAC/VLAN,
and set it back to current VF.
3 As the VF is still in use by guest A, libvirtd just set its MAC/VLAN
to another value, the VF doesn't work anymore.
Detailed flow:
qemuProcessStart
\___qemuPrepareHostDevices(if it fails, goto cleanup)
\ \_qemuPrepareHostdevPCIDevices
\ \_____virHostdevPreparePCIDevices
\ \____LOOP1:virPCIDeviceListFind
\ (whether the device is in use by another active guest)
\ if the VF has been assigned to, qemuPrepareHostDevices() fails
\___cleanup:
\__qemuProcessStop
\____qemuDomainReAttachHostDevices
\____qemuDomainReAttachHostdevDevices
\____virHostdevReAttachPCIDevices
\_____virHostdevNetConfigRestore
(it gets MAC/VLAN from /var/run/libvirt/hostdevmgr/ethX_vfX,
and set it back to the VF, making the VF unusable)
This patch checks whether the VF is already in use before restoring it.
Signed-off-by: Zhang Bo <oscar.zhangbo(a)huawei.com>
Signed-off-by: Zhuang Yanying <zhuangyanying(a)huawei.com>
---
src/util/virhostdev.c | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/src/util/virhostdev.c b/src/util/virhostdev.c
index 9678e2b..ee19400 100644
--- a/src/util/virhostdev.c
+++ b/src/util/virhostdev.c
@@ -816,9 +816,21 @@ virHostdevReAttachPCIDevices(virHostdevManagerPtr hostdev_mgr,
* For SRIOV net host devices, unset mac and port profile before
* reset and reattach device
*/
- for (i = 0; i < nhostdevs; i++)
+ for (i = 0; i < nhostdevs; i++){
+ virPCIDevicePtr devNotInuse = NULL;
+ virDevicePCIAddressPtr addr = NULL;
+ virDomainHostdevDefPtr hostdev = hostdevs[i];
+ addr = &hostdev->source.subsys.u.pci.addr;
+ devNotInuse = virPCIDeviceListFindByIDs(pcidevs,
+ addr->domain, addr->bus,
+ addr->slot, addr->function);
+ if (!devNotInuse) {
+ continue;
+ }
+
virHostdevNetConfigRestore(hostdevs[i], hostdev_mgr->stateDir,
oldStateDir);
+ }
for (i = 0; i < virPCIDeviceListCount(pcidevs); i++) {
virPCIDevicePtr dev = virPCIDeviceListGet(pcidevs, i);
--
1.7.12.4
10 years, 2 months
[libvirt] [PATCH 0/7] Automatically fill <memory> element for NUMA enabled guests
by Peter Krempa
Conclusion of the memory refactoring series. These patches depend on the two
previous series and for convenience can be fetched at:
git fetch git://pipo.sk/pipo/libvirt.git/ memory-refactors
This series changes the behavior of the <memory> element in case the guest has
NUMA enabled and fills automatically the sum of sizes of numa nodes instead of
relying on the user passing correct data.
Peter Krempa (7):
qemu: Forbid setting maximum memory size with the API with NUMA enabled
qemu: lxc: Clarify error message when setting current memory
conf: Hoist validation of memory size into the post parse callback
conf: Replace access to def-mem.max_balloon with accessor functions
qemu: command: Add helper to align memory sizes
conf: numa: Add helper to count total memory size configured in NUMA
conf: Automatically use NUMA memory size in case NUMA is enabled
src/conf/domain_conf.c | 101 ++++++++++++++++-----
src/conf/domain_conf.h | 4 +
src/conf/numa_conf.c | 13 +++
src/conf/numa_conf.h | 1 +
src/hyperv/hyperv_driver.c | 2 +-
src/libvirt_private.syms | 4 +
src/libxl/libxl_conf.c | 2 +-
src/libxl/libxl_driver.c | 8 +-
src/lxc/lxc_cgroup.c | 2 +-
src/lxc/lxc_driver.c | 27 +++---
src/lxc/lxc_fuse.c | 4 +-
src/lxc/lxc_native.c | 4 +-
src/openvz/openvz_driver.c | 2 +-
src/parallels/parallels_driver.c | 2 +-
src/parallels/parallels_sdk.c | 12 +--
src/phyp/phyp_driver.c | 11 ++-
src/qemu/qemu_command.c | 23 +++--
src/qemu/qemu_domain.c | 21 +++++
src/qemu/qemu_domain.h | 2 +
src/qemu/qemu_driver.c | 44 +++++----
src/qemu/qemu_hotplug.c | 8 +-
src/qemu/qemu_process.c | 2 +-
src/test/test_driver.c | 8 +-
src/uml/uml_driver.c | 8 +-
src/vbox/vbox_common.c | 4 +-
src/vmware/vmware_driver.c | 2 +-
src/vmx/vmx.c | 12 +--
src/xen/xm_internal.c | 14 +--
src/xenapi/xenapi_driver.c | 2 +-
src/xenapi/xenapi_utils.c | 4 +-
src/xenconfig/xen_common.c | 8 +-
src/xenconfig/xen_sxpr.c | 9 +-
.../qemuxml2argv-numatune-memnode.args | 2 +-
33 files changed, 248 insertions(+), 124 deletions(-)
--
2.2.2
10 years, 2 months
[libvirt] [PATCH 00/11] Add support for memory hot(un)plug
by Peter Krempa
A final version of the memory hotplug series. This version incorporates
feedback on the RFC and fixes the few missing bits.
This series depends on the three previous refactor series:
http://www.redhat.com/archives/libvir-list/2015-February/msg00532.html
[libvirt] [PATCH 00/24] Move all NUMA related configuration into one structure
http://www.redhat.com/archives/libvir-list/2015-February/msg00557.html
[libvirt] [PATCH 0/3] Fix memory ABI stability check issues
http://www.redhat.com/archives/libvir-list/2015-February/msg00633.html
[libvirt] [PATCH 0/7] Automatically fill <memory> element for NUMA enabled guests
For convenience, you can fetch the complete series in my public branch:
git fetch git://pipo.sk/pipo/libvirt.git memory-hotplug-v1
Peter Krempa (11):
qemu: caps: Add capability bit for the "pc-dimm" device
conf: Add support for parsing and formatting max memory and slot count
qemu: Implement setup of memory hotplug parameters
conf: Add device address type for dimm devices
conf: Add interface to parse and format memory device information
qemu: memdev: Add infrastructure to load memory device information
qemu: migration: Forbid migration with memory modules lacking info
qemu: add support for memory devices
qemu: conf: Add support for memory device cold(un)plug
qemu: Implement memory device hotplug
qemu: Implement memory device hotunplug
docs/formatdomain.html.in | 98 ++++
docs/schemas/domaincommon.rng | 76 +++
src/bhyve/bhyve_domain.c | 9 +-
src/conf/domain_conf.c | 566 ++++++++++++++++++++-
src/conf/domain_conf.h | 59 +++
src/libvirt_private.syms | 7 +
src/libxl/libxl_domain.c | 8 +
src/lxc/lxc_domain.c | 8 +
src/openvz/openvz_driver.c | 14 +-
src/parallels/parallels_driver.c | 6 +-
src/phyp/phyp_driver.c | 6 +-
src/qemu/qemu_capabilities.c | 2 +
src/qemu/qemu_capabilities.h | 1 +
src/qemu/qemu_command.c | 166 +++++-
src/qemu/qemu_command.h | 15 +
src/qemu/qemu_domain.c | 80 +++
src/qemu/qemu_domain.h | 5 +
src/qemu/qemu_driver.c | 29 ++
src/qemu/qemu_hotplug.c | 177 +++++++
src/qemu/qemu_hotplug.h | 6 +
src/qemu/qemu_migration.c | 14 +
src/qemu/qemu_monitor.c | 42 ++
src/qemu/qemu_monitor.h | 14 +
src/qemu/qemu_monitor_json.c | 122 +++++
src/qemu/qemu_monitor_json.h | 5 +
src/qemu/qemu_process.c | 4 +
src/uml/uml_driver.c | 9 +-
src/vbox/vbox_common.c | 6 +-
src/vmware/vmware_driver.c | 6 +-
src/vmx/vmx.c | 6 +-
src/xen/xen_driver.c | 7 +
src/xenapi/xenapi_driver.c | 9 +-
tests/domainschemadata/maxMemory.xml | 19 +
tests/qemucapabilitiesdata/caps_2.1.1-1.caps | 1 +
.../qemuxml2argv-memory-hotplug-dimm.args | 11 +
.../qemuxml2argv-memory-hotplug-dimm.xml | 50 ++
.../qemuxml2argv-memory-hotplug-nonuma.xml | 22 +
.../qemuxml2argv-memory-hotplug.args | 6 +
.../qemuxml2argv-memory-hotplug.xml | 34 ++
tests/qemuxml2argvtest.c | 6 +
tests/qemuxml2xmltest.c | 4 +
41 files changed, 1712 insertions(+), 23 deletions(-)
create mode 100644 tests/domainschemadata/maxMemory.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-memory-hotplug-dimm.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-memory-hotplug-dimm.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-memory-hotplug-nonuma.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-memory-hotplug.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-memory-hotplug.xml
--
2.2.2
10 years, 2 months
[libvirt] [PATCH] qemu: fix memory leak while starting a guest
by Pavel Hrdina
In commit cc41c648 I've re-factored qemuMonitorFindBalloonObjectPath, but
missed that there is a memory leak. The "nextpath" variable is
overridden while looping in for cycle and we have to free it before next
cycle.
Signed-off-by: Pavel Hrdina <phrdina(a)redhat.com>
---
src/qemu/qemu_monitor.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index 6882a50..94495cd 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -1067,6 +1067,7 @@ qemuMonitorFindObjectPath(qemuMonitorPtr mon,
}
ret = qemuMonitorFindObjectPath(mon, nextpath, name, path);
+ VIR_FREE(nextpath);
}
}
--
2.0.5
10 years, 2 months
[libvirt] [python][RFC] more python bulk stats API
by Francesco Romani
Hi,
I was wondering if there is room in libvirt-python for a couple of new APIs
which could make life easier for python developers.
It is not just a theoretical thing, when developing VDSM, part of oVirt, I
found myself repeating these patterns long enough.
1. return dict from getAllDomainStats and domainListGetStats
both bulk stats APIs return on success a list of tuples, each tuple being (DomainReference, DictOfStats) ,
and I often find myself in need to translate them into a dict like {VMUUID:DictOfStats} , using some code like
def _translate(bulk_stats):
return dict((dom.UUIDString(), stats)
for dom, stats in bulk_stats)
So I'd like to suggest new APIs:
virConnection.getAllDomainStatsMap()
virConnection.domainListGetStatsMap()
which directly return the dict()s above; arguments should be like getAllDomainStats and domainListGetStats,
respectively.
2. get all the bulk stats from a single VM
it is trivial and efficient to do this in C, but to do that in python means to use throwaway temporary lists,
one for the domain and one for the result
No big deal, but again, why to waste? I'd like to have
virDomain.getBulkStats(stats) # stats is like in getAllDomainStats
that should return just the DictOfStats on success.
For performance reasons, all of the above should be done in C.
Just in case, I have proof of concept code for all the above. I can post a tentative patch if maintainers like
these ideas.
Thanks,
Thoughts welcome
--
Francesco Romani
RedHat Engineering Virtualization R & D
Phone: 8261328
IRC: fromani
10 years, 2 months
[libvirt] [PATCH] network_conf: Forbid commas in DNS TXT
by Michal Privoznik
https://bugzilla.redhat.com/show_bug.cgi?id=1151942
While the restriction doesn't have origin in any RFC, it matters
to us while constructing the dnsmasq config file (or command line
previously). For better picture, this is how the corresponding
part of the network XML looks like:
<dns>
<forwarder addr='8.8.4.4'/>
<txt name='example' value='example value'/>
</dns>
And this is how the config file looks like then:
server=8.8.4.4
txt-record=example,example value
Now we can see why there can't be any commas in the TXT name.
They are used by dnsmasq to separate @name and @value.
Funny, we have it in the documentation, but the code (which was
pushed back in 2011) didn't reflect that.
Signed-off-by: Michal Privoznik <mprivozn(a)redhat.com>
---
src/conf/network_conf.c | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/src/conf/network_conf.c b/src/conf/network_conf.c
index dce3360..3d8bf05 100644
--- a/src/conf/network_conf.c
+++ b/src/conf/network_conf.c
@@ -1057,15 +1057,17 @@ virNetworkDNSTxtDefParseXML(const char *networkName,
virNetworkDNSTxtDefPtr def,
bool partialOkay)
{
+ const char *bad = " ,";
+
if (!(def->name = virXMLPropString(node, "name"))) {
virReportError(VIR_ERR_XML_DETAIL,
_("missing required name attribute in DNS TXT record "
"of network %s"), networkName);
goto error;
}
- if (strchr(def->name, ' ') != NULL) {
+ if (strcspn(def->name, bad) != strlen(def->name)) {
virReportError(VIR_ERR_XML_DETAIL,
- _("prohibited space character in DNS TXT record "
+ _("prohibited character in DNS TXT record "
"name '%s' of network %s"), def->name, networkName);
goto error;
}
--
2.0.5
10 years, 2 months
[libvirt] [PATCH] qemu: bulk stats: implement (cpu) tune group.
by Francesco Romani
Management applications, like oVirt, may need to setup cpu quota
limits to enforce QoS for VMs.
For this purpose, management applications also need to check how
VMs are behaving with respect to CPU quota. This data is avaialble
using the virDomainGetSchedulerParameters API.
This patch adds a new group to bulk stats API to obtain the same
information.
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1191428
---
include/libvirt/libvirt-domain.h | 1 +
src/libvirt-domain.c | 16 ++++++++
src/qemu/qemu_driver.c | 84 ++++++++++++++++++++++++++++++++++++++++
tools/virsh-domain-monitor.c | 7 ++++
4 files changed, 108 insertions(+)
diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h
index 4dbd7f5..3d8c6af 100644
--- a/include/libvirt/libvirt-domain.h
+++ b/include/libvirt/libvirt-domain.h
@@ -1700,6 +1700,7 @@ typedef enum {
VIR_DOMAIN_STATS_VCPU = (1 << 3), /* return domain virtual CPU info */
VIR_DOMAIN_STATS_INTERFACE = (1 << 4), /* return domain interfaces info */
VIR_DOMAIN_STATS_BLOCK = (1 << 5), /* return domain block info */
+ VIR_DOMAIN_STATS_TUNE_CPU = (1 << 6), /* return domain CPU tuning info */
} virDomainStatsTypes;
typedef enum {
diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c
index 492e90a..a4effa3 100644
--- a/src/libvirt-domain.c
+++ b/src/libvirt-domain.c
@@ -10990,6 +10990,22 @@ virConnectGetDomainCapabilities(virConnectPtr conn,
* "block.<num>.physical" - physical size in bytes of the container of the
* backing image as unsigned long long.
*
+ * VIR_DOMAIN_STATS_TUNE_CPU: Return CPU tuning statistics
+ * and usage information.
+ * The typed parameter keys are in this format:
+ * "tune.vcpu.quota" - max allowed bandwidth, in microseconds, as
+ * long long integer. -1 means 'infinite'.
+ * "tune.vcpu.period" - timeframe on which the virtual cpu quota is
+ * enforced, in microseconds, as unsigned long long.
+ * "tune.emu.quota" - max allowed bandwidth for emulator threads,
+ * in microseconds, as long long integer.
+ * -1 means 'infinite'.
+ * "tune.emu.period" - timeframe on which the emulator quota is
+ * enforced, in microseconds, as unsigned long long.
+ * "tune.cpu.shares" - weight of this VM. This value is meaningful
+ * only if compared with the other values of
+ * the running vms. Expressed as unsigned long long.
+ *
* Note that entire stats groups or individual stat fields may be missing from
* the output in case they are not supported by the given hypervisor, are not
* applicable for the current state of the guest domain, or their retrieval
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 26fc6a2..5548626 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -18797,6 +18797,89 @@ qemuDomainGetStatsBlock(virQEMUDriverPtr driver,
#undef QEMU_ADD_COUNT_PARAM
+
+#define QEMU_ADD_PARAM_LL(record, maxparams, name, value) \
+do { \
+ if (virTypedParamsAddLLong(&(record)->params, \
+ &(record)->nparams, \
+ maxparams, \
+ name, \
+ value) < 0) \
+ goto cleanup; \
+} while (0)
+
+#define QEMU_ADD_PARAM_ULL(record, maxparams, name, value) \
+do { \
+ if (virTypedParamsAddULLong(&(record)->params, \
+ &(record)->nparams, \
+ maxparams, \
+ name, \
+ value) < 0) \
+ goto cleanup; \
+} while (0)
+
+static int
+qemuDomainGetStatsCpuTune(virQEMUDriverPtr driver,
+ virDomainObjPtr dom,
+ virDomainStatsRecordPtr record,
+ int *maxparams,
+ unsigned int privflags ATTRIBUTE_UNUSED)
+{
+ int ret = -1;
+ unsigned long long shares = 0;
+ qemuDomainObjPrivatePtr priv = dom->privateData;
+ virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
+
+ if (!cfg->privileged ||
+ !virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPU)) {
+ ret = 0;
+ goto cleanup;
+ }
+
+ if (virCgroupGetCpuShares(priv->cgroup, &shares) < 0) {
+ ret = 0;
+ goto cleanup;
+ }
+
+ QEMU_ADD_PARAM_ULL(record, maxparams, "tune.cpu.shares", shares);
+
+ if (virCgroupSupportsCpuBW(priv->cgroup)) {
+ unsigned long long period = 0;
+ long long quota = 0;
+ unsigned long long emulator_period = 0;
+ long long emulator_quota = 0;
+ int err;
+
+ err = qemuGetVcpusBWLive(dom, &period, "a);
+ if (!err) {
+ QEMU_ADD_PARAM_ULL(record, maxparams,
+ "tune.vcpu.period", period);
+ QEMU_ADD_PARAM_LL(record, maxparams,
+ "tune.vcpu.quota", quota);
+ }
+
+ err = qemuGetEmulatorBandwidthLive(dom, priv->cgroup,
+ &emulator_period, &emulator_quota);
+ if (!err) {
+ QEMU_ADD_PARAM_ULL(record, maxparams,
+ "tune.emu.period", emulator_period);
+ QEMU_ADD_PARAM_LL(record, maxparams,
+ "tune.emu.quota", emulator_quota);
+ }
+ }
+
+ ret = 0;
+
+ cleanup:
+ virObjectUnref(cfg);
+ return ret;
+}
+
+#undef QEMU_ADD_PARAM_LL
+
+#undef QEMU_ADD_PARAM_ULL
+
+
typedef int
(*qemuDomainGetStatsFunc)(virQEMUDriverPtr driver,
virDomainObjPtr dom,
@@ -18817,6 +18900,7 @@ static struct qemuDomainGetStatsWorker qemuDomainGetStatsWorkers[] = {
{ qemuDomainGetStatsVcpu, VIR_DOMAIN_STATS_VCPU, false },
{ qemuDomainGetStatsInterface, VIR_DOMAIN_STATS_INTERFACE, false },
{ qemuDomainGetStatsBlock, VIR_DOMAIN_STATS_BLOCK, true },
+ { qemuDomainGetStatsCpuTune, VIR_DOMAIN_STATS_TUNE_CPU, false },
{ NULL, 0, false }
};
diff --git a/tools/virsh-domain-monitor.c b/tools/virsh-domain-monitor.c
index 925eb1b..e425e43 100644
--- a/tools/virsh-domain-monitor.c
+++ b/tools/virsh-domain-monitor.c
@@ -1997,6 +1997,10 @@ static const vshCmdOptDef opts_domstats[] = {
.type = VSH_OT_BOOL,
.help = N_("report domain block device statistics"),
},
+ {.name = "tune-cpu",
+ .type = VSH_OT_BOOL,
+ .help = N_("report domain cpu scheduler parameters"),
+ },
{.name = "list-active",
.type = VSH_OT_BOOL,
.help = N_("list only active domains"),
@@ -2107,6 +2111,9 @@ cmdDomstats(vshControl *ctl, const vshCmd *cmd)
if (vshCommandOptBool(cmd, "block"))
stats |= VIR_DOMAIN_STATS_BLOCK;
+ if (vshCommandOptBool(cmd, "tune-cpu"))
+ stats |= VIR_DOMAIN_STATS_TUNE_CPU;
+
if (vshCommandOptBool(cmd, "list-active"))
flags |= VIR_CONNECT_GET_ALL_DOMAINS_STATS_ACTIVE;
--
2.1.0
10 years, 2 months