[libvirt] Fwd: There seems to be a deadlock in libvirt
by Chun-Hung Chen
Hi, all,
We were running OpenStack on Ubuntu with libvirt 0.9.10 and found that the
libvirt monitor commands were not working well.
There were a lot of errors in libvirtd.log like this:
2013-02-07 06:07:39.000+0000: 18112: error :
qemuDomainObjBeginJobInternal:773 : Timed out during operation: cannot
acquire state change lock
We dug into libvirtd with strace and found that one of the threads was stuck
in the following call:
futex(0x7f69ac0ec0ec, FUTEX_WAIT_PRIVATE, 2717, NULL
It seems this thread is waiting for a reply that never comes back, so the
other threads end up waiting on it. We also saw there is a function called
virCondWaitUntil(). Is it safe for us to modify the code from virCondWait()
to virCondWaitUntil() to prevent such a deadlock scenario? Thanks.
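Something along these lines is what we have in mind (untested sketch only --
the field names follow our reading of the 0.9.10 qemuMonitorSend() loop, and
the 30 second cap is an arbitrary number picked purely for illustration):

    unsigned long long now, then;

    if (virTimeMillisNow(&now) < 0)
        return -1;
    then = now + 30 * 1000;  /* arbitrary 30s deadline, illustration only */

    while (!mon->msg->finished) {
        /* report an error and give up instead of blocking forever
         * when the monitor never answers */
        if (virCondWaitUntil(&mon->notify, &mon->lock, then) < 0)
            goto cleanup;
    }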
Following is the output of gdb -p 'libvirt.pid', then 'thread id' to switch
to the stuck thread, and 'bt full':
#0 0x00007f69c8c1dd84 in pthread_cond_wait@@GLIBC_2.3.2 () from
/lib/x86_64-linux-gnu/libpthread.so.0
No symbol table info available.
#1 0x00007f69c9ee884a in virCondWait (c=<optimized out>, m=<optimized
out>) at util/threads-pthread.c:117
ret = <optimized out>
#2 0x000000000049c749 in qemuMonitorSend (mon=0x7f69ac0ec0c0,
msg=<optimized out>) at qemu/qemu_monitor.c:826
ret = -1
__func__ = "qemuMonitorSend"
__FUNCTION__ = "qemuMonitorSend"
#3 0x00000000004ac8ed in qemuMonitorJSONCommandWithFd (mon=0x7f69ac0ec0c0,
cmd=0x7f6998028280, scm_fd=-1, reply=0x7f69c57829f8)
at qemu/qemu_monitor_json.c:230
ret = -1
msg = {txFD = -1, txBuffer = 0x7f69980e9b00
"{\"execute\":\"query-balloon\",\"id\":\"libvirt-1359\"}\r\n", txOffset =
49, txLength = 49,
rxBuffer = 0x0, rxLength = 0, rxObject = 0x0, finished = false,
passwordHandler = 0, passwordOpaque = 0x0}
cmdstr = 0x7f69980ef2f0
"{\"execute\":\"query-balloon\",\"id\":\"libvirt-1359\"}"
id = 0x7f69980b0a20 "libvirt-1359"
exe = <optimized out>
__FUNCTION__ = "qemuMonitorJSONCommandWithFd"
__func__ = "qemuMonitorJSONCommandWithFd"
#4 0x00000000004ae794 in qemuMonitorJSONGetBalloonInfo
(mon=0x7f69ac0ec0c0, currmem=0x7f69c5782a48) at
qemu/qemu_monitor_json.c:1190
ret = <optimized out>
cmd = 0x7f6998028280
reply = 0x0
__FUNCTION__ = "qemuMonitorJSONGetBalloonInfo"
#5 0x0000000000457451 in qemudDomainGetInfo (dom=<optimized out>,
info=0x7f69c5782b50) at qemu/qemu_driver.c:2181
priv = 0x7f69a0093b00
driver = 0x7f69b80ca8e0
vm = 0x7f69a0093370
ret = -1
err = <optimized out>
balloon = <optimized out>
__FUNCTION__ = "qemudDomainGetInfo"
#6 0x00007f69c9f63eda in virDomainGetInfo (domain=0x7f69980e3650,
info=0x7f69c5782b50) at libvirt.c:4230
ret = <optimized out>
conn = <optimized out>
__func__ = "virDomainGetInfo"
__FUNCTION__ = "virDomainGetInfo"
#7 0x0000000000439bca in remoteDispatchDomainGetInfo (ret=0x7f6998000c20,
args=<optimized out>, rerr=0x7f69c5782c50, client=0x157e730,
server=<optimized out>, msg=<optimized out>) at remote_dispatch.h:1640
rv = -1
tmp = {state = 1 '\001', maxMem = 2097152, memory = 0, nrVirtCpu =
0, cpuTime = 5981880000000}
dom = 0x7f69980e3650
priv = <optimized out>
#8 remoteDispatchDomainGetInfoHelper (server=<optimized out>,
client=0x157e730, msg=<optimized out>, rerr=0x7f69c5782c50, args=<optimized
out>,
ret=0x7f6998000c20) at remote_dispatch.h:1616
__func__ = "remoteDispatchDomainGetInfoHelper"
#9 0x00007f69c9fbb915 in virNetServerProgramDispatchCall (msg=0x1689cc0,
client=0x157e730, server=0x1577c90, prog=0x15825d0)
at rpc/virnetserverprogram.c:416
ret = 0x7f6998000c20 ""
rv = -1
i = <optimized out>
arg = 0x7f6998027950 "\360e\n\230i\177"
dispatcher = 0x73de40
rerr = {code = 0, domain = 0, message = 0x0, level = 0, dom = 0x0,
str1 = 0x0, str2 = 0x0, str3 = 0x0, int1 = 0, int2 = 0, net = 0x0}
#10 virNetServerProgramDispatch (prog=0x15825d0, server=0x1577c90,
client=0x157e730, msg=0x1689cc0) at rpc/virnetserverprogram.c:289
ret = -1
rerr = {code = 0, domain = 0, message = 0x0, level = 0, dom = 0x0,
str1 = 0x0, str2 = 0x0, str3 = 0x0, int1 = 0, int2 = 0, net = 0x0}
__func__ = "virNetServerProgramDispatch"
__FUNCTION__ = "virNetServerProgramDispatch"
#11 0x00007f69c9fb6461 in virNetServerHandleJob (jobOpaque=<optimized out>,
opaque=0x1577c90) at rpc/virnetserver.c:164
srv = 0x1577c90
job = 0x155dfa0
__func__ = "virNetServerHandleJob"
#12 0x00007f69c9ee8e3e in virThreadPoolWorker (opaque=<optimized out>) at
util/threadpool.c:144
data = 0x0
pool = 0x1577d80
cond = 0x1577de0
priority = false
job = 0x162dd20
#13 0x00007f69c9ee84e6 in virThreadHelper (data=<optimized out>) at
util/threads-pthread.c:161
args = 0x0
local = {func = 0x7f69c9ee8d00 <virThreadPoolWorker>, opaque =
0x1559f90}
#14 0x00007f69c8c19e9a in start_thread () from
/lib/x86_64-linux-gnu/libpthread.so.0
No symbol table info available.
#15 0x00007f69c89474bd in clone () from /lib/x86_64-linux-gnu/libc.so.6
No symbol table info available.
#16 0x0000000000000000 in ?? ()
No symbol table info available.
Regards,
Chun-Hung
[libvirt] [PATCH] build: force correct gcc syntax for attribute_nonnull
by Eric Blake
Gcc lets you do:
int ATTRIBUTE_NONNULL(1) foo(void *param);
int foo(void *param) ATTRIBUTE_NONNULL(1);
int ATTRIBUTE_NONNULL(1) foo(void *param) { ... }
but chokes on:
int foo(void *param) ATTRIBUTE_NONNULL(1) { ... }
However, since commit eefb881, we have intentionally been disabling
ATTRIBUTE_NONNULL because of lame gcc handling of the attribute (that
is, gcc doesn't do decent warning reporting, then compiles code that
mysteriously fails if you break the contract of the attribute, which
is surprisingly easy to do), leaving it on only for Coverity (which
does a much better job of improved static analysis when the attribute
is present).
But completely eliding the macro makes it too easy to write code that
uses the fourth syntax option, if you aren't using Coverity. So this
patch forces us to avoid syntax errors, even when not using the
attribute under gcc. It also documents WHY we disable the warning
under gcc, rather than forcing you to find the commit log.
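For instance (foo() here is a made-up example, just to show the failure
mode): with the macro expanded to nothing, the first definition below sails
through a normal gcc build and only breaks once somebody builds with
STATIC_ANALYSIS; with the new expansion to an empty __attribute__(()), gcc
rejects that placement in every build, forcing the accepted form:

    /* fourth syntax form - gcc chokes on an attribute between the
     * parameter list and the function body */
    int foo(void *param) ATTRIBUTE_NONNULL(1)
    {
        return param != NULL;
    }

    /* one of the accepted placements */
    int ATTRIBUTE_NONNULL(1) foo(void *param)
    {
        return param != NULL;
    }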
* src/internal.h (ATTRIBUTE_NONNULL): Expand to empty attribute,
rather than nothing, when on gcc.
---
src/internal.h | 17 +++++++++++++++--
1 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/src/internal.h b/src/internal.h
index ebc91c7..2cf4731 100644
--- a/src/internal.h
+++ b/src/internal.h
@@ -181,9 +181,22 @@
# endif
# endif
+/* gcc's handling of attribute nonnull is less than stellar - it does
+ * NOT improve diagnostics, and merely allows gcc to optimize away
+ * null code checks even when the caller manages to pass null in spite
+ * of the attribute, leading to weird crashes. Coverity, on the other
+ * hand, knows how to do better static analysis based on knowing
+ * whether a parameter is nonnull. Make this attribute conditional
+ * based on whether we are compiling for real or for analysis, while
+ * still requiring correct gcc syntax when it is turned off. See also
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=17308 */
# ifndef ATTRIBUTE_NONNULL
-# if __GNUC_PREREQ (3, 3) && STATIC_ANALYSIS
-# define ATTRIBUTE_NONNULL(m) __attribute__((__nonnull__(m)))
+# if __GNUC_PREREQ (3, 3)
+# if STATIC_ANALYSIS
+# define ATTRIBUTE_NONNULL(m) __attribute__((__nonnull__(m)))
+# else
+# define ATTRIBUTE_NONNULL(m) __attribute__(())
+# endif
# else
# define ATTRIBUTE_NONNULL(m)
# endif
--
1.7.1
[libvirt] [PATCH 0/4] interface: udev backend bond support
by Doug Goldstein
This patchset primarily aims to add support for bond devices, which was
lacking from the existing code. This closes out support for all interface
device types. The patchset also refactors the code to hopefully make it a
bit easier to follow.
Doug Goldstein (4):
interface: Refactor udev bridge to helper func
interface: Refactor interface vlan to helper func
interface: Refactor udev backend device type id
interface: add bond support to udev backend
src/interface/interface_backend_udev.c | 456 ++++++++++++++++++++++++++-------
1 file changed, 359 insertions(+), 97 deletions(-)
--
1.7.12.4
[libvirt] [PATCH] qemu: pass "-1" as uid/gid for unprivileged qemu
by Guido Günther
so we don't try to change uid/gid to 0 when probing capabilities.
---
On Fri, Feb 15, 2013 at 11:20:17PM -0600, Doug Goldstein wrote:
> The following error bisects down to this commit when running out of
> my local checkout for testing.
>
> 2013-02-16 05:16:55.102+0000: 29992: error : virCommandWait:2270 :
> internal error Child process (LC_ALL=C
> LD_LIBRARY_PATH=/home/cardoe/work/libvirt/src/.libs
> PATH=/usr/local/bin:/usr/bin:/bin:/opt/bin:/usr/x86_64-pc-linux-gnu/gcc-bin/4.6.3:/usr/games/bin
> HOME=/home/cardoe USER=cardoe LOGNAME=cardoe /usr/bin/qemu-kvm -help)
> unexpected exit status 1: libvir: error : internal error cannot apply
> process capabilities -1
This is due to:
debug : virExec:641 : Setting child uid:gid to 0:0 with caps 0
We're trying to change uid/gid and capabilities even on qemu:///session. This
patch unbreaks the libvirt-tck jenkins test suite as well.
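For clarity, the convention is that a uid/gid of -1 means "leave the current
id alone", so passing -1 below keeps the capability-probing child running as
the session user. A minimal sketch of that idea (hypothetical helper, not the
actual libvirt code):

    #include <sys/types.h>
    #include <unistd.h>

    static int
    maybe_switch_ids(uid_t uid, gid_t gid)
    {
        /* (uid_t)-1 / (gid_t)-1 mean "don't change this id" */
        if (gid != (gid_t)-1 && setregid(gid, gid) < 0)
            return -1;
        if (uid != (uid_t)-1 && setreuid(uid, uid) < 0)
            return -1;
        return 0;
    }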
src/qemu/qemu_driver.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 23499ef..dc35b91 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -556,6 +556,8 @@ qemuStartup(bool privileged,
char *membase = NULL;
char *mempath = NULL;
virQEMUDriverConfigPtr cfg;
+ uid_t run_uid = -1;
+ gid_t run_gid = -1;
if (VIR_ALLOC(qemu_driver) < 0)
return -1;
@@ -707,11 +709,13 @@ qemuStartup(bool privileged,
cfg->snapshotDir, cfg->user, cfg->group);
goto error;
}
+ run_uid = cfg->user;
+ run_gid = cfg->group;
}
qemu_driver->qemuCapsCache = virQEMUCapsCacheNew(cfg->libDir,
- cfg->user,
- cfg->group);
+ run_uid,
+ run_gid);
if (!qemu_driver->qemuCapsCache)
goto error;
--
1.7.10.4
[libvirt] [PATCH] Add capabilities bit for -no-kvm-pit-reinjection
by Doug Goldstein
The conversion to qemuCaps dropped the ability, with qemu{,-kvm} 1.2 and
newer, to set the lost tick policy for the PIT. While the
-no-kvm-pit-reinjection option is deprecated, it is still supported at
least through 1.4, so it is better not to lose the functionality.
---
src/qemu/qemu_capabilities.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index 51fc9dc..af52bbf 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -2284,6 +2284,7 @@ virQEMUCapsInitQMPBasic(virQEMUCapsPtr qemuCaps)
virQEMUCapsSet(qemuCaps, QEMU_CAPS_NO_USER_CONFIG);
virQEMUCapsSet(qemuCaps, QEMU_CAPS_NETDEV_BRIDGE);
virQEMUCapsSet(qemuCaps, QEMU_CAPS_SECCOMP_SANDBOX);
+ virQEMUCapsSet(qemuCaps, QEMU_CAPS_NO_KVM_PIT);
}
--
1.7.12.4
[libvirt] Questions about the schemas
by Gene Czarcinski
This is not in any way critical; I am simply trying to get a better idea
of the purpose of docs/schemas/*.
I recently submitted a patch which required me to look at and modify some
of the schema files [adding a client-id specification to dhcp/host]. As
I looked them over, I wondered what the purpose of the schemas was. If
you run virt-xml-validate (which uses xmllint) and the result is that
your file does not validate, then there is a problem, but it is not clear
just what the problem is. On the other hand, if you run
virt-xml-validate and it says your file is OK (validated), that only
means that it did not find a problem, not that the file is OK.
For example, the schema says it is valid to specify an IPv6 address in
an IPv4 definition. The implementing software will kick it out, but the
schema says it is OK.
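To make that concrete, a hypothetical fragment such as

    <ip address='2001:db8::1' netmask='255.255.255.0'/>

(an IPv6 address paired with an IPv4 netmask, with no family attribute) is
the sort of thing I mean: as far as I can tell the schema only checks that
the value looks like *an* IP address, so virt-xml-validate is happy, while
the network driver refuses it when you actually try to use the definition.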
Another example is the (host) name under the dhcp/host definition. The
current schema says this is required, but dnsmasq does not require a
host-name specification [I submitted a trivial patch to make it optional].
So, if the purpose is to kick out obvious errors, then it is OK as is.
If the purpose is to be definitive, then more work needs to be done.
My opinion is that the current situation is "good enough" as long as
valid elements/specifications are not treated as errors.
Gene
[libvirt] [PATCHv2 00/15] Permit setting capabilities for uid!=0 processes
by Laine Stump
(and also properly clear capabilities bounding set for child
processes)
This replaces the earlier version of the same patches:
https://www.redhat.com/archives/libvir-list/2013-February/msg00446.html
Many of these patches were already ACKed in the first version. If I
haven't modified them (other than rebasing) I note that in the
subject. Other patches have been modified based on Dan and Eric's
reviews of V1.
Also, since I first posted the series, we've had a talk with some
kernel people who are amenable to modify the semantics of
CAP_COMPROMISE_KERNEL such that it's only checked on open(), not on
read or write. If this is done, we will (thankfully!) no longer need
to set CAP_COMPROMISE_KERNEL for the qemu process. However, the first
14 patches in this series are still useful. Two big changes are: 1) it
allows setting CAP_SYS_RAWIO when needed for generic scsi command
passthrough, and 2) the bounding set of child processes will now be
properly cleared (it previously wasn't, even though the code looked
like it *should* have been doing so).
Here is the blurb from V1's intro:
There are a bunch of patches here, but each is small and
single-purpose to make reviewing easier (and also so that any
potential regressions can be easily bisected).
The original purpose of the patches was to permit setting
CAP_COMPROMISE_KERNEL for non-root qemu processes, since Fedora 18 now
requires that in order for generic PCI passthrough to work (the
alternative was to always run qemu as root). Although we may not
actually want to do that part (if we can convince kernel people to
implement CAP_COMPROMISE_KERNEL such that it's only required when
*opening* the necessary sysfs file (done by libvirt), rather than for
every read/write (done by qemu), then we will not need
CAP_COMPROMISE_KERNEL for qemu), but that is just a couple lines in
the final patch, and the rest of the series is still useful, as it
make dropping/keeping caps truly work for non-root child processes -
this has never before been the case. (for example, CAP_SYS_RAWIO is
needed for generic scsi passthrough to work, and until now the only
way to have that was to run *all* qemus as root).
A somewhat higher-level description of what I've done with all the patches
(a short usage sketch of the new virCommand setters follows the list):
1) remove the programmable "hook" from virExecWithHook(), since that
function was only called from one place, and always with the same hook
function. Rename virExecWithHook() to virExec(), and replace the call
to that hook with inline code.
2) give virCommand an API to set the intended uid/gid of the command
that's going to be run, and use that instead of a "user hook" where
appropriate (in the process completely eliminating two hook
functions).
3) Also add an API to virCommand to do the final "set the process
label" step for selinux/apparmor.
4) Add a new API to the security driver (and use it from qemu) called
virSecurityManagerSetChildProcessLabel() which a) is called prior to
virCommandRun() rather than from a command "hook" function, b) takes a
virCommand, and c) rather than immediately performing the operation
(as virSecurityManagerSetProcessLabel() did), merely stores the
necessary information in the virCommand so that virExec can perform
the operation (setting selinux label, setuid/gid, etc)
5) make a new function combining the setting of uid/gid and
maintaining of capabilities, because that is the only way you can set
uid!=0 and still maintain capabilities. Use this in virExec()
6) *Finally* set the CAP_COMPROMISE_KERNEL capability unconditionally
for all qemu processes. (If we really do have to do this, we may want
to consider making it a qemu.conf setting).
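As a rough illustration of how the new virCommandSetUID/virCommandSetGID
setters (item 2 above) are meant to be used from a driver (sketch only --
the signatures are the ones proposed in this series, and the probe command,
uid and gid here are made up):

    #include "vircommand.h"

    static int
    probe_qemu_help(const char *binary, uid_t uid, gid_t gid)
    {
        int ret = -1;
        virCommandPtr cmd = virCommandNewArgList(binary, "-help", NULL);

        /* Instead of registering a hook that runs in the child, record
         * the target uid/gid on the command itself; virExec() switches
         * identity (and, with the later patches, keeps the requested
         * capabilities) at the right point in the child. */
        virCommandSetUID(cmd, uid);
        virCommandSetGID(cmd, gid);

        if (virCommandRun(cmd, NULL) < 0)
            goto cleanup;

        ret = 0;
     cleanup:
        virCommandFree(cmd);
        return ret;
    }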
Laine Stump (15):
util: eliminate generic hook from virExecWithHook
util: eliminate extra args from virExec
util: refactor virCommandHook into virExec and
virCommandHandshakeChild
util: add virCommandSetUID and virCommandSetGID
util: make virSetUIDGID a NOP only when uid or gid is -1
qemu: replace exec hook with virCommandSetUID/GID in qemuCaps*
qemu: replace exec hook with virCommandSetUID/GID in storage_backend
build: define SECDRIVER_LIBS in Makefile.am
util: add security label setting to virCommand
security: add new virSecurityManagerSetChildProcessLabel API
qemu: let virCommand set child process security labels/uid/gid
util: drop capabilities immediately after changing uid/gid of child
util: virSetUIDGIDWithCaps - change uid while keeping caps
util: maintain caps when running command with uid != 0
qemu: set CAP_COMPROMISE_KERNEL so that pci passthrough works
src/Makefile.am | 36 ++-
src/libvirt_private.syms | 6 +
src/qemu/qemu_capabilities.c | 64 ++---
src/qemu/qemu_process.c | 23 +-
src/security/security_apparmor.c | 42 +++-
src/security/security_dac.c | 24 +-
src/security/security_driver.h | 6 +-
src/security/security_manager.c | 13 +-
src/security/security_manager.h | 6 +-
src/security/security_nop.c | 10 +-
src/security/security_selinux.c | 32 +++
src/security/security_stack.c | 20 +-
src/storage/storage_backend.c | 28 +--
src/util/vircommand.c | 523 ++++++++++++++++++++-------------------
src/util/vircommand.h | 12 +-
src/util/virutil.c | 115 ++++++++-
src/util/virutil.h | 1 +
17 files changed, 595 insertions(+), 366 deletions(-)
--
1.8.1
[libvirt] [PATCH v3 00/12] Rework storage migration
by Michal Privoznik
This patch set re-implements migration with storage for new enough qemu.
Currently, you can migrate a domain to a host without the need for shared
storage. This is done by setting the 'blk' or 'inc' attribute (representing
the VIR_MIGRATE_NON_SHARED_DISK and VIR_MIGRATE_NON_SHARED_INC flags,
respectively) of the 'migrate' monitor command. However, the qemu
implementation is buggy and applications are advised to switch to the new
implementation which, moreover, offers some nice features, like migrating
only explicitly specified disks.
The new functionality is controlled via 'nbd-server-*' and 'drive-mirror'
commands. The flow is meant to look like this (a rough sketch of the
corresponding monitor commands follows the list):
1) User invokes libvirt's migrate functionality.
2) libvirt checks that no block jobs are active on the source.
3) libvirt starts the destination QEMU and sets up the NBD server using the
nbd-server-start and nbd-server-add commands.
4) libvirt starts drive-mirror with a destination pointing to the remote NBD
server, for example nbd:host:port:exportname=diskname (where diskname is the
-drive id specified on the destination).
5) once all mirroring jobs reach steady state, libvirt invokes the migrate
command.
6) once migration completed, libvirt invokes the nbd-server-stop command on the
destination QEMU.
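To illustrate steps 3-5, the monitor traffic is roughly the following (the
drive name and port are examples only; the real arguments are built by the
qemuMonitorJSON* helpers added in this series):

destination:
    {"execute":"nbd-server-start",
     "arguments":{"addr":{"type":"inet","data":{"host":"::","port":"5901"}}}}
    {"execute":"nbd-server-add",
     "arguments":{"device":"drive-virtio-disk0","writable":true}}

source:
    {"execute":"drive-mirror",
     "arguments":{"device":"drive-virtio-disk0","mode":"existing","sync":"full",
                  "target":"nbd:host:5901:exportname=drive-virtio-disk0"}}

destination (once migration completes):
    {"execute":"nbd-server-stop"}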
If we just skip the 2nd step and there is an active block-job, qemu will fail in
step 4. No big deal.
Since we try to NOT break migration and keep things compatible, this feature is
enabled iff both sides support it. Since there's an obvious need for some data
transfer between src and dst, I've put it into qemuCookieMigration:
1) src -> dest: (QEMU_MIGRATION_PHASE_BEGIN3 -> QEMU_MIGRATION_PHASE_PREPARE)
<nbd>
<disk target='vda' size='15032385536'/>
<disk target='vdb' size='11534336'/>
<disk target='vdc' size='13631488'/>
</nbd>
The source is telling the destination that it supports the NBD feature,
and moreover which disks are to be transferred and what their sizes are.
That's because the disks need to be fully allocated (even qcow) for a
successful transfer.
2) dst -> src: (QEMU_MIGRATION_PHASE_PREPARE -> QEMU_MIGRATION_PHASE_PERFORM3)
<nbd port='5901'/>
The destination is confirming it supports NBD as well. All disks were
pre-created and the NBD server is listening on the given port (5901 in this
case). If either src or dst doesn't support NBD, it is not used and the
whole process falls back to the old implementation.
diff to v1:
-Eric's and Daniel's suggestions worked in. To point out the bigger ones:
don't do NBD style when TUNNELLED requested, added 'b:writable' to
'nbd-server-add'
-drop '/qemu-migration/nbd/disk/@src' attribute from migration cookie.
As pointed out by Jirka, disk->src can be changed during migration (e.g. by a
migration hook or by the passed xml). So I've tried (as suggested on the list)
passing the disk alias. However, since qemu hasn't been started on the
destination yet, the aliases haven't been generated yet. So we have to rely on
ordering completely.
diff to v2:
-rebase to reflect changes made by offline migration patch
-send initial nbd cookie only if needed
diff to v2.1:
-nbd cookie reworked
-don't rely on disk ordering in the cookie, but use disk target for that
-adapt to virPortAllocator
-unlink pre-created storage on migration fail
-other of Jirka's suggestions worked in
Michal Privoznik (12):
qemu: Introduce NBD_SERVER capability
Introduce NBD migration cookie
qemu: Introduce nbd-server-start command
qemu: Introduce nbd-server-add command
qemu: Introduce nbd-server-stop command
qemu_migration: Introduce qemuMigrationStartNBDServer()
qemu_migration: Introduce qemuMigrationDriveMirror
qemu_domain: Introduce qemuDomainGetDiskBlockInfo
qemu_migration: Check size prerequisites
qemu_migration: Stop NBD server at Finish phase
qemu_migration: Cancel running jobs on failed migration
qemu_migration: Unlink pre-created storage on error
src/qemu/qemu_capabilities.c | 4 +-
src/qemu/qemu_capabilities.h | 1 +
src/qemu/qemu_domain.c | 157 ++++++++-
src/qemu/qemu_domain.h | 7 +
src/qemu/qemu_driver.c | 124 +------
src/qemu/qemu_migration.c | 769 ++++++++++++++++++++++++++++++++++++++++++-
src/qemu/qemu_monitor.c | 63 ++++
src/qemu/qemu_monitor.h | 7 +
src/qemu/qemu_monitor_json.c | 102 ++++++
src/qemu/qemu_monitor_json.h | 7 +
src/qemu/qemu_process.c | 13 +
11 files changed, 1118 insertions(+), 136 deletions(-)
--
1.8.0.2
[libvirt] [test-API][PATCH] Add 7 memory API related cases
by Wayne Sun
add 7 new cases using domain memory related API
add 1 conf for domain memory testing
7 new cases are:
memory_params_config: test set memory params with config flag
memory_params_live: test set memory params with live flag
memory_peek: test memory peek
memory_stats: test get memory stats
set_maxmem_config: test set maximum memory with config flag
set_memory_config: test set current memory with config flag
set_memory_live: test set current memory with live flag
memory hotplug is not supported yet, so the live set-max-memory case
is not added.
Signed-off-by: Wayne Sun <gsun@redhat.com>
---
cases/domain_memory_test.conf | 99 +++++++++++++++++++++++++++++++
repos/domain/memory_params_config.py | 96 ++++++++++++++++++++++++++++++
repos/domain/memory_params_live.py | 109 +++++++++++++++++++++++++++++++++++
repos/domain/memory_peek.py | 48 +++++++++++++++
repos/domain/memory_stats.py | 65 +++++++++++++++++++++
repos/domain/set_maxmem_config.py | 59 +++++++++++++++++++
repos/domain/set_memory_config.py | 94 ++++++++++++++++++++++++++++++
repos/domain/set_memory_live.py | 86 +++++++++++++++++++++++++++
8 files changed, 656 insertions(+)
create mode 100644 cases/domain_memory_test.conf
create mode 100644 repos/domain/memory_params_config.py
create mode 100644 repos/domain/memory_params_live.py
create mode 100644 repos/domain/memory_peek.py
create mode 100644 repos/domain/memory_stats.py
create mode 100644 repos/domain/set_maxmem_config.py
create mode 100644 repos/domain/set_memory_config.py
create mode 100644 repos/domain/set_memory_live.py
diff --git a/cases/domain_memory_test.conf b/cases/domain_memory_test.conf
new file mode 100644
index 0000000..90879ab
--- /dev/null
+++ b/cases/domain_memory_test.conf
@@ -0,0 +1,99 @@
+domain:install_linux_cdrom
+ guestname
+ $defaultname
+ guestos
+ $defaultos
+ guestarch
+ $defaultarch
+ vcpu
+ $defaultvcpu
+ memory
+ $defaultmem
+ hddriver
+ $defaulthd
+ nicdriver
+ $defaultnic
+ macaddr
+ 54:52:00:4a:c1:22
+
+domain:balloon_memory
+ guestname
+ $defaultname
+ memorypair
+ 1024,2048
+
+domain:destroy
+ guestname
+ $defaultname
+
+domain:memory_params_config
+ guestname
+ $defaultname
+ hard_limit
+ 0
+ soft_limit
+ 9007199254740991
+ swap_hard_limit
+ -1
+
+domain:set_maxmem_config
+ guestname
+ $defaultname
+ memory
+ 16777216
+
+domain:set_memory_config
+ guestname
+ $defaultname
+ memory
+ 1048576
+ maxmem
+ 4194304
+
+domain:start
+ guestname
+ $defaultname
+
+domain:memory_stats
+ guestname
+ $defaultname
+
+domain:memory_peek
+ guestname
+ $defaultname
+
+domain:memory_params_live
+ guestname
+ $defaultname
+ hard_limit
+ 25417224
+ soft_limit
+ 9007199254740900
+ swap_hard_limit
+ -1
+
+domain:set_memory_live
+ guestname
+ $defaultname
+ memory
+ 2097152
+ username
+ $username
+ password
+ $password
+
+domain:set_memory_config
+ guestname
+ $defaultname
+ memory
+ 4194304
+
+domain:destroy
+ guestname
+ $defaultname
+
+domain:undefine
+ guestname
+ $defaultname
+
+options cleanup=enable
diff --git a/repos/domain/memory_params_config.py b/repos/domain/memory_params_config.py
new file mode 100644
index 0000000..af9781b
--- /dev/null
+++ b/repos/domain/memory_params_config.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# Test set domain memory parameters with flag
+# VIR_DOMAIN_AFFECT_CONFIG
+
+from xml.dom import minidom
+
+import libvirt
+from libvirt import libvirtError
+
+from src import sharedmod
+
+required_params = ('guestname', 'hard_limit', 'soft_limit', 'swap_hard_limit', )
+optional_params = {}
+
+UNLIMITED = 9007199254740991
+
+def get_memory_config(domobj, param_dict):
+ """get domain config memory parameters
+ """
+ new_dict = {}
+ try:
+ guestxml = domobj.XMLDesc(2)
+ logger.debug("domain %s xml is :\n%s" %(domobj.name(), guestxml))
+ xml = minidom.parseString(guestxml)
+ logger.info("get domain memory parameters in config xml")
+ for i in param_dict.keys():
+ if xml.getElementsByTagName(i):
+ limit_element = xml.getElementsByTagName(i)[0]
+ limit = int(limit_element.childNodes[0].data)
+ logger.info("%s in config xml is: %s" % (i, limit))
+ new_dict[i] = limit
+ else:
+ logger.info("%s is not in config xml" % i)
+ new_dict[i] = 0
+
+ except libvirtError, e:
+ logger.error("libvirt call failed: " + str(e))
+ return False
+
+ return new_dict
+
+def memory_params_config(params):
+ """set domain memory parameters with config flag and check
+ """
+ global logger
+ logger = params['logger']
+ guestname = params['guestname']
+ hard_limit = int(params['hard_limit'])
+ soft_limit = int(params['soft_limit'])
+ swap_hard_limit = int(params['swap_hard_limit'])
+
+ logger.info("the name of virtual machine is %s" % guestname)
+ param_dict = {'hard_limit': hard_limit,
+ 'soft_limit': soft_limit,
+ 'swap_hard_limit': swap_hard_limit
+ }
+
+ for i in param_dict.keys():
+ if param_dict[i] == -1:
+ param_dict[i] = UNLIMITED
+
+ logger.info("the param dict for setting is %s" % param_dict)
+
+ conn = sharedmod.libvirtobj['conn']
+
+ try:
+ domobj = conn.lookupByName(guestname)
+ flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
+ logger.info("set %s memory parameters with flag: %s" %
+ (guestname, flags))
+ domobj.setMemoryParameters(param_dict, flags)
+ logger.info("set memory parameters done")
+
+ logger.info("get %s memory parameters with flag: %s" %
+ (guestname, flags))
+ ret = domobj.memoryParameters(flags)
+ logger.info("%s memory parameters is %s" % (guestname, ret))
+
+ if ret == param_dict:
+ logger.info("memory parameters is as expected")
+ else:
+ logger.error("memory parameters is not as expected")
+ return 1
+
+ ret = get_memory_config(domobj, param_dict)
+ if ret == param_dict:
+ logger.info("memory parameters is as expected in config xml")
+ else:
+ logger.error("memory parameters is not as expected in config xml")
+ return 1
+
+ except libvirtError, e:
+ logger.error("libvirt call failed: " + str(e))
+ return 1
+
+ return 0
diff --git a/repos/domain/memory_params_live.py b/repos/domain/memory_params_live.py
new file mode 100644
index 0000000..68a71b2
--- /dev/null
+++ b/repos/domain/memory_params_live.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+# Test set domain memory parameters with flag
+# VIR_DOMAIN_AFFECT_LIVE
+
+import os
+import math
+from xml.dom import minidom
+
+import libvirt
+from libvirt import libvirtError
+
+from src import sharedmod
+
+required_params = ('guestname', 'hard_limit', 'soft_limit', 'swap_hard_limit', )
+optional_params = {}
+
+UNLIMITED = 9007199254740991
+CGROUP_PATH = "/cgroup/memory/libvirt/qemu"
+
+def get_cgroup_setting(guestname):
+ """get domain memory parameters in cgroup
+ """
+ if os.path.exists(CGROUP_PATH):
+ cgroup_path = "%s/%s" % (CGROUP_PATH, guestname)
+ else:
+ cgroup_path = "/sys/fs%s/%s" % (CGROUP_PATH, guestname)
+
+ f = open("%s/memory.limit_in_bytes" % cgroup_path)
+ hard = int(f.read())
+ logger.info("memory.limit_in_bytes value is %s" % hard)
+
+ f = open("%s/memory.soft_limit_in_bytes" % cgroup_path)
+ soft = int(f.read())
+ logger.info("memory.soft_limit_in_bytes value is %s" % soft)
+
+ f = open("%s/memory.memsw.limit_in_bytes" % cgroup_path)
+ swap = int(f.read())
+ logger.info("memory.memsw.limit_in_bytes value is %s" % swap)
+
+ new_dict = {'hard_limit': hard/1024,
+ 'soft_limit': soft/1024,
+ 'swap_hard_limit': swap/1024
+ }
+ logger.debug("memory parameters dict get from cgroup is %s" % new_dict)
+
+ return new_dict
+
+def memory_params_live(params):
+ """set domain memory parameters with live flag and check
+ """
+ global logger
+ logger = params['logger']
+ guestname = params['guestname']
+ hard_limit = int(params['hard_limit'])
+ soft_limit = int(params['soft_limit'])
+ swap_hard_limit = int(params['swap_hard_limit'])
+
+ logger.info("the name of virtual machine is %s" % guestname)
+ param_dict = {'hard_limit': hard_limit,
+ 'soft_limit': soft_limit,
+ 'swap_hard_limit': swap_hard_limit
+ }
+
+ for i in param_dict.keys():
+ if param_dict[i] == -1:
+ param_dict[i] = UNLIMITED
+
+ logger.info("the param dict for setting is %s" % param_dict)
+
+ conn = sharedmod.libvirtobj['conn']
+
+ try:
+ domobj = conn.lookupByName(guestname)
+ flags = libvirt.VIR_DOMAIN_AFFECT_LIVE
+ logger.info("get %s memory parameters with flag: %s" %
+ (guestname, flags))
+ ret_pre = domobj.memoryParameters(flags)
+ logger.info("%s memory parameters is %s" % (guestname, ret_pre))
+
+ logger.info("set %s memory parameters with flag: %s" %
+ (guestname, flags))
+ domobj.setMemoryParameters(param_dict, flags)
+ logger.info("set memory parameters done")
+
+ logger.info("get %s memory parameters with flag: %s" %
+ (guestname, flags))
+ ret_pos = domobj.memoryParameters(flags)
+ logger.info("%s memory parameters is %s" % (guestname, ret_pos))
+
+ if ret_pos == param_dict:
+ logger.info("memory parameters is as expected")
+ else:
+ logger.error("memory parameters is not as expected")
+ return 1
+
+ logger.info("check memory parameters in cgroup")
+ ret = get_cgroup_setting(guestname)
+ for i in param_dict.keys():
+ if math.fabs(param_dict[i] - ret[i]) > 1:
+ logger.error("%s value not match with cgroup setting" % i)
+ return 1
+
+ logger.info("memory parameters is as expected in cgroup setting")
+
+ except libvirtError, e:
+ logger.error("libvirt call failed: " + str(e))
+ return 1
+
+ return 0
diff --git a/repos/domain/memory_peek.py b/repos/domain/memory_peek.py
new file mode 100644
index 0000000..de1ff87
--- /dev/null
+++ b/repos/domain/memory_peek.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# Test domain memory peek
+
+import libvirt
+from libvirt import libvirtError
+
+from src import sharedmod
+
+required_params = ('guestname', )
+optional_params = {}
+
+def memory_peek(params):
+ """domain memory peek
+ """
+ logger = params['logger']
+ guestname = params['guestname']
+
+ flag_dict = {'1':"VIR_MEMORY_VIRTUAL", '2':"VIR_MEMORY_PHYSICAL"}
+
+ logger.info("the name of virtual machine is %s" % guestname)
+
+ conn = sharedmod.libvirtobj['conn']
+
+ try:
+ domobj = conn.lookupByName(guestname)
+ logger.info("test memory peek API")
+ for flag in flag_dict.keys():
+ logger.info("using flag: %s" % flag_dict[flag])
+ mem = domobj.memoryPeek(0, 0, int(flag))
+ if mem:
+ return 1
+ logger.info("memory peek API works fine with flag: %s" %
+ flag_dict[flag])
+
+ logger.info("peek 8 bytes from domain memory")
+ for flag in flag_dict.keys():
+ logger.info("using flag: %s" % flag_dict[flag])
+ mem = domobj.memoryPeek(0, 8, int(flag))
+ if not mem:
+ return 1
+ logger.info("8 bytes start with 0 with flag %s is: %s" %
+ (flag_dict[flag], mem))
+
+ except libvirtError, e:
+ logger.error("libvirt call failed: " + str(e))
+ return 1
+
+ return 0
diff --git a/repos/domain/memory_stats.py b/repos/domain/memory_stats.py
new file mode 100644
index 0000000..5de4028
--- /dev/null
+++ b/repos/domain/memory_stats.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# Test get domain memory stats
+
+import libvirt
+from libvirt import libvirtError
+
+from src import sharedmod
+from utils import utils
+
+required_params = ('guestname', )
+optional_params = {}
+
+VIRSH = "virsh qemu-monitor-command"
+
+def get_memory_actual(guestname):
+ """get memory stats with virsh commands
+ """
+ qmp_actual = -1
+ cmd ="%s %s '{ \"execute\": \"query-balloon\" }'" % (VIRSH, guestname)
+ logger.info("check memory stats with virsh command: %s" % cmd)
+ ret, out = utils.exec_cmd(cmd, shell=True)
+ out_dict = eval(out[0])
+ if out_dict.has_key('return'):
+ if out_dict['return'].has_key('actual'):
+ qmp_actual = out_dict['return']['actual']
+ else:
+ return False
+
+ if qmp_actual == -1:
+ return False
+
+ logger.info("the memory actal is: %s" % qmp_actual)
+ return qmp_actual
+
+def memory_stats(params):
+ """get domain memory stats
+ """
+ global logger
+ logger = params['logger']
+ guestname = params['guestname']
+
+ logger.info("the name of virtual machine is %s" % guestname)
+
+ conn = sharedmod.libvirtobj['conn']
+
+ try:
+ domobj = conn.lookupByName(guestname)
+ mem = domobj.memoryStats()
+ logger.info("%s memory stats is: %s" % (guestname, mem))
+ ret = get_memory_actual(guestname)
+ if not ret:
+ logger.error("get memory actual with qmp command failed")
+ return 1
+
+ if ret == mem['actual']*1024:
+ logger.info("actual memory is equal to output of qmp command")
+ else:
+ logger.error("actual memory is not equal to output of qmp command")
+ return 1
+
+ except libvirtError, e:
+ logger.error("libvirt call failed: " + str(e))
+ return 1
+
+ return 0
diff --git a/repos/domain/set_maxmem_config.py b/repos/domain/set_maxmem_config.py
new file mode 100644
index 0000000..262d7b1
--- /dev/null
+++ b/repos/domain/set_maxmem_config.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# Test set domain max memory with API setMaxMemory.
+
+from xml.dom import minidom
+
+import libvirt
+from libvirt import libvirtError
+
+from src import sharedmod
+
+required_params = ('guestname', 'memory', )
+optional_params = {}
+
+def set_maxmem_config(params):
+ """set domain max memory, check with config xml and
+ maxMemory API
+ """
+ global logger
+ logger = params['logger']
+ guestname = params['guestname']
+ memory = int(params['memory'])
+
+ logger.info("the name of virtual machine is %s" % guestname)
+ logger.info("the given max memory value is %s" % memory)
+
+ conn = sharedmod.libvirtobj['conn']
+
+ try:
+ domobj = conn.lookupByName(guestname)
+ logger.info("set domain max memory as %s" % memory)
+ domobj.setMaxMemory(memory)
+
+ guestxml = domobj.XMLDesc(2)
+ logger.debug("domain %s xml is :\n%s" %(guestname, guestxml))
+ xml = minidom.parseString(guestxml)
+ mem = xml.getElementsByTagName('memory')[0]
+ maxmem = int(mem.childNodes[0].data)
+ logger.info("domain max memory in config xml is: %s" % maxmem)
+ if maxmem == memory:
+ logger.info("max memory in domain config xml is equal to set")
+ else:
+ logger.error("set max memory failed")
+ return 1
+
+ maxmem = domobj.maxMemory()
+ logger.info("max memory got by maxMemory API is: %s" % maxmem)
+ if maxmem == memory:
+ logger.info("max memory got by maxMemory API is equal to set")
+ else:
+ logger.error("set max memory failed")
+ return 1
+
+ logger.info("set max memory succeed")
+
+ except libvirtError, e:
+ logger.error("libvirt call failed: " + str(e))
+ return 1
+
+ return 0
diff --git a/repos/domain/set_memory_config.py b/repos/domain/set_memory_config.py
new file mode 100644
index 0000000..c8ef6c3
--- /dev/null
+++ b/repos/domain/set_memory_config.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+# Test set domain balloon memory with flag VIR_DOMAIN_AFFECT_CONFIG
+# or VIR_DOMAIN_MEM_MAXIMUM, depending on which optional param is
+# given.
+
+from xml.dom import minidom
+
+import libvirt
+from libvirt import libvirtError
+
+from src import sharedmod
+
+required_params = ('guestname', )
+optional_params = {'memory': 1048576,
+ 'maxmem': 4194304,
+ }
+def get_memory_config(domobj):
+ """get domain config current memory and max memory
+ """
+ try:
+ guestxml = domobj.XMLDesc(2)
+ logger.debug("domain %s xml is :\n%s" %(domobj.name(), guestxml))
+ xml = minidom.parseString(guestxml)
+
+ logger.info("get domain memory info in config xml")
+ mem = xml.getElementsByTagName('currentMemory')[0]
+ current = int(mem.childNodes[0].data)
+ logger.info("current memory in config xml is: %s" % current)
+
+ mem = xml.getElementsByTagName('memory')[0]
+ max_memory = int(mem.childNodes[0].data)
+ logger.info("max memory in config xml is: %s" % max_memory)
+
+ except libvirtError, e:
+ logger.error("libvirt call failed: " + str(e))
+ return False
+
+ return current, max_memory
+
+def set_memory_config(params):
+ """set domain memory with live flag and check
+ """
+ global logger
+ logger = params['logger']
+ guestname = params['guestname']
+ memory = params.get('memory', None)
+ maxmem = params.get('maxmem', None)
+
+ logger.info("the name of virtual machine is %s" % guestname)
+ if memory == None and maxmem == None:
+ logger.error("at least one of memory or maxmem should be provided")
+ return 1
+
+ conn = sharedmod.libvirtobj['conn']
+
+ try:
+ domobj = conn.lookupByName(guestname)
+ if memory:
+ memory = int(memory)
+ flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
+ logger.info("set domain memory as %s with flag: %s" %
+ (memory, flags))
+ domobj.setMemoryFlags(memory, flags)
+ ret = get_memory_config(domobj)
+ if not ret:
+ return 1
+
+ if ret[0] == memory:
+ logger.info("set current memory succeed")
+ else:
+ logger.error("set current memory failed")
+ return 1
+
+ if maxmem:
+ maxmem = int(maxmem)
+ flags = libvirt.VIR_DOMAIN_MEM_MAXIMUM
+ logger.info("set domain max memory as %s with flag: %s" %
+ (maxmem, flags))
+ domobj.setMemoryFlags(maxmem, flags)
+ ret = get_memory_config(domobj)
+ if not ret:
+ return 1
+
+ if ret[1] == maxmem:
+ logger.info("set max memory succeed")
+ else:
+ logger.error("set max memory failed")
+ return 1
+
+ except libvirtError, e:
+ logger.error("libvirt call failed: " + str(e))
+ return 1
+
+ return 0
diff --git a/repos/domain/set_memory_live.py b/repos/domain/set_memory_live.py
new file mode 100644
index 0000000..f7c77f9
--- /dev/null
+++ b/repos/domain/set_memory_live.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# Test set domain balloon memory with flag VIR_DOMAIN_AFFECT_LIVE.
+# Check domain info and inside domain to get current memory value.
+# The live flag only work on running domain, so test on shutoff
+# domain will fail.
+
+import time
+import math
+
+import libvirt
+from libvirt import libvirtError
+
+from src import sharedmod
+from utils import utils
+
+required_params = ('guestname', 'memory', 'username', 'password', )
+optional_params = {}
+
+def compare_memory(expect_memory, actual_memory):
+ """ comparing expected memory size with actual memory size """
+
+ logger.info("expected memory size is %s" % expect_memory)
+ logger.info("actual memory size is %s" % actual_memory)
+ diff = int(expect_memory) - int(actual_memory)
+
+ if math.fabs(diff)/expect_memory < 0.05:
+ return 0
+ else:
+ return 1
+
+def get_current_memory(guestname, username, password):
+ """get domain current memory inside domain
+ """
+ logger.debug("get the mac address of vm %s" % guestname)
+ mac = utils.get_dom_mac_addr(guestname)
+ logger.debug("the mac address of vm %s is %s" % (guestname, mac))
+ ip = utils.mac_to_ip(mac, 180)
+ current = utils.get_remote_memory(ip, username, password)
+
+ return current
+
+def set_memory_live(params):
+ """set domain memory with live flag and check
+ """
+ global logger
+ logger = params['logger']
+ params.pop('logger')
+ guestname = params['guestname']
+ memory = int(params['memory'])
+ username = params['username']
+ password = params['password']
+
+ logger.info("the name of virtual machine is %s" % guestname)
+ logger.info("the given memory value is %s" % memory)
+
+ conn = sharedmod.libvirtobj['conn']
+
+ try:
+ domobj = conn.lookupByName(guestname)
+ logger.info("set domain memory as %s with flag: %s" %
+ (memory, libvirt.VIR_DOMAIN_AFFECT_LIVE))
+ domobj.setMemoryFlags(memory, libvirt.VIR_DOMAIN_AFFECT_LIVE)
+ logger.info("get domain current memory")
+ time.sleep(3)
+ dominfo = domobj.info()
+ logger.debug("domain info list is: %s" % dominfo)
+ logger.info("domain current memory value is: %s KiB" % dominfo[2])
+ if memory == dominfo[2]:
+ logger.info("set memory match with domain info")
+ else:
+ logger.error("set memory not match with domain info")
+ return 1
+
+ logger.info("check domain memory inside domain")
+ ret = get_current_memory(guestname, username, password)
+ if not compare_memory(memory, ret):
+ logger.info("set domain memory succeed")
+ else:
+ logger.error("set domain memory failed")
+ return 1
+
+ except libvirtError, e:
+ logger.error("libvirt call failed: " + str(e))
+ return 1
+
+ return 0
--
1.8.1