[libvirt] [PATCH V6] support offline migration
by liguang
The original migration code was not aware of the offline case,
so add code to support offline migration quietly
(without disturbing the original migration) by passing the
VIR_MIGRATE_OFFLINE flag to the migration APIs if the
domain is really inactive, so that the
migration process will not be puzzled by the domain being
offline and exit unexpectedly.
These changes do not take care of the disk images the
domain requires, since disk images can be transferred
by other APIs as suggested;
so the migration result simply makes the domain
definition alive at the target side.
Signed-off-by: liguang <lig.fnst(a)cn.fujitsu.com>
---
include/libvirt/libvirt.h.in | 1 +
src/qemu/qemu_driver.c | 8 ++++++
src/qemu/qemu_migration.c | 55 ++++++++++++++++++++++++++++++++++++-----
src/qemu/qemu_migration.h | 3 +-
tools/virsh-domain.c | 6 ++++
5 files changed, 65 insertions(+), 8 deletions(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index cfe5047..77df2ab 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -995,6 +995,7 @@ typedef enum {
* whole migration process; this will be used automatically
* when supported */
VIR_MIGRATE_UNSAFE = (1 << 9), /* force migration even if it is considered unsafe */
+ VIR_MIGRATE_OFFLINE = (1 << 10), /* offline migrate */
} virDomainMigrateFlags;
/* Domain migration. */
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index b12d9bc..e70a5cc 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -9641,6 +9641,8 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
}
if (!virDomainObjIsActive(vm)) {
+ if (flags & VIR_MIGRATE_OFFLINE)
+ goto offline;
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not running"));
goto endjob;
@@ -9653,6 +9655,7 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
if (qemuDomainCheckEjectableMedia(driver, vm, asyncJob) < 0)
goto endjob;
+offline:
if (!(xml = qemuMigrationBegin(driver, vm, xmlin, dname,
cookieout, cookieoutlen,
flags)))
@@ -9888,6 +9891,11 @@ qemuDomainMigrateConfirm3(virDomainPtr domain,
goto cleanup;
}
+ if (flags & VIR_MIGRATE_OFFLINE) {
+ ret = 0;
+ goto cleanup;
+ }
+
if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_OUT))
goto cleanup;
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 1b21ef6..a54a26e 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -70,6 +70,7 @@ enum qemuMigrationCookieFlags {
QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT,
+ QEMU_MIGRATION_COOKIE_FLAG_OFFLINE,
QEMU_MIGRATION_COOKIE_FLAG_LAST
};
@@ -77,12 +78,13 @@ enum qemuMigrationCookieFlags {
VIR_ENUM_DECL(qemuMigrationCookieFlag);
VIR_ENUM_IMPL(qemuMigrationCookieFlag,
QEMU_MIGRATION_COOKIE_FLAG_LAST,
- "graphics", "lockstate", "persistent");
+ "graphics", "lockstate", "persistent", "offline");
enum qemuMigrationCookieFeatures {
QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
QEMU_MIGRATION_COOKIE_LOCKSTATE = (1 << QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE),
QEMU_MIGRATION_COOKIE_PERSISTENT = (1 << QEMU_MIGRATION_COOKIE_FLAG_PERSISTENT),
+ QEMU_MIGRATION_COOKIE_OFFLINE = (1 << QEMU_MIGRATION_COOKIE_FLAG_OFFLINE),
};
typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
@@ -439,6 +441,11 @@ qemuMigrationCookieXMLFormat(struct qemud_driver *driver,
virBufferAdjustIndent(buf, -2);
}
+ if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
+ virBufferAsprintf(buf, " <offline>\n");
+ virBufferAddLit(buf, " </offline>\n");
+ }
+
virBufferAddLit(buf, "</qemu-migration>\n");
return 0;
}
@@ -662,6 +669,12 @@ qemuMigrationCookieXMLParse(qemuMigrationCookiePtr mig,
VIR_FREE(nodes);
}
+ if ((flags & QEMU_MIGRATION_COOKIE_OFFLINE)) {
+ if (virXPathBoolean("count(./offline) > 0", ctxt)) {
+ mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
+ }
+ }
+
return 0;
error:
@@ -721,6 +734,10 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
qemuMigrationCookieAddPersistent(mig, dom) < 0)
return -1;
+ if (flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
+ mig->flags |= QEMU_MIGRATION_COOKIE_OFFLINE;
+ }
+
if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig)))
return -1;
@@ -1151,6 +1168,13 @@ char *qemuMigrationBegin(struct qemud_driver *driver,
QEMU_MIGRATION_COOKIE_LOCKSTATE) < 0)
goto cleanup;
+ if (flags & VIR_MIGRATE_OFFLINE) {
+ if (qemuMigrationBakeCookie(mig, driver, vm,
+ cookieout, cookieoutlen,
+ QEMU_MIGRATION_COOKIE_OFFLINE) < 0)
+ goto cleanup;
+ }
+
if (xmlin) {
if (!(def = virDomainDefParseString(driver->caps, xmlin,
QEMU_EXPECTED_VIRT_TYPES,
@@ -1314,6 +1338,15 @@ qemuMigrationPrepareAny(struct qemud_driver *driver,
goto endjob;
}
+ if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
+ QEMU_MIGRATION_COOKIE_OFFLINE)))
+ return ret;
+
+ if (mig->flags & QEMU_MIGRATION_COOKIE_OFFLINE) {
+ ret = 0;
+ goto cleanup;
+ }
+
/* Start the QEMU daemon, with the same command-line arguments plus
* -incoming $migrateFrom
*/
@@ -1856,7 +1889,8 @@ qemuMigrationRun(struct qemud_driver *driver,
virLockManagerPluginGetName(driver->lockManager));
return -1;
}
-
+ if (flags & VIR_MIGRATE_OFFLINE)
+ return 0;
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen,
QEMU_MIGRATION_COOKIE_GRAPHICS)))
goto cleanup;
@@ -2372,6 +2406,8 @@ static int doPeer2PeerMigrate3(struct qemud_driver *driver,
qemuDomainObjExitRemoteWithDriver(driver, vm);
}
VIR_FREE(dom_xml);
+ if (flags & VIR_MIGRATE_OFFLINE)
+ goto cleanup;
if (ret == -1)
goto cleanup;
@@ -2477,7 +2513,7 @@ finish:
vm->def->name);
cleanup:
- if (ddomain) {
+ if (ddomain || (flags & VIR_MIGRATE_OFFLINE)) {
virObjectUnref(ddomain);
ret = 0;
} else {
@@ -2554,7 +2590,7 @@ static int doPeer2PeerMigrate(struct qemud_driver *driver,
}
/* domain may have been stopped while we were talking to remote daemon */
- if (!virDomainObjIsActive(vm)) {
+ if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("guest unexpectedly quit"));
goto cleanup;
@@ -2617,7 +2653,7 @@ qemuMigrationPerformJob(struct qemud_driver *driver,
if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto cleanup;
- if (!virDomainObjIsActive(vm)) {
+ if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not running"));
goto endjob;
@@ -2941,6 +2977,8 @@ qemuMigrationFinish(struct qemud_driver *driver,
*/
if (retcode == 0) {
if (!virDomainObjIsActive(vm)) {
+ if (flags & VIR_MIGRATE_OFFLINE)
+ goto offline;
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("guest unexpectedly quit"));
goto endjob;
@@ -3038,7 +3076,7 @@ qemuMigrationFinish(struct qemud_driver *driver,
goto endjob;
}
}
-
+ offline:
dom = virGetDomain (dconn, vm->def->name, vm->def->uuid);
event = virDomainEventNewFromObj(vm,
@@ -3120,7 +3158,10 @@ int qemuMigrationConfirm(struct qemud_driver *driver,
if (!(mig = qemuMigrationEatCookie(driver, vm, cookiein, cookieinlen, 0)))
return -1;
-
+ if (flags & VIR_MIGRATE_OFFLINE) {
+ rv = 0;
+ goto cleanup;
+ }
/* Did the migration go as planned? If yes, kill off the
* domain object, but if no, resume CPUs
*/
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 1740204..2bcaea0 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -36,7 +36,8 @@
VIR_MIGRATE_NON_SHARED_DISK | \
VIR_MIGRATE_NON_SHARED_INC | \
VIR_MIGRATE_CHANGE_PROTECTION | \
- VIR_MIGRATE_UNSAFE)
+ VIR_MIGRATE_UNSAFE | \
+ VIR_MIGRATE_OFFLINE)
enum qemuMigrationJobPhase {
QEMU_MIGRATION_PHASE_NONE = 0,
diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c
index 4684466..ec25043 100644
--- a/tools/virsh-domain.c
+++ b/tools/virsh-domain.c
@@ -6525,6 +6525,7 @@ static const vshCmdOptDef opts_migrate[] = {
{"dname", VSH_OT_DATA, 0, N_("rename to new name during migration (if supported)")},
{"timeout", VSH_OT_INT, 0, N_("force guest to suspend if live migration exceeds timeout (in seconds)")},
{"xml", VSH_OT_STRING, 0, N_("filename containing updated XML for the target")},
+ {"offline", VSH_OT_BOOL, 0, N_("for offline migration")},
{NULL, 0, 0, NULL}
};
@@ -6591,6 +6592,11 @@ doMigrate(void *opaque)
if (vshCommandOptBool(cmd, "unsafe"))
flags |= VIR_MIGRATE_UNSAFE;
+ if (vshCommandOptBool(cmd, "offline")) {
+ if (!virDomainIsActive(dom))
+ flags |= VIR_MIGRATE_OFFLINE;
+ }
+
if (xmlfile &&
virFileReadAll(xmlfile, 8192, &xml) < 0) {
vshError(ctl, _("file '%s' doesn't exist"), xmlfile);
--
1.7.2.5
12 years, 3 months
[libvirt] [PATCH v2 0/9] improve virBitmap
by Hu Tao
In many places we store bitmap info in a chunk of data
(pointed to by a char *), and have redundant code to
set/unset bits. This series extends virBitmap, and converts
that code to use virBitmap.
changes:
v2:
- fix bug in qemuSetupCgroupForEmulator
- new function virBitmapNextSetBit
- virBitmapcmp -> virBitmapEqual
- virBitmap: store bits in little endian format
- some improvements of virBitmap
- fix some memory leaks
Hu Tao (9):
fix bug in qemuSetupCgroupForEmulator
New functions for virBitmap
use virBitmap to store cpupin info
use virBitmap to store cpu affinity info
use virBitmap to store numa nodemask info.
use virBitmap to store cpumask info.
use virBitmap to store cells' cpumask info.
use virBitmap to store nodeinfo.
remove virDomainCpuSetFormat and virDomainCpuSetParse
.gitignore | 1 +
src/conf/cpu_conf.c | 17 +-
src/conf/cpu_conf.h | 3 +-
src/conf/domain_conf.c | 392 ++++++---------------------------
src/conf/domain_conf.h | 18 +-
src/libvirt_private.syms | 14 +-
src/lxc/lxc_controller.c | 56 ++---
src/nodeinfo.c | 26 +--
src/nodeinfo.h | 6 +-
src/parallels/parallels_driver.c | 5 +-
src/qemu/qemu_cgroup.c | 18 +-
src/qemu/qemu_cgroup.h | 2 +-
src/qemu/qemu_command.c | 43 +---
src/qemu/qemu_driver.c | 168 +++++++-------
src/qemu/qemu_process.c | 141 ++++--------
src/test/test_driver.c | 5 +-
src/util/bitmap.c | 451 +++++++++++++++++++++++++++++++++++++-
src/util/bitmap.h | 34 +++
src/util/processinfo.c | 36 +--
src/util/processinfo.h | 9 +-
src/vmx/vmx.c | 36 +--
tests/Makefile.am | 7 +-
tests/cpuset | 2 +-
tests/virbitmaptest.c | 233 ++++++++++++++++++++
24 files changed, 1034 insertions(+), 689 deletions(-)
create mode 100644 tests/virbitmaptest.c
--
1.7.10.2
12 years, 3 months
[libvirt] [PATCH] python: Initialize new_params in virDomainSetSchedulerParameters
by Federico Simoncelli
The new_params variable must be initialized in case the
virDomainGetSchedulerParameters call fails and we hit the cleanup
section before actually allocating the new parameters.
Signed-off-by: Federico Simoncelli <fsimonce(a)redhat.com>
---
python/libvirt-override.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/python/libvirt-override.c b/python/libvirt-override.c
index bb1d881..485ed28 100644
--- a/python/libvirt-override.c
+++ b/python/libvirt-override.c
@@ -700,7 +700,7 @@ libvirt_virDomainSetSchedulerParameters(PyObject *self ATTRIBUTE_UNUSED,
int i_retval;
int nparams = 0;
Py_ssize_t size = 0;
- virTypedParameterPtr params, new_params;
+ virTypedParameterPtr params, new_params = NULL;
if (!PyArg_ParseTuple(args, (char *)"OO:virDomainSetScedulerParameters",
&pyobj_domain, &info))
--
1.7.1
12 years, 3 months
[libvirt] 1 of 56 tests failed
by fugui_li
=======================================
make[2]: *** [check-TESTS] Error 1
make[2]: Leaving directory `/home/work/libvirt/91021/libvirt-0.9.10/build/tests'
make[1]: *** [check-am] Error 2
make[1]: Leaving directory `/home/work/libvirt/91021/libvirt-0.9.10/build/tests'
make: *** [check-recursive] Error 1
exec 3>&-
test "$st" = 0
fugui_li
12 years, 3 months
[libvirt] [PATCH] docs: page.xsl: fix FAQ link in subdirectories
by Ján Tomko
Links to the FAQ didn't work on pages in subdirectories, like
devhelp/libvirt-virterror.html or internals/command.html, because
they have had href_base prepended to them.
---
docs/page.xsl | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/docs/page.xsl b/docs/page.xsl
index fc782a2..bc8ea2a 100644
--- a/docs/page.xsl
+++ b/docs/page.xsl
@@ -54,7 +54,7 @@
<xsl:when test="$pagename = a/@href">
<span class="{$class}"><xsl:value-of select="a"/></span>
</xsl:when>
- <xsl:when test="a/@href = 'http://wiki.libvirt.org'">
+ <xsl:when test="starts-with(a/@href, 'http://wiki.libvirt.org')">
<a title="{./span}" class="{$class}" href="{a/@href}"><xsl:value-of select="a"/></a>
</xsl:when>
<xsl:otherwise>
--
1.7.8.6
12 years, 3 months
[libvirt] [PATCHv1 0/4] Subject: [PATCHv1 0/4] qemu: Add sandbox support
by Ján Tomko
This series adds support to run QEMU with sandbox enabled, a syscall filter
using seccomp. It can be configured in qemu.conf to on, off, or the QEMU
default, which is off in 1.2. Default value is the QEMU default.
V1 of the patches supports tri-state configuration and includes a test for
detecting the capability.
Ján Tomko (4):
qemu: add capability flag for sandbox
qemu: conf: add sandbox option
qemu: add -sandbox to command line if requested
tests: add qemu-1.2.0 data
src/qemu/qemu.conf | 8 +
src/qemu/qemu_capabilities.c | 3 +
src/qemu/qemu_capabilities.h | 1 +
src/qemu/qemu_command.c | 7 +
src/qemu/qemu_conf.c | 5 +
src/qemu/qemu_conf.h | 1 +
tests/qemuhelpdata/qemu-1.2.0 | 270 ++++++++++++++++++++++++++++++++++
tests/qemuhelpdata/qemu-1.2.0-device | 181 +++++++++++++++++++++++
tests/qemuhelptest.c | 82 ++++++++++
9 files changed, 558 insertions(+), 0 deletions(-)
create mode 100644 tests/qemuhelpdata/qemu-1.2.0
create mode 100644 tests/qemuhelpdata/qemu-1.2.0-device
--
1.7.8.6
12 years, 3 months
Re: [libvirt] problems using virt-manager
by 王金浦
2012/9/11 Lentes, Bernd <bernd.lentes(a)helmholtz-muenchen.de>
>
> Hi,
>
> i try to run virt-manager on a SLES 11 SP1 box. I'm using kernel 2.6.32.12
> and virt-manager 0.9.4-106.1.x86_64 .
> The system is a 64bit box.
>
> Here is the output:
> =========================
>
> pc56846:/media/idg2/SysAdmin_AG_Wurst/software_und_treiber/virt_manager/sles_11_sp1
> # virt-manager &
> [1] 9659
>
> pc56846:/media/idg2/SysAdmin_AG_Wurst/software_und_treiber/virt_manager/sles_11_sp1
> # Traceback (most recent call last):
> File "/usr/share/virt-manager/virt-manager.py", line 386, in <module>
> main()
> File "/usr/share/virt-manager/virt-manager.py", line 247, in main
> from virtManager import cli
> File "/usr/share/virt-manager/virtManager/cli.py", line 29, in <module>
> import libvirt
> File "/usr/lib64/python2.6/site-packages/libvirt.py", line 25, in
> <module>
> raise lib_e
> ImportError: /usr/lib64/libvirt.so.0: undefined symbol:
> selinux_virtual_domain_context_path
>
> [1]+ Exit 1 virt-manager
> =========================
>
It seems libvirt raised an ImportError, so the libvirt list
may be a good place to ask; adding it to CC.
Jack
12 years, 3 months
[libvirt] [PATCH] docs: hacking.html.in: fix table of contents
by Ján Tomko
Two sections didn't have a working link in the TOC.
---
docs/hacking.html.in | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/hacking.html.in b/docs/hacking.html.in
index eb79953..a97dc22 100644
--- a/docs/hacking.html.in
+++ b/docs/hacking.html.in
@@ -350,7 +350,7 @@
}
</pre>
- <h2><a href="types">Preprocessor</a></h2>
+ <h2><a name="preprocessor">Preprocessor</a></h2>
<p>
For variadic macros, stick with C99 syntax:
@@ -368,7 +368,7 @@
#endif
</pre>
- <h2><a href="types">C types</a></h2>
+ <h2><a name="types">C types</a></h2>
<p>
Use the right type.
--
1.7.8.6
12 years, 3 months
[libvirt] [libvirt-designer][PATCH 0/3] Cleanup
by Michal Privoznik
The first two patches are trivial,
the third would require some review.
Michal Privoznik (3):
disk_add: Don't hardcode 'qemu' driver
style: Use two blank lines between functions
virtxml: Drop direct libvirt usage
configure.ac | 24 +++++++++++++++++++--
examples/Makefile.am | 6 +++-
examples/virtxml.c | 24 ++++++++--------------
libvirt-designer/libvirt-designer-domain.c | 30 +++++++++++++++++++++++++++-
4 files changed, 63 insertions(+), 21 deletions(-)
--
1.7.8.6
12 years, 3 months
[libvirt] [PATCH 0/7 v4] Atomic API to list networks
by Osier Yang
v3 - v4:
- Just rebase on top, and split the API from the big set
Osier Yang (7):
list: Define new API virConnectListAllNetworks
list: Implement RPC calls for virConnectListAllNetworks
list: Add helpers to list network objects
list: Implement listAllNetworks for network driver
list: Implement listAllNetworks for test driver
list: Use virConnectListAllNetworks in virsh
list: Expose virConnectListAllNetworks to Python binding
daemon/remote.c | 55 +++++
include/libvirt/libvirt.h.in | 20 ++
python/generator.py | 1 +
python/libvirt-override-api.xml | 6 +
python/libvirt-override-virConnect.py | 12 ++
python/libvirt-override.c | 48 +++++
src/conf/network_conf.c | 91 +++++++++
src/conf/network_conf.h | 22 ++
src/driver.h | 5 +
src/libvirt.c | 86 ++++++++-
src/libvirt_private.syms | 1 +
src/libvirt_public.syms | 1 +
src/network/bridge_driver.c | 17 ++
src/remote/remote_driver.c | 64 ++++++
src/remote/remote_protocol.x | 13 ++-
src/remote_protocol-structs | 12 ++
src/test/test_driver.c | 17 ++
tools/virsh-network.c | 352 ++++++++++++++++++++++++--------
tools/virsh.pod | 12 +-
19 files changed, 743 insertions(+), 92 deletions(-)
--
1.7.7.3
12 years, 3 months