Re: [libvirt] Libvirt Application Development Guide
by Kevin Walker
Hi All
I am currently getting up to speed with the libvirt API, but I have
noticed some big gaps in the Application Development Guide available on
the libvirt website.
Is there a more recent version of this document available?
Kind regards
Kevin Walker
+968 97651742
9 years, 3 months
[libvirt] [PATCH] rpc: RH1026137: Fix slow volume download (virsh vol-download)
by Ossi Herrala
Use an I/O vector (iovec) instead of one huge memory buffer, as suggested
in https://bugzilla.redhat.com/show_bug.cgi?id=1026137#c7. This avoids
memmove() on big buffers, so performance no longer degrades when the
source (virNetClientStreamQueuePacket()) is faster than the sink
(virNetClientStreamRecvPacket()).
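To make the idea concrete outside the RPC code, here is a minimal standalone
sketch of the produce/consume pattern the patch switches to. The struct and
function names (stream_push/stream_pull) are hypothetical and only illustrate
the technique; they are not libvirt API:

    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>

    struct stream {
        struct iovec *vec;  /* queue of copied data chunks */
        size_t nvec;        /* chunks produced so far */
        size_t rvec;        /* chunks fully consumed so far */
    };

    /* Producer: append the incoming chunk as a new iovec instead of
     * growing (and later memmove-ing) one large flat buffer. */
    static int stream_push(struct stream *st, const char *buf, size_t len)
    {
        struct iovec *tmp = realloc(st->vec, (st->nvec + 1) * sizeof(*tmp));
        if (!tmp)
            return -1;
        st->vec = tmp;
        if (!(st->vec[st->nvec].iov_base = malloc(len)))
            return -1;
        memcpy(st->vec[st->nvec].iov_base, buf, len);
        st->vec[st->nvec].iov_len = len;
        st->nvec++;
        return 0;
    }

    /* Consumer: drain whole chunks; only a partially consumed chunk is
     * ever memmove()d, and that chunk is bounded in size, so the cost
     * no longer grows with the total amount of queued data. */
    static size_t stream_pull(struct stream *st, char *out, size_t want)
    {
        size_t got = 0;

        while (st->rvec < st->nvec && got < want) {
            struct iovec *iov = &st->vec[st->rvec];
            size_t take = want - got < iov->iov_len ? want - got
                                                    : iov->iov_len;

            memcpy(out + got, iov->iov_base, take);
            got += take;
            if (take == iov->iov_len) {   /* chunk fully consumed */
                free(iov->iov_base);
                st->rvec++;
            } else {                      /* keep the remainder */
                memmove(iov->iov_base,
                        (char *)iov->iov_base + take,
                        iov->iov_len - take);
                iov->iov_len -= take;
            }
        }
        return got;
    }

The point is that a partially consumed chunk is at most one bounded-size
piece (1 MiB in the patch), so the memmove() cost stays constant rather than
scaling with everything queued behind it.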
---
src/rpc/virnetclientstream.c | 134 +++++++++++++++++++++++++----------------
1 files changed, 82 insertions(+), 52 deletions(-)
diff --git a/src/rpc/virnetclientstream.c b/src/rpc/virnetclientstream.c
index b428f4b..18c6e8b 100644
--- a/src/rpc/virnetclientstream.c
+++ b/src/rpc/virnetclientstream.c
@@ -49,9 +49,9 @@ struct _virNetClientStream {
* time by stopping consuming any incoming data
* off the socket....
*/
- char *incoming;
- size_t incomingOffset;
- size_t incomingLength;
+ struct iovec *incomingVec; /* I/O Vector to hold data */
+ size_t writeVec; /* Vectors produced */
+ size_t readVec; /* Vectors consumed */
bool incomingEOF;
virNetClientStreamEventCallback cb;
@@ -86,9 +86,9 @@ virNetClientStreamEventTimerUpdate(virNetClientStreamPtr st)
if (!st->cb)
return;
- VIR_DEBUG("Check timer offset=%zu %d", st->incomingOffset, st->cbEvents);
+ VIR_DEBUG("Check timer readVec %zu writeVec %zu %d", st->readVec, st->writeVec, st->cbEvents);
- if (((st->incomingOffset || st->incomingEOF) &&
+ if ((((st->readVec < st->writeVec) || st->incomingEOF) &&
(st->cbEvents & VIR_STREAM_EVENT_READABLE)) ||
(st->cbEvents & VIR_STREAM_EVENT_WRITABLE)) {
VIR_DEBUG("Enabling event timer");
@@ -110,13 +110,14 @@ virNetClientStreamEventTimer(int timer ATTRIBUTE_UNUSED, void *opaque)
if (st->cb &&
(st->cbEvents & VIR_STREAM_EVENT_READABLE) &&
- (st->incomingOffset || st->incomingEOF))
+ ((st->readVec < st->writeVec) || st->incomingEOF))
events |= VIR_STREAM_EVENT_READABLE;
if (st->cb &&
(st->cbEvents & VIR_STREAM_EVENT_WRITABLE))
events |= VIR_STREAM_EVENT_WRITABLE;
- VIR_DEBUG("Got Timer dispatch %d %d offset=%zu", events, st->cbEvents, st->incomingOffset);
+ VIR_DEBUG("Got Timer dispatch %d %d readVec %zu writeVec %zu", events, st->cbEvents,
+ st->readVec, st->writeVec);
if (events) {
virNetClientStreamEventCallback cb = st->cb;
void *cbOpaque = st->cbOpaque;
@@ -161,7 +162,7 @@ void virNetClientStreamDispose(void *obj)
virNetClientStreamPtr st = obj;
virResetError(&st->err);
- VIR_FREE(st->incoming);
+ VIR_FREE(st->incomingVec);
virObjectUnref(st->prog);
}
@@ -265,38 +266,49 @@ int virNetClientStreamQueuePacket(virNetClientStreamPtr st,
virNetMessagePtr msg)
{
int ret = -1;
- size_t need;
+ struct iovec iov;
+ char *base;
+ size_t piece, pieces, length, offset = 0, size = 1024*1024;
virObjectLock(st);
- need = msg->bufferLength - msg->bufferOffset;
- if (need) {
- size_t avail = st->incomingLength - st->incomingOffset;
- if (need > avail) {
- size_t extra = need - avail;
- if (VIR_REALLOC_N(st->incoming,
- st->incomingLength + extra) < 0) {
- VIR_DEBUG("Out of memory handling stream data");
- goto cleanup;
- }
- st->incomingLength += extra;
- }
- memcpy(st->incoming + st->incomingOffset,
- msg->buffer + msg->bufferOffset,
- msg->bufferLength - msg->bufferOffset);
- st->incomingOffset += (msg->bufferLength - msg->bufferOffset);
- } else {
+ length = msg->bufferLength - msg->bufferOffset;
+
+ if (length == 0) {
st->incomingEOF = true;
+ goto end;
}
- VIR_DEBUG("Stream incoming data offset %zu length %zu EOF %d",
- st->incomingOffset, st->incomingLength,
- st->incomingEOF);
+ pieces = (length + size - 1) / size;
+ for (piece = 0; piece < pieces; piece++) {
+ if (size > length - offset)
+ size = length - offset;
+
+ if (VIR_ALLOC_N(base, size)) {
+ VIR_DEBUG("Allocation failed");
+ goto cleanup;
+ }
+
+ memcpy(base, msg->buffer + msg->bufferOffset + offset, size);
+ iov.iov_base = base;
+ iov.iov_len = size;
+ offset += size;
+
+ if (VIR_APPEND_ELEMENT(st->incomingVec, st->writeVec, iov) < 0) {
+ VIR_DEBUG("Append failed");
+ VIR_FREE(base);
+ goto cleanup;
+ }
+ VIR_DEBUG("Wrote piece of vector. readVec %zu, writeVec %zu size %zu", st->readVec, st->writeVec, size);
+ }
+
+ end:
virNetClientStreamEventTimerUpdate(st);
-
ret = 0;
cleanup:
+ VIR_DEBUG("Stream incoming data readVec %zu writeVec %zu EOF %d",
+ st->readVec, st->writeVec, st->incomingEOF);
virObjectUnlock(st);
return ret;
}
@@ -361,17 +373,21 @@ int virNetClientStreamRecvPacket(virNetClientStreamPtr st,
size_t nbytes,
bool nonblock)
{
- int rv = -1;
+ int ret = -1;
+ size_t partial, offset;
+
+ virObjectLock(st);
+
VIR_DEBUG("st=%p client=%p data=%p nbytes=%zu nonblock=%d",
st, client, data, nbytes, nonblock);
- virObjectLock(st);
- if (!st->incomingOffset && !st->incomingEOF) {
+
+ if ((st->readVec >= st->writeVec) && !st->incomingEOF) {
virNetMessagePtr msg;
- int ret;
+ int rv;
if (nonblock) {
VIR_DEBUG("Non-blocking mode and no data available");
- rv = -2;
+ ret = -2;
goto cleanup;
}
@@ -387,37 +403,51 @@ int virNetClientStreamRecvPacket(virNetClientStreamPtr st,
VIR_DEBUG("Dummy packet to wait for stream data");
virObjectUnlock(st);
- ret = virNetClientSendWithReplyStream(client, msg, st);
+ rv = virNetClientSendWithReplyStream(client, msg, st);
virObjectLock(st);
virNetMessageFree(msg);
- if (ret < 0)
+ if (rv < 0)
goto cleanup;
}
- VIR_DEBUG("After IO %zu", st->incomingOffset);
- if (st->incomingOffset) {
- int want = st->incomingOffset;
- if (want > nbytes)
- want = nbytes;
- memcpy(data, st->incoming, want);
- if (want < st->incomingOffset) {
- memmove(st->incoming, st->incoming + want, st->incomingOffset - want);
- st->incomingOffset -= want;
+ offset = 0;
+ partial = nbytes;
+
+ while (st->incomingVec && (st->readVec < st->writeVec)) {
+ struct iovec *iov = st->incomingVec + st->readVec;
+
+ if (!iov || !iov->iov_base) {
+ VIR_DEBUG("NULL pointer");
+ goto cleanup;
+ }
+
+ if (partial < iov->iov_len) {
+ memcpy(data+offset, iov->iov_base, partial);
+ memmove(iov->iov_base, (char*)iov->iov_base+partial, iov->iov_len-partial);
+ iov->iov_len -= partial;
+ offset += partial;
+ VIR_DEBUG("Consumed %zu, left %zu", partial, iov->iov_len);
+ break;
} else {
- VIR_FREE(st->incoming);
- st->incomingOffset = st->incomingLength = 0;
+ memcpy(data+offset, iov->iov_base, iov->iov_len);
+ VIR_DEBUG("Consumed %zu. Moving to next piece", iov->iov_len);
+ partial -= iov->iov_len;
+ offset += iov->iov_len;
+ VIR_FREE(iov->iov_base);
+ iov->iov_len = 0;
+ st->readVec++;
}
- rv = want;
- } else {
- rv = 0;
+
+ VIR_DEBUG("Read piece of vector. read %zu readVec %zu, writeVec %zu", offset, st->readVec, st->writeVec);
}
+ ret = offset;
virNetClientStreamEventTimerUpdate(st);
cleanup:
virObjectUnlock(st);
- return rv;
+ return ret;
}
--
1.7.1
9 years, 3 months
[libvirt] [PATCH] qemu: fix some APIs that cannot work when cpuset is disabled in conf
by Luyao Huang
https://bugzilla.redhat.com/show_bug.cgi?id=1244664
If the user disables cpuset in qemu.conf, we shouldn't try to use it,
and we also shouldn't break commands that can work without cpuset.
Fix these cases:
1. Starting a guest with a strict NUMA policy (we can use libnuma to help us).
2. Hot-adding a vCPU.
3. Hot-adding an IOThread.
Signed-off-by: Luyao Huang <lhuang(a)redhat.com>
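The guard the patch applies in each path is the same; condensed, it looks
like this (using the real libvirt helpers that appear in the hunks below,
with the surrounding error handling trimmed):

    /* Only touch cpuset.mems / cpuset.cpus when the cpuset controller
     * is actually available; otherwise skip the tuning rather than
     * failing the whole vCPU/IOThread operation. */
    if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
        if (mem_mask &&
            virCgroupSetCpusetMems(cgroup_vcpu, mem_mask) < 0)
            goto cleanup;
    }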
---
src/qemu/qemu_cgroup.c | 16 ++++++++--------
src/qemu/qemu_driver.c | 10 +++++++---
2 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/src/qemu/qemu_cgroup.c b/src/qemu/qemu_cgroup.c
index 8ed74ee..640a223 100644
--- a/src/qemu/qemu_cgroup.c
+++ b/src/qemu/qemu_cgroup.c
@@ -1028,10 +1028,6 @@ qemuSetupCgroupForVcpu(virDomainObjPtr vm)
if (virCgroupAddTask(cgroup_vcpu, priv->vcpupids[i]) < 0)
goto cleanup;
- if (mem_mask &&
- virCgroupSetCpusetMems(cgroup_vcpu, mem_mask) < 0)
- goto cleanup;
-
if (period || quota) {
if (qemuSetupCgroupVcpuBW(cgroup_vcpu, period, quota) < 0)
goto cleanup;
@@ -1041,6 +1037,10 @@ qemuSetupCgroupForVcpu(virDomainObjPtr vm)
if (virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
virBitmapPtr cpumap = NULL;
+ if (mem_mask &&
+ virCgroupSetCpusetMems(cgroup_vcpu, mem_mask) < 0)
+ goto cleanup;
+
/* try to use the default cpu maps */
if (vm->def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)
cpumap = priv->autoCpuset;
@@ -1205,15 +1205,15 @@ qemuSetupCgroupForIOThreads(virDomainObjPtr vm)
goto cleanup;
}
- if (mem_mask &&
- virCgroupSetCpusetMems(cgroup_iothread, mem_mask) < 0)
- goto cleanup;
-
/* Set iothreadpin in cgroup if iothreadpin xml is provided */
if (virCgroupHasController(priv->cgroup,
VIR_CGROUP_CONTROLLER_CPUSET)) {
virBitmapPtr cpumask = NULL;
+ if (mem_mask &&
+ virCgroupSetCpusetMems(cgroup_iothread, mem_mask) < 0)
+ goto cleanup;
+
if (def->iothreadids[i]->cpumask)
cpumask = def->iothreadids[i]->cpumask;
else if (def->placement_mode == VIR_DOMAIN_CPU_PLACEMENT_MODE_AUTO)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index f352a88..bb7cef4 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -4597,7 +4597,9 @@ qemuDomainAddCgroupForThread(virCgroupPtr cgroup,
if (virCgroupNewThread(cgroup, nameval, idx, true, &new_cgroup) < 0)
return NULL;
- if (mem_mask && virCgroupSetCpusetMems(new_cgroup, mem_mask) < 0)
+ if (mem_mask &&
+ virCgroupHasController(cgroup, VIR_CGROUP_CONTROLLER_CPUSET) &&
+ virCgroupSetCpusetMems(new_cgroup, mem_mask) < 0)
goto error;
/* Add pid/thread to the cgroup */
@@ -4653,7 +4655,8 @@ qemuDomainHotplugPinThread(virBitmapPtr cpumask,
{
int ret = -1;
- if (cgroup) {
+ if (cgroup &&
+ virCgroupHasController(cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
if (qemuSetupCgroupCpusetCpus(cgroup, cpumask) < 0) {
virReportError(VIR_ERR_OPERATION_INVALID,
_("failed to set cpuset.cpus in cgroup for id %d"),
@@ -4896,7 +4899,8 @@ qemuDomainSetVcpusFlags(virDomainPtr dom, unsigned int nvcpus,
if (virDomainObjGetDefs(vm, flags, &def, &persistentDef) < 0)
goto endjob;
- if (def && !(flags & VIR_DOMAIN_VCPU_GUEST) && virNumaIsAvailable()) {
+ if (def && !(flags & VIR_DOMAIN_VCPU_GUEST) && virNumaIsAvailable() &&
+ virCgroupHasController(priv->cgroup, VIR_CGROUP_CONTROLLER_CPUSET)) {
if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_EMULATOR, 0,
false, &cgroup_temp) < 0)
goto endjob;
--
1.8.3.1
9 years, 3 months
[libvirt] [PATCH 0/2] Added waiting for DAD to finish for bridge address.
by Maxim Perevedentsev
This is a fix for commit db488c79173b240459c7754f38c3c6af9b432970.
The dnsmasq main process, which that commit relies on to wait for DAD to
complete, actually exits without waiting for DAD; that is the dnsmasq
daemon's task. It seems to have been only a race that DAD happened to finish
before the dnsmasq main process exited.
The above commit needs execution to block until DAD finishes for the
bridge's IPv6 address, because it then closes the dummy tap device.
Thus we need to ensure this ourselves.
So we periodically poll the kernel using netlink and check whether there
are any IPv6 addresses assigned to the bridge that are still in the
'tentative' state. Once DAD has finished, execution continues. I guess that
is what dnsmasq was assumed to do.
We use netlink to dump information about existing IPv6 addresses. Netlink's
response is a multi-part message. Unfortunately, the current implementation
of virNetlink treats such messages as faulty and throws an error, so patch
2/2 adds support for multi-part netlink responses.
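For readers unfamiliar with the mechanism: an IPv6 address carries the
IFA_F_TENTATIVE flag until DAD completes, and an RTM_GETADDR dump reports
it. Below is a minimal standalone sketch of such a check, using raw netlink
sockets rather than libvirt's virNetlink helpers and with simplified error
handling (the real patch lives in bridge_driver.c):

    #include <unistd.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <linux/if_addr.h>

    /* Return 1 if any IPv6 address on ifindex is still 'tentative'
     * (DAD not finished yet), 0 if none, -1 on error. */
    static int ipv6_dad_pending(int ifindex)
    {
        struct {
            struct nlmsghdr nlh;
            struct ifaddrmsg ifa;
        } req;
        char buf[8192];
        int fd, pending = 0;

        if ((fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE)) < 0)
            return -1;

        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
        req.nlh.nlmsg_type = RTM_GETADDR;
        req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        req.ifa.ifa_family = AF_INET6;

        if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0)
            goto error;

        for (;;) {                  /* dump replies are multi-part */
            int len = recv(fd, buf, sizeof(buf), 0);
            struct nlmsghdr *nlh;

            if (len < 0)
                goto error;
            for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
                 nlh = NLMSG_NEXT(nlh, len)) {
                struct ifaddrmsg *ifa;

                if (nlh->nlmsg_type == NLMSG_DONE) {
                    close(fd);
                    return pending;
                }
                if (nlh->nlmsg_type == NLMSG_ERROR)
                    goto error;
                ifa = NLMSG_DATA(nlh);
                if ((int)ifa->ifa_index == ifindex &&
                    (ifa->ifa_flags & IFA_F_TENTATIVE))
                    pending = 1;
            }
        }

     error:
        close(fd);
        return -1;
    }

The caller would invoke this in a loop with a short sleep until it returns 0.
Note the NLMSG_DONE handling: the dump is exactly the kind of multi-part
reply that patch 2/2 teaches virNetlink to accept.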
Maxim Perevedentsev (2):
network: added waiting for DAD to finish for bridge address.
netlink: add support for multi-part netlink messages.
src/network/bridge_driver.c | 109 +++++++++++++++++++++++++++++++++++++++++++-
src/util/virnetlink.c | 4 +-
2 files changed, 111 insertions(+), 2 deletions(-)
--
Sincerely,
Maxim Perevedentsev
9 years, 3 months
[libvirt] Replace AF_PACKET with AF_LOCAL
by Ben Gray
Hi,
I have a question: is it 'safe' to change the socket type used in the
virNetxxx calls from AF_PACKET to AF_LOCAL?
The reason I ask is that we're using libvirt-lxc with a couple of
bridge interfaces, and we've found that the socket close call on
AF_PACKET sockets takes between 40ms and 60ms. For our container
config there are roughly 12 close calls on AF_PACKET sockets, delaying
the start-up of the LXC container by around 450ms.
So a simple fix to speed up our container start-up is to switch from
AF_PACKET to AF_LOCAL sockets. Hence my question: can we safely do
this, or is there some reason why AF_PACKET was chosen?
Thanks in advance,
Ben.
FWIW - I believe the reason AF_PACKET sockets take so long to close is
that the kernel packet driver calls synchronize_net() when closing.
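For context, the class of calls in question is interface ioctls, which are
dispatched by interface name and work on a socket of any family. A sketch of
the same operation over a cheap AF_LOCAL socket (illustrative only, not
libvirt code -- whether every virNetDev call is equally family-agnostic is
exactly the question above):

    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>

    /* Fetch interface flags through an AF_LOCAL socket; SIOCGIFFLAGS
     * is resolved via ifr_name, so the socket family is irrelevant
     * for this class of request. */
    static int get_if_flags(const char *ifname, short *flags)
    {
        struct ifreq ifr;
        int fd = socket(AF_LOCAL, SOCK_DGRAM, 0);

        if (fd < 0)
            return -1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0) {
            close(fd);
            return -1;
        }
        *flags = ifr.ifr_flags;
        close(fd);
        return 0;
    }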
9 years, 3 months
[libvirt] [PATCH 0/4] cpu: Rename {powerpc,ppc} => ppc64
by Andrea Bolognani
The naming of files and symbols belonging to the ppc64 CPU
driver was all over the place: this series brings
inner-peace-inducing consistency to that corner of libvirt
via a series of straightforward string replacements.
More substantial changes coming next.
Andrea Bolognani (4):
cpu: Rename {powerpc,ppc} => ppc64 (filesystem)
cpu: Rename {powerpc,ppc} => ppc64 (exported symbols)
cpu: Rename {powerpc,ppc} => ppc64 (internal symbols)
cpu: Indentation changes in the ppc64 driver
po/POTFILES.in | 2 +-
src/Makefile.am | 5 +-
src/cpu/cpu.c | 4 +-
src/cpu/cpu.h | 4 +-
src/cpu/cpu_powerpc.c | 711 ----------------------------------------------
src/cpu/cpu_powerpc.h | 32 ---
src/cpu/cpu_ppc64.c | 712 +++++++++++++++++++++++++++++++++++++++++++++++
src/cpu/cpu_ppc64.h | 32 +++
src/cpu/cpu_ppc64_data.h | 33 +++
src/cpu/cpu_ppc_data.h | 33 ---
10 files changed, 785 insertions(+), 783 deletions(-)
delete mode 100644 src/cpu/cpu_powerpc.c
delete mode 100644 src/cpu/cpu_powerpc.h
create mode 100644 src/cpu/cpu_ppc64.c
create mode 100644 src/cpu/cpu_ppc64.h
create mode 100644 src/cpu/cpu_ppc64_data.h
delete mode 100644 src/cpu/cpu_ppc_data.h
--
2.4.3
9 years, 3 months
[libvirt] [PATCH] qemu: Reject migration with memory-hotplug if destination doesn't support it
by Peter Krempa
If the destination libvirt doesn't support memory hotplug (all of the
support was introduced by adding new XML elements), the destination would
attempt to start qemu with an invalid configuration. Worse, qemu might
hang in such a situation.
Fix this by sending a required migration feature called 'memory-hotplug'
to the destination. If the destination doesn't recognize it, it will fail
the migration.
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1248350
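The mechanism boils down to mandatory feature flags carried in the migration
cookie: the destination must refuse the migration when it sees a mandatory
flag it does not understand. A schematic sketch of that negotiation (names
and structure are illustrative only, not libvirt's actual cookie parser):

    #include <stdio.h>
    #include <string.h>

    /* Features this (destination) build understands. */
    static const char *known[] = { "graphics", "lockstate", "persistent",
                                   "network", "nbd", "statistics" };

    /* Refuse the migration if the source marked a feature mandatory
     * that we cannot honour -- e.g. "memory-hotplug" on an older
     * destination that predates the new elements. */
    static int check_mandatory(const char **mandatory, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            int ok = 0;
            for (size_t j = 0; j < sizeof(known)/sizeof(known[0]); j++)
                if (strcmp(mandatory[i], known[j]) == 0)
                    ok = 1;
            if (!ok) {
                fprintf(stderr,
                        "unsupported migration cookie feature: %s\n",
                        mandatory[i]);
                return -1;   /* fail the migration up front */
            }
        }
        return 0;
    }

With this pattern, an old destination fails fast with a clear error instead
of starting qemu with a configuration it cannot represent.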
---
src/qemu/qemu_migration.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index f5866c4..824126f 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -86,6 +86,7 @@ enum qemuMigrationCookieFlags {
QEMU_MIGRATION_COOKIE_FLAG_NETWORK,
QEMU_MIGRATION_COOKIE_FLAG_NBD,
QEMU_MIGRATION_COOKIE_FLAG_STATS,
+ QEMU_MIGRATION_COOKIE_FLAG_MEMORY_HOTPLUG,
QEMU_MIGRATION_COOKIE_FLAG_LAST
};
@@ -98,7 +99,8 @@ VIR_ENUM_IMPL(qemuMigrationCookieFlag,
"persistent",
"network",
"nbd",
- "statistics");
+ "statistics",
+ "memory-hotplug");
enum qemuMigrationCookieFeatures {
QEMU_MIGRATION_COOKIE_GRAPHICS = (1 << QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS),
@@ -107,6 +109,7 @@ enum qemuMigrationCookieFeatures {
QEMU_MIGRATION_COOKIE_NETWORK = (1 << QEMU_MIGRATION_COOKIE_FLAG_NETWORK),
QEMU_MIGRATION_COOKIE_NBD = (1 << QEMU_MIGRATION_COOKIE_FLAG_NBD),
QEMU_MIGRATION_COOKIE_STATS = (1 << QEMU_MIGRATION_COOKIE_FLAG_STATS),
+ QEMU_MIGRATION_COOKIE_MEMORY_HOTPLUG = (1 << QEMU_MIGRATION_COOKIE_FLAG_MEMORY_HOTPLUG),
};
typedef struct _qemuMigrationCookieGraphics qemuMigrationCookieGraphics;
@@ -1352,6 +1355,9 @@ qemuMigrationBakeCookie(qemuMigrationCookiePtr mig,
qemuMigrationCookieAddStatistics(mig, dom) < 0)
return -1;
+ if (flags & QEMU_MIGRATION_COOKIE_MEMORY_HOTPLUG)
+ mig->flagsMandatory |= QEMU_MIGRATION_COOKIE_MEMORY_HOTPLUG;
+
if (!(*cookieout = qemuMigrationCookieXMLFormatStr(driver, mig)))
return -1;
@@ -2974,6 +2980,11 @@ qemuMigrationBeginPhase(virQEMUDriverPtr driver,
}
}
+ if (vm->def->mem.max_memory ||
+ (vm->newDef &&
+ vm->newDef->mem.max_memory))
+ cookieFlags |= QEMU_MIGRATION_COOKIE_MEMORY_HOTPLUG;
+
if (!(mig = qemuMigrationEatCookie(driver, vm, NULL, 0, 0)))
goto cleanup;
--
2.4.5
9 years, 3 months
[libvirt] [PATCH v2] qemu: Do not reset labels when migration fails
by Jiri Denemark
When stopping a domain on the destination host after a failed migration,
we need to avoid resetting security labels, since the domain is still
running on the source host. While we were correctly doing so in some
cases, there were still some paths that did this wrong.
https://bugzilla.redhat.com/show_bug.cgi?id=1242904
Signed-off-by: Jiri Denemark <jdenemar(a)redhat.com>
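The fix follows the same pattern in every affected path: derive the stop
flags from the async job type before tearing the domain down, so an
incoming migration never resets labels that the still-running source
domain depends on. Condensed from the hunks below:

    unsigned int stopFlags = 0;

    if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
        stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED; /* keep labels */

    qemuProcessStop(driver, vm, stopReason, stopFlags);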
---
Notes:
Version 2:
- fix qemuProcessStop call in qemuProcessReconnect
src/qemu/qemu_driver.c | 5 ++++-
src/qemu/qemu_migration.c | 3 ++-
src/qemu/qemu_process.c | 19 ++++++++++++++-----
3 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 5b22639..b9278f8 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -2187,6 +2187,7 @@ qemuDomainDestroyFlags(virDomainPtr dom,
int ret = -1;
virObjectEventPtr event = NULL;
qemuDomainObjPrivatePtr priv;
+ unsigned int stopFlags = 0;
virCheckFlags(VIR_DOMAIN_DESTROY_GRACEFUL, -1);
@@ -2200,6 +2201,8 @@ qemuDomainDestroyFlags(virDomainPtr dom,
qemuDomainSetFakeReboot(driver, vm, false);
+ if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
/* We need to prevent monitor EOF callback from doing our work (and sending
* misleading events) while the vm is unlocked inside BeginJob/ProcessKill API
@@ -2234,7 +2237,7 @@ qemuDomainDestroyFlags(virDomainPtr dom,
goto endjob;
}
- qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED, 0);
+ qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED, stopFlags);
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index f5866c4..364c489 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -3463,7 +3463,8 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
stop:
virDomainAuditStart(vm, "migrated", false);
- qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
+ qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+ VIR_QEMU_PROCESS_STOP_MIGRATED);
endjob:
qemuMigrationJobFinish(driver, vm);
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 23baa82..694c5cd 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -285,6 +285,7 @@ qemuProcessHandleMonitorEOF(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
int eventReason = VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN;
int stopReason = VIR_DOMAIN_SHUTOFF_SHUTDOWN;
const char *auditReason = "shutdown";
+ unsigned int stopFlags = 0;
VIR_DEBUG("Received EOF on %p '%s'", vm, vm->def->name);
@@ -310,14 +311,16 @@ qemuProcessHandleMonitorEOF(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
auditReason = "failed";
}
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
qemuMigrationErrorSave(driver, vm->def->name,
qemuMonitorLastError(priv->mon));
+ }
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
eventReason);
- qemuProcessStop(driver, vm, stopReason, 0);
+ qemuProcessStop(driver, vm, stopReason, stopFlags);
virDomainAuditStop(vm, auditReason);
if (!vm->persistent) {
@@ -3732,10 +3735,13 @@ qemuProcessReconnect(void *opaque)
virQEMUDriverConfigPtr cfg;
size_t i;
int ret;
+ unsigned int stopFlags = 0;
VIR_FREE(data);
qemuDomainObjRestoreJob(obj, &oldjob);
+ if (oldjob.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
cfg = virQEMUDriverGetConfig(driver);
priv = obj->privateData;
@@ -3916,7 +3922,7 @@ qemuProcessReconnect(void *opaque)
* really is and FAILED means "failed to start" */
state = VIR_DOMAIN_SHUTOFF_UNKNOWN;
}
- qemuProcessStop(driver, obj, state, 0);
+ qemuProcessStop(driver, obj, state, stopFlags);
}
if (!obj->persistent)
@@ -5693,9 +5699,13 @@ qemuProcessAutoDestroy(virDomainObjPtr dom,
virQEMUDriverPtr driver = opaque;
qemuDomainObjPrivatePtr priv = dom->privateData;
virObjectEventPtr event = NULL;
+ unsigned int stopFlags = 0;
VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);
+ if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
+
if (priv->job.asyncJob) {
VIR_DEBUG("vm=%s has long-term job active, cancelling",
dom->def->name);
@@ -5708,8 +5718,7 @@ qemuProcessAutoDestroy(virDomainObjPtr dom,
VIR_DEBUG("Killing domain");
- qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED,
- VIR_QEMU_PROCESS_STOP_MIGRATED);
+ qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED, stopFlags);
virDomainAuditStop(dom, "destroyed");
event = virDomainEventLifecycleNewFromObj(dom,
--
2.5.0
9 years, 3 months
[libvirt] [PATCH] qemu: Do not reset labels when migration fails
by Jiri Denemark
When stopping a domain on the destination host after a failed migration,
we need to avoid resetting security labels, since the domain is still
running on the source host. While we were correctly doing so in some
cases, there were still some paths that did this wrong.
https://bugzilla.redhat.com/show_bug.cgi?id=1242904
Signed-off-by: Jiri Denemark <jdenemar(a)redhat.com>
---
src/qemu/qemu_driver.c | 5 ++++-
src/qemu/qemu_migration.c | 3 ++-
src/qemu/qemu_process.c | 14 ++++++++++----
3 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 5b22639..b9278f8 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -2187,6 +2187,7 @@ qemuDomainDestroyFlags(virDomainPtr dom,
int ret = -1;
virObjectEventPtr event = NULL;
qemuDomainObjPrivatePtr priv;
+ unsigned int stopFlags = 0;
virCheckFlags(VIR_DOMAIN_DESTROY_GRACEFUL, -1);
@@ -2200,6 +2201,8 @@ qemuDomainDestroyFlags(virDomainPtr dom,
qemuDomainSetFakeReboot(driver, vm, false);
+ if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
/* We need to prevent monitor EOF callback from doing our work (and sending
* misleading events) while the vm is unlocked inside BeginJob/ProcessKill API
@@ -2234,7 +2237,7 @@ qemuDomainDestroyFlags(virDomainPtr dom,
goto endjob;
}
- qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED, 0);
+ qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_DESTROYED, stopFlags);
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
VIR_DOMAIN_EVENT_STOPPED_DESTROYED);
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index f5866c4..364c489 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -3463,7 +3463,8 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
stop:
virDomainAuditStart(vm, "migrated", false);
- qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED, 0);
+ qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_FAILED,
+ VIR_QEMU_PROCESS_STOP_MIGRATED);
endjob:
qemuMigrationJobFinish(driver, vm);
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 23baa82..978dc21 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -285,6 +285,7 @@ qemuProcessHandleMonitorEOF(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
int eventReason = VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN;
int stopReason = VIR_DOMAIN_SHUTOFF_SHUTDOWN;
const char *auditReason = "shutdown";
+ unsigned int stopFlags = 0;
VIR_DEBUG("Received EOF on %p '%s'", vm, vm->def->name);
@@ -310,14 +311,16 @@ qemuProcessHandleMonitorEOF(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
auditReason = "failed";
}
- if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
+ stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
qemuMigrationErrorSave(driver, vm->def->name,
qemuMonitorLastError(priv->mon));
+ }
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_STOPPED,
eventReason);
- qemuProcessStop(driver, vm, stopReason, 0);
+ qemuProcessStop(driver, vm, stopReason, stopFlags);
virDomainAuditStop(vm, auditReason);
if (!vm->persistent) {
@@ -5693,9 +5696,13 @@ qemuProcessAutoDestroy(virDomainObjPtr dom,
virQEMUDriverPtr driver = opaque;
qemuDomainObjPrivatePtr priv = dom->privateData;
virObjectEventPtr event = NULL;
+ unsigned int stopFlags = 0;
VIR_DEBUG("vm=%s, conn=%p", dom->def->name, conn);
+ if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN)
+ stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
+
if (priv->job.asyncJob) {
VIR_DEBUG("vm=%s has long-term job active, cancelling",
dom->def->name);
@@ -5708,8 +5715,7 @@ qemuProcessAutoDestroy(virDomainObjPtr dom,
VIR_DEBUG("Killing domain");
- qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED,
- VIR_QEMU_PROCESS_STOP_MIGRATED);
+ qemuProcessStop(driver, dom, VIR_DOMAIN_SHUTOFF_DESTROYED, stopFlags);
virDomainAuditStop(dom, "destroyed");
event = virDomainEventLifecycleNewFromObj(dom,
--
2.5.0
9 years, 3 months
[libvirt] [PATCH] There is no virDomainFindBy{ID, Name, UUID} anymore
by Cao jin
s/virDomainFindBy/virDomainObjListFindBy/
Signed-off-by: Cao jin <caoj.fnst(a)cn.fujitsu.com>
---
src/qemu/THREADS.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/qemu/THREADS.txt b/src/qemu/THREADS.txt
index f3502b4..22d590e 100644
--- a/src/qemu/THREADS.txt
+++ b/src/qemu/THREADS.txt
@@ -25,7 +25,7 @@ There are a number of locks on various objects
* virDomainObjPtr
- Will be locked after calling any of the virDomainFindBy{ID,Name,UUID}
+ Will be locked after calling any of the virDomainObjListFindBy{ID,Name,UUID}
methods. However, preferred method is qemuDomObjFromDomain() that uses
virDomainFindByUUIDRef() which also increases the reference counter and
finds the domain in the domain list without blocking all other lookups.
--
2.1.0
9 years, 3 months