2009/10/14 Daniel P. Berrange <berrange@redhat.com>:
The current virDomainObjListPtr object stores domain objects in
an array. This means that finding a particular object requires
O(n) time and, more critically, acquiring O(n) mutex locks.
The new implementation replaces the array with a virHashTable,
keyed off the UUID. Finding an object by UUID is now O(1) time
and only requires a single mutex lock. Finding by name/id is
unchanged in complexity.
In making this change, all code which iterates over the array had
to be updated to use a hash table iterator callback.
Several functions which were duplicated identically across all
drivers were pulled into domain_conf.c
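To illustrate the O(1) claim, a lookup by UUID against the new list
presumably reduces to formatting the UUID into its string form and
doing a single virHashLookup(), along these lines (a sketch only; the
helper name and the string-keyed layout are assumptions based on the
description above, not code taken from the patch):

    static virDomainObjPtr
    virDomainLookupByUUIDSketch(virDomainObjListPtr doms,
                                const unsigned char *uuid)
    {
        char uuidstr[VIR_UUID_STRING_BUFLEN];
        virDomainObjPtr obj;

        virUUIDFormat(uuid, uuidstr);              /* binary UUID -> string key */
        obj = virHashLookup(doms->objs, uuidstr);  /* O(1) probe */
        if (obj)
            virDomainObjLock(obj);                 /* only one mutex is taken */
        return obj;
    }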
* src/conf/domain_conf.h, src/conf/domain_conf.c: Change
virDomainObjListPtr to use virHashTable. Add an initializer
method virDomainObjListInit, and rename virDomainObjListFree
to virDomainObjListDeinit, since it's not actually freeing
the container, only its contents. Also add some convenience
methods virDomainObjListGetInactiveNames,
virDomainObjListGetActiveIDs and virDomainObjListNumOfDomains
which can be used to implement the correspondingly named
public API entry points in drivers
* src/libvirt_private.syms: Export new methods from domain_conf.h
* src/lxc/lxc_driver.c, src/opennebula/one_driver.c,
src/openvz/openvz_conf.c, src/openvz/openvz_driver.c,
src/qemu/qemu_driver.c, src/test/test_driver.c,
src/uml/uml_driver.c, src/vbox/vbox_tmpl.c: Update all code
to deal with hash tables instead of arrays for domains
---
+
+static void virDomainObjListCopyInactiveNames(void *payload, const char *name ATTRIBUTE_UNUSED, void *opaque)
+{
+    virDomainObjPtr obj = payload;
+    struct virDomainNameData *data = opaque;
You could check whether OOM has already been hit and return early
before calling strdup() again, because that will probably fail again
in an OOM situation (see the sketch after this hunk):

    if (data->oom)
        return;
+    virDomainObjLock(obj);
+    if (!virDomainIsActive(obj) && data->numnames < data->maxnames) {
+        if (!(data->names[data->numnames] = strdup(obj->def->name)))
+            data->oom = 1;
+        else
+            data->numnames++;
+    }
+    virDomainObjUnlock(obj);
+}
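For clarity, the callback with that early return folded in would read
as follows (purely a sketch of the suggestion applied to the hunk
quoted above):

    static void virDomainObjListCopyInactiveNames(void *payload,
                                                  const char *name ATTRIBUTE_UNUSED,
                                                  void *opaque)
    {
        virDomainObjPtr obj = payload;
        struct virDomainNameData *data = opaque;

        /* Don't keep calling strdup() once an allocation has failed */
        if (data->oom)
            return;

        virDomainObjLock(obj);
        if (!virDomainIsActive(obj) && data->numnames < data->maxnames) {
            if (!(data->names[data->numnames] = strdup(obj->def->name)))
                data->oom = 1;
            else
                data->numnames++;
        }
        virDomainObjUnlock(obj);
    }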
+
+
+int virDomainObjListGetInactiveNames(virDomainObjListPtr doms,
+                                     char **const names,
+                                     int maxnames)
+{
+    struct virDomainNameData data = { 0, 0, maxnames, names };
+    int i;
+    virHashForEach(doms->objs, virDomainObjListCopyInactiveNames, &data);
+    if (data.oom)
A virReportOOMError(NULL) call is missing here (see the sketch after
this hunk).
+        goto cleanup;
+
+    return data.numnames;
+
+cleanup:
+    for (i = 0 ; i < data.numnames ; i++)
+        VIR_FREE(data.names[i]);
+    return -1;
+}
+
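With the missing report added, the error path would look roughly like
this (again only a sketch of the quoted hunk plus the fix):

    int virDomainObjListGetInactiveNames(virDomainObjListPtr doms,
                                         char **const names,
                                         int maxnames)
    {
        struct virDomainNameData data = { 0, 0, maxnames, names };
        int i;

        virHashForEach(doms->objs, virDomainObjListCopyInactiveNames, &data);
        if (data.oom) {
            virReportOOMError(NULL);   /* the missing error report */
            goto cleanup;
        }

        return data.numnames;

    cleanup:
        for (i = 0 ; i < data.numnames ; i++)
            VIR_FREE(data.names[i]);
        return -1;
    }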
@@ -887,27 +856,15 @@ static void lxcMonitorEvent(int watch,
                             int events ATTRIBUTE_UNUSED,
                             void *data)
 {
-    lxc_driver_t *driver = data;
-    virDomainObjPtr vm = NULL;
+    lxc_driver_t *driver = lxc_driver;
+    virDomainObjPtr vm = data;
     virDomainEventPtr event = NULL;
-    unsigned int i;
     lxcDriverLock(driver);
-    for (i = 0 ; i < driver->domains.count ; i++) {
-        virDomainObjPtr tmpvm = driver->domains.objs[i];
-        virDomainObjLock(tmpvm);
-        if (tmpvm->monitorWatch == watch) {
-            vm = tmpvm;
-            break;
-        }
-        virDomainObjUnlock(tmpvm);
-    }
-    if (!vm) {
-        virEventRemoveHandle(watch);
-        goto cleanup;
-    }
+    virDomainObjLock(vm);
+    lxcDriverUnlock(driver);
-    if (vm->monitor != fd) {
+    if (vm->monitor != fd || vm->monitorWatch != watch) {
         virEventRemoveHandle(watch);
         goto cleanup;
     }
@@ -925,11 +882,9 @@ static void lxcMonitorEvent(int watch,
     }
 cleanup:
-    if (vm)
-        virDomainObjUnlock(vm);
+    virDomainObjUnlock(vm);
     if (event)
         lxcDomainEventQueue(driver, event);
-    lxcDriverUnlock(driver);
 }
Before this change the lxcDomainEventQueue() call was made while the
driver lock was held; now it's made without the lock being held. I'm
pretty sure this is not intended, because all other calls to
lxcDomainEventQueue() are made while the driver lock is held.
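One way to preserve the old behaviour (just a sketch, not part of the
patch) would be to re-acquire the driver lock around the queueing in
the cleanup path:

    cleanup:
        virDomainObjUnlock(vm);
        if (event) {
            lxcDriverLock(driver);
            lxcDomainEventQueue(driver, event);
            lxcDriverUnlock(driver);
        }
    }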
 static void
 qemudDispatchVMEvent(int watch, int fd, int events, void *opaque) {
-    struct qemud_driver *driver = opaque;
-    virDomainObjPtr vm = NULL;
+    struct qemud_driver *driver = qemu_driver;
+    virDomainObjPtr vm = opaque;
     virDomainEventPtr event = NULL;
-    unsigned int i;
     int quit = 0, failed = 0;
+    /* XXX Normally we have to lock the driver first, to protect
+     * against someone adding/removing the domain. We know,
+     * however, that if we're getting data in this callback
+     * the VM must be running. No-one is allowed to remove
+     * a domain while it is running, so it is safe to not
+     * lock the driver here... */
     qemuDriverLock(driver);
-    for (i = 0 ; i < driver->domains.count ; i++) {
-        virDomainObjPtr tmpvm = driver->domains.objs[i];
-        virDomainObjLock(tmpvm);
-        if (virDomainIsActive(tmpvm) &&
-            tmpvm->monitorWatch == watch) {
-            vm = tmpvm;
-            break;
-        }
-        virDomainObjUnlock(tmpvm);
-    }
-
-    if (!vm)
-        goto cleanup;
+    virDomainObjLock(vm);
+    qemuDriverUnlock(driver);
-    if (vm->monitor != fd) {
+    if (vm->monitor != fd || vm->monitorWatch != watch) {
         failed = 1;
     } else {
         if (events & (VIR_EVENT_HANDLE_HANGUP | VIR_EVENT_HANDLE_ERROR))
@@ -2302,12 +2292,9 @@ qemudDispatchVMEvent(int watch, int fd, int events, void *opaque) {
         }
     }
-cleanup:
-    if (vm)
-        virDomainObjUnlock(vm);
+    virDomainObjUnlock(vm);
     if (event)
         qemuDomainEventQueue(driver, event);
-    qemuDriverUnlock(driver);
 }
As with lxcDomainEventQueue() above, the driver lock is no longer
held while calling qemuDomainEventQueue(), but it should be.
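The analogous sketch for the qemu driver, again taking the driver lock
only around the queueing:

        virDomainObjUnlock(vm);
        if (event) {
            qemuDriverLock(driver);
            qemuDomainEventQueue(driver, event);
            qemuDriverUnlock(driver);
        }
    }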
Apart from those remarks, ACK.
Matthias