[libvirt] [PATCH] logging: inhibit virtlogd shutdown while log files are open
by Daniel P. Berrange
The virtlogd daemon is launched with a 30 second timeout for
unprivileged users. Unfortunately the timeout is only inhibited
while RPC clients are connected, and they only connect for a
short while to open the log file descriptor. We need to hold
an inhibition for as long as the log file descriptor itself
is open.
Signed-off-by: Daniel P. Berrange <berrange(a)redhat.com>
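For readers skimming the archive, the shape of the fix can be summarised with a small illustrative sketch (this is not libvirt code; the struct and function names below are made up for the example): each open log file takes one shutdown inhibition, and the daemon's idle-exit timer is only allowed to fire once the count drops back to zero.
#include <stdbool.h>
#include <stddef.h>
typedef void (*ShutdownInhibitor)(bool inhibit, void *opaque);
struct Daemon {
    size_t inhibitions;            /* one per open log file descriptor */
};
static void inhibitor(bool inhibit, void *opaque)
{
    struct Daemon *d = opaque;
    if (inhibit)
        d->inhibitions++;          /* a log file was opened */
    else if (d->inhibitions > 0)
        d->inhibitions--;          /* a log file was closed, or opening failed */
}
static bool may_auto_shutdown(const struct Daemon *d)
{
    return d->inhibitions == 0;    /* the 30 second timer may only fire now */
}
int main(void)
{
    struct Daemon d = { 0 };
    ShutdownInhibitor cb = inhibitor;
    cb(true, &d);                  /* virtlogd opened a domain log file */
    if (!may_auto_shutdown(&d)) {
        /* daemon stays alive even with no RPC client connected */
    }
    cb(false, &d);                 /* file closed, idle shutdown allowed again */
    return 0;
}
The actual patch below wires exactly this pairing into virLogHandler via the new virLogHandlerShutdownInhibitor callback.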
---
src/logging/log_daemon.c | 19 +++++++++++++++++--
src/logging/log_handler.c | 33 +++++++++++++++++++++++++++------
src/logging/log_handler.h | 11 +++++++++--
3 files changed, 53 insertions(+), 10 deletions(-)
diff --git a/src/logging/log_daemon.c b/src/logging/log_daemon.c
index 7a1afec..3fda9ca 100644
--- a/src/logging/log_daemon.c
+++ b/src/logging/log_daemon.c
@@ -124,6 +124,17 @@ virLogDaemonFree(virLogDaemonPtr logd)
}
+static void
+virLogDaemonInhibitor(bool inhibit, void *opaque)
+{
+ virLogDaemonPtr daemon = opaque;
+
+ if (inhibit)
+ virNetDaemonAddShutdownInhibition(daemon->dmn);
+ else
+ virNetDaemonRemoveShutdownInhibition(daemon->dmn);
+}
+
static virLogDaemonPtr
virLogDaemonNew(virLogDaemonConfigPtr config, bool privileged)
{
@@ -152,7 +163,9 @@ virLogDaemonNew(virLogDaemonConfigPtr config, bool privileged)
virNetDaemonAddServer(logd->dmn, logd->srv) < 0)
goto error;
- if (!(logd->handler = virLogHandlerNew(privileged)))
+ if (!(logd->handler = virLogHandlerNew(privileged,
+ virLogDaemonInhibitor,
+ logd)))
goto error;
return logd;
@@ -210,7 +223,9 @@ virLogDaemonNewPostExecRestart(virJSONValuePtr object, bool privileged)
}
if (!(logd->handler = virLogHandlerNewPostExecRestart(child,
- privileged)))
+ privileged,
+ virLogDaemonInhibitor,
+ logd)))
goto error;
return logd;
diff --git a/src/logging/log_handler.c b/src/logging/log_handler.c
index 2acbca7..a4f0395 100644
--- a/src/logging/log_handler.c
+++ b/src/logging/log_handler.c
@@ -59,6 +59,9 @@ struct _virLogHandler {
bool privileged;
virLogHandlerLogFilePtr *files;
size_t nfiles;
+
+ virLogHandlerShutdownInhibitor inhibitor;
+ void *opaque;
};
static virClassPtr virLogHandlerClass;
@@ -165,13 +168,16 @@ virLogHandlerDomainLogFileEvent(int watch,
return;
error:
+ handler->inhibitor(false, handler->opaque);
virLogHandlerLogFileClose(handler, logfile);
virObjectUnlock(handler);
}
virLogHandlerPtr
-virLogHandlerNew(bool privileged)
+virLogHandlerNew(bool privileged,
+ virLogHandlerShutdownInhibitor inhibitor,
+ void *opaque)
{
virLogHandlerPtr handler;
@@ -182,6 +188,8 @@ virLogHandlerNew(bool privileged)
goto error;
handler->privileged = privileged;
+ handler->inhibitor = inhibitor;
+ handler->opaque = opaque;
return handler;
@@ -191,7 +199,8 @@ virLogHandlerNew(bool privileged)
static virLogHandlerLogFilePtr
-virLogHandlerLogFilePostExecRestart(virJSONValuePtr object)
+virLogHandlerLogFilePostExecRestart(virLogHandlerPtr handler,
+ virJSONValuePtr object)
{
virLogHandlerLogFilePtr file;
const char *path;
@@ -199,6 +208,8 @@ virLogHandlerLogFilePostExecRestart(virJSONValuePtr object)
if (VIR_ALLOC(file) < 0)
return NULL;
+ handler->inhibitor(true, handler->opaque);
+
if ((path = virJSONValueObjectGetString(object, "path")) == NULL) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("Missing file path in JSON document"));
@@ -226,6 +237,7 @@ virLogHandlerLogFilePostExecRestart(virJSONValuePtr object)
return file;
error:
+ handler->inhibitor(false, handler->opaque);
virLogHandlerLogFileFree(file);
return NULL;
}
@@ -233,14 +245,18 @@ virLogHandlerLogFilePostExecRestart(virJSONValuePtr object)
virLogHandlerPtr
virLogHandlerNewPostExecRestart(virJSONValuePtr object,
- bool privileged)
+ bool privileged,
+ virLogHandlerShutdownInhibitor inhibitor,
+ void *opaque)
{
virLogHandlerPtr handler;
virJSONValuePtr files;
ssize_t n;
size_t i;
- if (!(handler = virLogHandlerNew(privileged)))
+ if (!(handler = virLogHandlerNew(privileged,
+ inhibitor,
+ opaque)))
return NULL;
if (!(files = virJSONValueObjectGet(object, "files"))) {
@@ -259,7 +275,7 @@ virLogHandlerNewPostExecRestart(virJSONValuePtr object,
virLogHandlerLogFilePtr file;
virJSONValuePtr child = virJSONValueArrayGet(files, i);
- if (!(file = virLogHandlerLogFilePostExecRestart(child)))
+ if (!(file = virLogHandlerLogFilePostExecRestart(handler, child)))
goto error;
if (VIR_APPEND_ELEMENT_COPY(handler->files, handler->nfiles, file) < 0)
@@ -290,8 +306,10 @@ virLogHandlerDispose(void *obj)
virLogHandlerPtr handler = obj;
size_t i;
- for (i = 0; i < handler->nfiles; i++)
+ for (i = 0; i < handler->nfiles; i++) {
+ handler->inhibitor(false, handler->opaque);
virLogHandlerLogFileFree(handler->files[i]);
+ }
VIR_FREE(handler->files);
}
@@ -341,6 +359,8 @@ virLogHandlerDomainOpenLogFile(virLogHandlerPtr handler,
virObjectLock(handler);
+ handler->inhibitor(true, handler->opaque);
+
if (!(path = virLogHandlerGetLogFilePathForDomain(handler,
driver,
domuuid,
@@ -400,6 +420,7 @@ virLogHandlerDomainOpenLogFile(virLogHandlerPtr handler,
VIR_FREE(path);
VIR_FORCE_CLOSE(pipefd[0]);
VIR_FORCE_CLOSE(pipefd[1]);
+ handler->inhibitor(false, handler->opaque);
virLogHandlerLogFileFree(file);
virObjectUnlock(handler);
return -1;
diff --git a/src/logging/log_handler.h b/src/logging/log_handler.h
index 1ad755e..e41ac7f 100644
--- a/src/logging/log_handler.h
+++ b/src/logging/log_handler.h
@@ -30,9 +30,16 @@ typedef struct _virLogHandler virLogHandler;
typedef virLogHandler *virLogHandlerPtr;
-virLogHandlerPtr virLogHandlerNew(bool privileged);
+typedef void (*virLogHandlerShutdownInhibitor)(bool inhibit,
+ void *opaque);
+
+virLogHandlerPtr virLogHandlerNew(bool privileged,
+ virLogHandlerShutdownInhibitor inhibitor,
+ void *opaque);
virLogHandlerPtr virLogHandlerNewPostExecRestart(virJSONValuePtr child,
- bool privileged);
+ bool privileged,
+ virLogHandlerShutdownInhibitor inhibitor,
+ void *opaque);
void virLogHandlerFree(virLogHandlerPtr handler);
--
2.5.0
[libvirt] [libvirt-glib v4] gobject: Port to GTask API
by Zeeshan Ali (Khattak)
Drop usage of deprecated GSimpleAsyncResult API.
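As background for the hunks below, here is a minimal sketch of the GTask idiom the series converts to. It is illustrative only (the object and function names are invented, not taken from libvirt-glib), but the GLib calls themselves, g_task_new(), g_task_run_in_thread(), g_task_is_valid() and g_task_propagate_boolean(), are the real API.
#include <gio/gio.h>
/* Runs in a worker thread; the outcome is reported on the task itself */
static void
do_work_in_thread(GTask *task,
                  gpointer source_object,
                  gpointer task_data,
                  GCancellable *cancellable)
{
    gboolean ok = TRUE;   /* ... long-running work using task_data ... */
    if (ok)
        g_task_return_boolean(task, TRUE);
    else
        g_task_return_new_error(task, G_IO_ERROR, G_IO_ERROR_FAILED,
                                "work failed");
}
void
my_object_do_async(GObject *obj,
                   guint flags,
                   GCancellable *cancellable,
                   GAsyncReadyCallback callback,
                   gpointer user_data)
{
    GTask *task = g_task_new(obj, cancellable, callback, user_data);
    /* small scalars can ride along as task data, no helper struct needed */
    g_task_set_task_data(task, GUINT_TO_POINTER(flags), NULL);
    g_task_run_in_thread(task, do_work_in_thread);
    g_object_unref(task);
}
gboolean
my_object_do_finish(GObject *obj, GAsyncResult *result, GError **error)
{
    g_return_val_if_fail(g_task_is_valid(result, obj), FALSE);
    return g_task_propagate_boolean(G_TASK(result), error);
}
Compared with GSimpleAsyncResult, the GTask owns the cancellable and the task data, so most of the per-operation structs and manual error propagation that this patch removes simply become unnecessary.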
---
libvirt-gobject/libvirt-gobject-domain.c | 290 +++++++++---------------
libvirt-gobject/libvirt-gobject-input-stream.c | 77 +++----
libvirt-gobject/libvirt-gobject-output-stream.c | 75 +++---
libvirt-gobject/libvirt-gobject-storage-pool.c | 281 ++++++++++-------------
libvirt-gobject/libvirt-gobject-stream.c | 53 +++--
5 files changed, 322 insertions(+), 454 deletions(-)
diff --git a/libvirt-gobject/libvirt-gobject-domain.c b/libvirt-gobject/libvirt-gobject-domain.c
index 5509ce3..c768cd3 100644
--- a/libvirt-gobject/libvirt-gobject-domain.c
+++ b/libvirt-gobject/libvirt-gobject-domain.c
@@ -370,28 +370,20 @@ gboolean gvir_domain_start(GVirDomain *dom,
return TRUE;
}
-typedef struct {
- guint flags;
-} DomainStartData;
-
-static void domain_start_data_free(DomainStartData *data)
-{
- g_slice_free(DomainStartData, data);
-}
-
static void
-gvir_domain_start_helper(GSimpleAsyncResult *res,
- GObject *object,
+gvir_domain_start_helper(GTask *task,
+ gpointer source_object,
+ gpointer task_data,
GCancellable *cancellable G_GNUC_UNUSED)
{
- GVirDomain *dom = GVIR_DOMAIN(object);
- DomainStartData *data;
+ GVirDomain *dom = GVIR_DOMAIN(source_object);
+ guint flags = GPOINTER_TO_UINT(task_data);
GError *err = NULL;
- data = g_simple_async_result_get_op_res_gpointer(res);
-
- if (!gvir_domain_start(dom, data->flags, &err))
- g_simple_async_result_take_error(res, err);
+ if (!gvir_domain_start(dom, flags, &err))
+ g_task_return_error(task, err);
+ else
+ g_task_return_boolean(task, TRUE);
}
/**
@@ -410,25 +402,18 @@ void gvir_domain_start_async(GVirDomain *dom,
GAsyncReadyCallback callback,
gpointer user_data)
{
- GSimpleAsyncResult *res;
- DomainStartData *data;
+ GTask *task;
g_return_if_fail(GVIR_IS_DOMAIN(dom));
g_return_if_fail((cancellable == NULL) || G_IS_CANCELLABLE(cancellable));
- data = g_slice_new0(DomainStartData);
- data->flags = flags;
-
- res = g_simple_async_result_new(G_OBJECT(dom),
- callback,
- user_data,
- gvir_domain_start_async);
- g_simple_async_result_set_op_res_gpointer (res, data, (GDestroyNotify)domain_start_data_free);
- g_simple_async_result_run_in_thread(res,
- gvir_domain_start_helper,
- G_PRIORITY_DEFAULT,
- cancellable);
- g_object_unref(res);
+ task = g_task_new(G_OBJECT(dom),
+ cancellable,
+ callback,
+ user_data);
+ g_task_set_task_data(task, GUINT_TO_POINTER(flags), NULL);
+ g_task_run_in_thread(task, gvir_domain_start_helper);
+ g_object_unref(task);
}
gboolean gvir_domain_start_finish(GVirDomain *dom,
@@ -436,13 +421,10 @@ gboolean gvir_domain_start_finish(GVirDomain *dom,
GError **err)
{
g_return_val_if_fail(GVIR_IS_DOMAIN(dom), FALSE);
- g_return_val_if_fail(g_simple_async_result_is_valid(result, G_OBJECT(dom), gvir_domain_start_async), FALSE);
+ g_return_val_if_fail(g_task_is_valid(result, dom), FALSE);
g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
- if (g_simple_async_result_propagate_error(G_SIMPLE_ASYNC_RESULT(result), err))
- return FALSE;
-
- return TRUE;
+ return g_task_propagate_boolean(G_TASK(result), err);
}
/**
@@ -472,15 +454,18 @@ gboolean gvir_domain_resume(GVirDomain *dom,
}
static void
-gvir_domain_resume_helper(GSimpleAsyncResult *res,
- GObject *object,
+gvir_domain_resume_helper(GTask *task,
+ gpointer source_object,
+ gpointer task_data G_GNUC_UNUSED,
GCancellable *cancellable G_GNUC_UNUSED)
{
- GVirDomain *dom = GVIR_DOMAIN(object);
+ GVirDomain *dom = GVIR_DOMAIN(source_object);
GError *err = NULL;
if (!gvir_domain_resume(dom, &err))
- g_simple_async_result_take_error(res, err);
+ g_task_return_error(task, err);
+ else
+ g_task_return_boolean(task, TRUE);
}
/**
@@ -497,20 +482,17 @@ void gvir_domain_resume_async(GVirDomain *dom,
GAsyncReadyCallback callback,
gpointer user_data)
{
- GSimpleAsyncResult *res;
+ GTask *task;
g_return_if_fail(GVIR_IS_DOMAIN(dom));
g_return_if_fail((cancellable == NULL) || G_IS_CANCELLABLE(cancellable));
- res = g_simple_async_result_new(G_OBJECT(dom),
- callback,
- user_data,
- gvir_domain_resume_async);
- g_simple_async_result_run_in_thread(res,
- gvir_domain_resume_helper,
- G_PRIORITY_DEFAULT,
- cancellable);
- g_object_unref(res);
+ task = g_task_new(G_OBJECT(dom),
+ cancellable,
+ callback,
+ user_data);
+ g_task_run_in_thread(task, gvir_domain_resume_helper);
+ g_object_unref(task);
}
gboolean gvir_domain_resume_finish(GVirDomain *dom,
@@ -518,13 +500,10 @@ gboolean gvir_domain_resume_finish(GVirDomain *dom,
GError **err)
{
g_return_val_if_fail(GVIR_IS_DOMAIN(dom), FALSE);
- g_return_val_if_fail(g_simple_async_result_is_valid(result, G_OBJECT(dom), gvir_domain_resume_async), FALSE);
+ g_return_val_if_fail(g_task_is_valid(result, dom), FALSE);
g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
- if (g_simple_async_result_propagate_error(G_SIMPLE_ASYNC_RESULT(result), err))
- return FALSE;
-
- return TRUE;
+ return g_task_propagate_boolean(G_TASK(result), err);
}
/**
@@ -556,15 +535,19 @@ gboolean gvir_domain_wakeup(GVirDomain *dom,
}
static void
-gvir_domain_wakeup_helper(GSimpleAsyncResult *res,
- GObject *object,
+gvir_domain_wakeup_helper(GTask *task,
+ gpointer source_object,
+ gpointer task_data,
GCancellable *cancellable G_GNUC_UNUSED)
{
- GVirDomain *dom = GVIR_DOMAIN(object);
+ GVirDomain *dom = GVIR_DOMAIN(source_object);
+ guint flags = GPOINTER_TO_UINT(task_data);
GError *err = NULL;
- if (!gvir_domain_wakeup(dom, (guint)g_simple_async_result_get_op_res_gssize(res), &err))
- g_simple_async_result_take_error(res, err);
+ if (!gvir_domain_wakeup(dom, flags, &err))
+ g_task_return_error(task, err);
+ else
+ g_task_return_boolean(task, TRUE);
}
/**
@@ -583,21 +566,18 @@ void gvir_domain_wakeup_async(GVirDomain *dom,
GAsyncReadyCallback callback,
gpointer user_data)
{
- GSimpleAsyncResult *res;
+ GTask *task;
g_return_if_fail(GVIR_IS_DOMAIN(dom));
g_return_if_fail((cancellable == NULL) || G_IS_CANCELLABLE(cancellable));
- res = g_simple_async_result_new(G_OBJECT(dom),
- callback,
- user_data,
- gvir_domain_wakeup_async);
- g_simple_async_result_set_op_res_gssize (res, (gssize)flags);
- g_simple_async_result_run_in_thread(res,
- gvir_domain_wakeup_helper,
- G_PRIORITY_DEFAULT,
- cancellable);
- g_object_unref(res);
+ task = g_task_new(G_OBJECT(dom),
+ cancellable,
+ callback,
+ user_data);
+ g_task_set_task_data(task, GUINT_TO_POINTER(flags), NULL);
+ g_task_run_in_thread(task, gvir_domain_wakeup_helper);
+ g_object_unref(task);
}
gboolean gvir_domain_wakeup_finish(GVirDomain *dom,
@@ -605,13 +585,10 @@ gboolean gvir_domain_wakeup_finish(GVirDomain *dom,
GError **err)
{
g_return_val_if_fail(GVIR_IS_DOMAIN(dom), FALSE);
- g_return_val_if_fail(g_simple_async_result_is_valid(result, G_OBJECT(dom), gvir_domain_wakeup_async), FALSE);
+ g_return_val_if_fail(g_task_is_valid(result, dom), FALSE);
g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
- if (g_simple_async_result_propagate_error(G_SIMPLE_ASYNC_RESULT(result), err))
- return FALSE;
-
- return TRUE;
+ return g_task_propagate_boolean(G_TASK(result), err);
}
/**
@@ -785,18 +762,19 @@ static void domain_save_to_file_data_free(DomainSaveToFileData *data)
}
static void
-gvir_domain_save_to_file_helper(GSimpleAsyncResult *res,
- GObject *object,
+gvir_domain_save_to_file_helper(GTask *task,
+ gpointer source_object,
+ gpointer task_data,
GCancellable *cancellable G_GNUC_UNUSED)
{
- GVirDomain *dom = GVIR_DOMAIN(object);
- DomainSaveToFileData *data;
+ GVirDomain *dom = GVIR_DOMAIN(source_object);
+ DomainSaveToFileData *data = (DomainSaveToFileData *) task_data;
GError *err = NULL;
- data = g_simple_async_result_get_op_res_gpointer(res);
-
if (!gvir_domain_save_to_file(dom, data->filename, data->custom_conf, data->flags, &err))
- g_simple_async_result_take_error(res, err);
+ g_task_return_error(task, err);
+ else
+ g_task_return_boolean(task, TRUE);
}
/**
@@ -819,7 +797,7 @@ void gvir_domain_save_to_file_async(GVirDomain *dom,
GAsyncReadyCallback callback,
gpointer user_data)
{
- GSimpleAsyncResult *res;
+ GTask *task;
DomainSaveToFileData *data;
g_return_if_fail(GVIR_IS_DOMAIN(dom));
@@ -832,19 +810,15 @@ void gvir_domain_save_to_file_async(GVirDomain *dom,
data->custom_conf = g_object_ref(custom_conf);
data->flags = flags;
- res = g_simple_async_result_new(G_OBJECT(dom),
- callback,
- user_data,
- gvir_domain_save_to_file_async);
- g_simple_async_result_set_op_res_gpointer(res, data, (GDestroyNotify)
- domain_save_to_file_data_free);
-
- g_simple_async_result_run_in_thread(res,
- gvir_domain_save_to_file_helper,
- G_PRIORITY_DEFAULT,
- cancellable);
-
- g_object_unref(res);
+ task = g_task_new(G_OBJECT(dom),
+ cancellable,
+ callback,
+ user_data);
+ g_task_set_task_data(task,
+ data,
+ (GDestroyNotify) domain_save_to_file_data_free);
+ g_task_run_in_thread(task, gvir_domain_save_to_file_helper);
+ g_object_unref(task);
}
/**
@@ -862,16 +836,10 @@ gboolean gvir_domain_save_to_file_finish(GVirDomain *dom,
GError **err)
{
g_return_val_if_fail(GVIR_IS_DOMAIN(dom), FALSE);
- g_return_val_if_fail(g_simple_async_result_is_valid
- (result,
- G_OBJECT(dom),
- gvir_domain_save_to_file_async), FALSE);
+ g_return_val_if_fail(g_task_is_valid(result, dom), FALSE);
g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
- if (g_simple_async_result_propagate_error(G_SIMPLE_ASYNC_RESULT(result), err))
- return FALSE;
-
- return TRUE;
+ return g_task_propagate_boolean(G_TASK(result), err);
}
/**
@@ -1012,22 +980,22 @@ GVirDomainInfo *gvir_domain_get_info(GVirDomain *dom,
}
static void
-gvir_domain_get_info_helper(GSimpleAsyncResult *res,
- GObject *object,
+gvir_domain_get_info_helper(GTask *task,
+ gpointer source_object,
+ gpointer task_data G_GNUC_UNUSED,
GCancellable *cancellable G_GNUC_UNUSED)
{
- GVirDomain *dom = GVIR_DOMAIN(object);
+ GVirDomain *dom = GVIR_DOMAIN(source_object);
GVirDomainInfo *info;
GError *err = NULL;
info = gvir_domain_get_info(dom, &err);
if (err)
- g_simple_async_result_take_error(res, err);
+ g_task_return_error(task, err);
else
- g_simple_async_result_set_op_res_gpointer
- (res,
- info,
- (GDestroyNotify) gvir_domain_info_free);
+ g_task_return_pointer(task,
+ info,
+ (GDestroyNotify) gvir_domain_info_free);
}
/**
@@ -1044,20 +1012,17 @@ void gvir_domain_get_info_async(GVirDomain *dom,
GAsyncReadyCallback callback,
gpointer user_data)
{
- GSimpleAsyncResult *res;
+ GTask *task;
g_return_if_fail(GVIR_IS_DOMAIN(dom));
g_return_if_fail((cancellable == NULL) || G_IS_CANCELLABLE(cancellable));
- res = g_simple_async_result_new(G_OBJECT(dom),
- callback,
- user_data,
- gvir_domain_get_info_async);
- g_simple_async_result_run_in_thread(res,
- gvir_domain_get_info_helper,
- G_PRIORITY_DEFAULT,
- cancellable);
- g_object_unref(res);
+ task = g_task_new(G_OBJECT(dom),
+ cancellable,
+ callback,
+ user_data);
+ g_task_run_in_thread(task, gvir_domain_get_info_helper);
+ g_object_unref(task);
}
/**
@@ -1075,22 +1040,11 @@ GVirDomainInfo *gvir_domain_get_info_finish(GVirDomain *dom,
GAsyncResult *result,
GError **err)
{
- GSimpleAsyncResult *res = G_SIMPLE_ASYNC_RESULT(result);
- GVirDomainInfo *ret;
-
g_return_val_if_fail(GVIR_IS_DOMAIN(dom), NULL);
- g_return_val_if_fail
- (g_simple_async_result_is_valid(result,
- G_OBJECT(dom),
- gvir_domain_get_info_async),
- NULL);
-
- if (g_simple_async_result_propagate_error(res, err))
- return NULL;
-
- ret = g_simple_async_result_get_op_res_gpointer(res);
+ g_return_val_if_fail(g_task_is_valid(result, dom), NULL);
+ g_return_val_if_fail(err == NULL || *err == NULL, NULL);
- return gvir_domain_info_copy (ret);
+ return g_task_propagate_pointer(G_TASK(result), err);
}
/**
@@ -1365,28 +1319,20 @@ gboolean gvir_domain_save (GVirDomain *dom,
return TRUE;
}
-typedef struct {
- guint flags;
-} DomainSaveData;
-
-static void domain_save_data_free(DomainSaveData *data)
-{
- g_slice_free (DomainSaveData, data);
-}
-
static void
-gvir_domain_save_helper(GSimpleAsyncResult *res,
- GObject *object,
+gvir_domain_save_helper(GTask *task,
+ gpointer source_object,
+ gpointer task_data,
GCancellable *cancellable G_GNUC_UNUSED)
{
- GVirDomain *dom = GVIR_DOMAIN(object);
- DomainSaveData *data;
+ GVirDomain *dom = GVIR_DOMAIN(source_object);
+ guint flags = GPOINTER_TO_UINT(task_data);
GError *err = NULL;
- data = g_simple_async_result_get_op_res_gpointer (res);
-
- if (!gvir_domain_save(dom, data->flags, &err))
- g_simple_async_result_take_error(res, err);
+ if (!gvir_domain_save(dom, flags, &err))
+ g_task_return_error(task, err);
+ else
+ g_task_return_boolean(task, TRUE);
}
/**
@@ -1405,25 +1351,18 @@ void gvir_domain_save_async (GVirDomain *dom,
GAsyncReadyCallback callback,
gpointer user_data)
{
- GSimpleAsyncResult *res;
- DomainSaveData *data;
+ GTask *task;
g_return_if_fail(GVIR_IS_DOMAIN(dom));
g_return_if_fail((cancellable == NULL) || G_IS_CANCELLABLE(cancellable));
- data = g_slice_new0(DomainSaveData);
- data->flags = flags;
-
- res = g_simple_async_result_new(G_OBJECT(dom),
- callback,
- user_data,
- gvir_domain_save_async);
- g_simple_async_result_set_op_res_gpointer (res, data, (GDestroyNotify) domain_save_data_free);
- g_simple_async_result_run_in_thread(res,
- gvir_domain_save_helper,
- G_PRIORITY_DEFAULT,
- cancellable);
- g_object_unref(res);
+ task = g_task_new(G_OBJECT(dom),
+ cancellable,
+ callback,
+ user_data);
+ g_task_set_task_data(task, GUINT_TO_POINTER(flags), NULL);
+ g_task_run_in_thread(task, gvir_domain_save_helper);
+ g_object_unref(task);
}
/**
@@ -1441,15 +1380,10 @@ gboolean gvir_domain_save_finish (GVirDomain *dom,
GError **err)
{
g_return_val_if_fail(GVIR_IS_DOMAIN(dom), FALSE);
- g_return_val_if_fail(g_simple_async_result_is_valid(result, G_OBJECT(dom),
- gvir_domain_save_async),
- FALSE);
+ g_return_val_if_fail(g_task_is_valid(result, dom), FALSE);
g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
- if (g_simple_async_result_propagate_error(G_SIMPLE_ASYNC_RESULT(result), err))
- return FALSE;
-
- return TRUE;
+ return g_task_propagate_boolean(G_TASK(result), err);
}
/**
@@ -1682,6 +1616,7 @@ GVirDomainSnapshot *gvir_domain_create_snapshot_finish(GVirDomain *domain,
GError **error)
{
g_return_val_if_fail(g_task_is_valid(result, domain), NULL);
+ g_return_val_if_fail(error == NULL || *error == NULL, NULL);
return g_task_propagate_pointer(G_TASK(result), error);
}
@@ -1847,6 +1782,7 @@ gboolean gvir_domain_fetch_snapshots_finish(GVirDomain *dom,
{
g_return_val_if_fail(GVIR_IS_DOMAIN(dom), FALSE);
g_return_val_if_fail(g_task_is_valid(res, dom), FALSE);
+ g_return_val_if_fail(error == NULL || *error == NULL, FALSE);
return g_task_propagate_boolean(G_TASK(res), error);
}
diff --git a/libvirt-gobject/libvirt-gobject-input-stream.c b/libvirt-gobject/libvirt-gobject-input-stream.c
index ff1a70c..cd107e1 100644
--- a/libvirt-gobject/libvirt-gobject-input-stream.c
+++ b/libvirt-gobject/libvirt-gobject-input-stream.c
@@ -45,8 +45,7 @@ struct _GVirInputStreamPrivate
GVirStream *stream;
/* pending operation metadata */
- GSimpleAsyncResult *result;
- GCancellable *cancellable;
+ GTask *task;
gpointer buffer;
gsize count;
};
@@ -103,48 +102,44 @@ gvir_input_stream_read_ready(GVirStream *stream,
{
GVirInputStream *input_stream = GVIR_INPUT_STREAM(opaque);
GVirInputStreamPrivate *priv = input_stream->priv;
- GSimpleAsyncResult *simple = priv->result;
+ GTask *task = priv->task;
+ GCancellable *cancellable = g_task_get_cancellable(task);
GError *error = NULL;
gssize result;
+ priv->task = NULL;
+
if (!(cond & GVIR_STREAM_IO_CONDITION_READABLE)) {
g_warn_if_reached();
- g_simple_async_result_set_error(simple,
- G_IO_ERROR,
- G_IO_ERROR_INVALID_ARGUMENT,
- "%s",
- "Expected stream to be readable");
+ g_task_return_new_error(task,
+ G_IO_ERROR,
+ G_IO_ERROR_INVALID_ARGUMENT,
+ "%s",
+ "Expected stream to be readable");
goto cleanup;
}
- result = gvir_stream_receive(stream, priv->buffer, priv->count,
- priv->cancellable, &error);
+ result = gvir_stream_receive(stream, priv->buffer, priv->count,
+ cancellable, &error);
+ if (error != NULL) {
+ if (g_error_matches(error, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK)) {
+ g_warn_if_reached();
+ g_task_return_new_error(task,
+ G_IO_ERROR,
+ G_IO_ERROR_INVALID_ARGUMENT,
+ "%s",
+ "Expected stream to be readable");
+ } else {
+ g_task_return_error(task, error);
+ }
- if (g_error_matches(error, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK)) {
- g_warn_if_reached();
- g_simple_async_result_set_error(simple,
- G_IO_ERROR,
- G_IO_ERROR_INVALID_ARGUMENT,
- "%s",
- "Expected stream to be readable");
goto cleanup;
}
- if (result >= 0)
- g_simple_async_result_set_op_res_gssize(simple, result);
-
- if (error)
- g_simple_async_result_take_error(simple, error);
-
- if (priv->cancellable) {
- g_object_unref(priv->cancellable);
- priv->cancellable = NULL;
- }
+ g_task_return_int(task, result);
cleanup:
- priv->result = NULL;
- g_simple_async_result_complete(simple);
- g_object_unref(simple);
+ g_object_unref(task);
return FALSE;
}
@@ -159,7 +154,7 @@ static void gvir_input_stream_read_async(GInputStream *stream,
GVirInputStream *input_stream = GVIR_INPUT_STREAM(stream);
g_return_if_fail(GVIR_IS_INPUT_STREAM(stream));
- g_return_if_fail(input_stream->priv->result == NULL);
+ g_return_if_fail(input_stream->priv->task == NULL);
gvir_stream_add_watch_full(input_stream->priv->stream,
G_PRIORITY_DEFAULT,
@@ -168,12 +163,8 @@ static void gvir_input_stream_read_async(GInputStream *stream,
g_object_ref(stream),
(GDestroyNotify)g_object_unref);
- input_stream->priv->result =
- g_simple_async_result_new(G_OBJECT(stream), callback, user_data,
- gvir_input_stream_read_async);
- if (cancellable)
- g_object_ref(cancellable);
- input_stream->priv->cancellable = cancellable;
+ input_stream->priv->task =
+ g_task_new(stream, cancellable, callback, user_data);
input_stream->priv->buffer = buffer;
input_stream->priv->count = count;
}
@@ -181,22 +172,18 @@ static void gvir_input_stream_read_async(GInputStream *stream,
static gssize gvir_input_stream_read_finish(GInputStream *stream,
GAsyncResult *result,
- GError **error G_GNUC_UNUSED)
+ GError **error)
{
GVirInputStream *input_stream = GVIR_INPUT_STREAM(stream);
- GSimpleAsyncResult *simple;
virStreamPtr handle;
gssize count;
g_return_val_if_fail(GVIR_IS_INPUT_STREAM(stream), -1);
- g_return_val_if_fail(g_simple_async_result_is_valid(result, G_OBJECT(stream),
- gvir_input_stream_read_async),
- -1);
+ g_return_val_if_fail(g_task_is_valid(result, stream), -1);
+ g_return_val_if_fail(error == NULL || *error == NULL, -1);
g_object_get(input_stream->priv->stream, "handle", &handle, NULL);
- simple = G_SIMPLE_ASYNC_RESULT(result);
-
- count = g_simple_async_result_get_op_res_gssize(simple);
+ count = g_task_propagate_int(G_TASK(result), error);
virStreamFree(handle);
diff --git a/libvirt-gobject/libvirt-gobject-output-stream.c b/libvirt-gobject/libvirt-gobject-output-stream.c
index f39328b..a9c1236 100644
--- a/libvirt-gobject/libvirt-gobject-output-stream.c
+++ b/libvirt-gobject/libvirt-gobject-output-stream.c
@@ -45,8 +45,7 @@ struct _GVirOutputStreamPrivate
GVirStream *stream;
/* pending operation metadata */
- GSimpleAsyncResult *result;
- GCancellable *cancellable;
+ GTask *task;
const void * buffer;
gsize count;
};
@@ -103,48 +102,44 @@ gvir_output_stream_write_ready(GVirStream *stream,
{
GVirOutputStream *output_stream = GVIR_OUTPUT_STREAM(opaque);
GVirOutputStreamPrivate *priv = output_stream->priv;
- GSimpleAsyncResult *simple = priv->result;
+ GTask *task = priv->task;
+ GCancellable *cancellable = g_task_get_cancellable(task);
GError *error = NULL;
gssize result;
if (!(cond & GVIR_STREAM_IO_CONDITION_WRITABLE)) {
g_warn_if_reached();
- g_simple_async_result_set_error(simple,
- G_IO_ERROR,
- G_IO_ERROR_INVALID_ARGUMENT,
- "%s",
- "Expected stream to be writable");
+ g_task_return_new_error(task,
+ G_IO_ERROR,
+ G_IO_ERROR_INVALID_ARGUMENT,
+ "%s",
+ "Expected stream to be readable");
goto cleanup;
}
result = gvir_stream_send(stream, priv->buffer, priv->count,
- priv->cancellable, &error);
+ cancellable, &error);
+
+ if (error != NULL) {
+ if (g_error_matches(error, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK)) {
+ g_warn_if_reached();
+ g_task_return_new_error(task,
+ G_IO_ERROR,
+ G_IO_ERROR_INVALID_ARGUMENT,
+ "%s",
+ "Expected stream to be writable");
+ } else {
+ g_task_return_error(task, error);
+ }
- if (g_error_matches(error, G_IO_ERROR, G_IO_ERROR_WOULD_BLOCK)) {
- g_warn_if_reached();
- g_simple_async_result_set_error(simple,
- G_IO_ERROR,
- G_IO_ERROR_INVALID_ARGUMENT,
- "%s",
- "Expected stream to be writable");
goto cleanup;
}
- if (result >= 0)
- g_simple_async_result_set_op_res_gssize(simple, result);
-
- if (error)
- g_simple_async_result_take_error(simple, error);
-
- if (priv->cancellable) {
- g_object_unref(priv->cancellable);
- priv->cancellable = NULL;
- }
+ g_task_return_int(task, result);
cleanup:
- priv->result = NULL;
- g_simple_async_result_complete(simple);
- g_object_unref(simple);
+ priv->task = NULL;
+ g_object_unref(task);
return FALSE;
}
@@ -159,7 +154,7 @@ static void gvir_output_stream_write_async(GOutputStream *stream,
GVirOutputStream *output_stream = GVIR_OUTPUT_STREAM(stream);
g_return_if_fail(GVIR_IS_OUTPUT_STREAM(stream));
- g_return_if_fail(output_stream->priv->result == NULL);
+ g_return_if_fail(output_stream->priv->task == NULL);
gvir_stream_add_watch_full(output_stream->priv->stream,
G_PRIORITY_DEFAULT,
@@ -168,12 +163,8 @@ static void gvir_output_stream_write_async(GOutputStream *stream,
g_object_ref(stream),
(GDestroyNotify)g_object_unref);
- output_stream->priv->result =
- g_simple_async_result_new(G_OBJECT(stream), callback, user_data,
- gvir_output_stream_write_async);
- if (cancellable)
- g_object_ref(cancellable);
- output_stream->priv->cancellable = cancellable;
+ output_stream->priv->task =
+ g_task_new(stream, cancellable, callback, user_data);
output_stream->priv->buffer = buffer;
output_stream->priv->count = count;
}
@@ -181,22 +172,18 @@ static void gvir_output_stream_write_async(GOutputStream *stream,
static gssize gvir_output_stream_write_finish(GOutputStream *stream,
GAsyncResult *result,
- GError **error G_GNUC_UNUSED)
+ GError **error)
{
GVirOutputStream *output_stream = GVIR_OUTPUT_STREAM(stream);
- GSimpleAsyncResult *simple;
virStreamPtr handle;
gssize count;
g_return_val_if_fail(GVIR_IS_OUTPUT_STREAM(stream), -1);
- g_return_val_if_fail(g_simple_async_result_is_valid(result, G_OBJECT(stream),
- gvir_output_stream_write_async),
- -1);
+ g_return_val_if_fail(g_task_is_valid(result, stream), -1);
+ g_return_val_if_fail(error == NULL || *error == NULL, -1);
g_object_get(output_stream->priv->stream, "handle", &handle, NULL);
- simple = G_SIMPLE_ASYNC_RESULT(result);
-
- count = g_simple_async_result_get_op_res_gssize(simple);
+ count = g_task_propagate_int(G_TASK(result), error);
virStreamFree(handle);
diff --git a/libvirt-gobject/libvirt-gobject-storage-pool.c b/libvirt-gobject/libvirt-gobject-storage-pool.c
index a0dcebc..6290fcb 100644
--- a/libvirt-gobject/libvirt-gobject-storage-pool.c
+++ b/libvirt-gobject/libvirt-gobject-storage-pool.c
@@ -439,17 +439,18 @@ cleanup:
}
static void
-gvir_storage_pool_refresh_helper(GSimpleAsyncResult *res,
- GObject *object,
+gvir_storage_pool_refresh_helper(GTask *task,
+ gpointer source_object,
+ gpointer task_data G_GNUC_UNUSED,
GCancellable *cancellable)
{
- GVirStoragePool *pool = GVIR_STORAGE_POOL(object);
+ GVirStoragePool *pool = GVIR_STORAGE_POOL(source_object);
GError *err = NULL;
- if (!gvir_storage_pool_refresh(pool, cancellable, &err)) {
- g_simple_async_result_set_from_error(res, err);
- g_error_free(err);
- }
+ if (!gvir_storage_pool_refresh(pool, cancellable, &err))
+ g_task_return_error(task, err);
+ else
+ g_task_return_boolean(task, TRUE);
}
/**
@@ -464,20 +465,17 @@ void gvir_storage_pool_refresh_async(GVirStoragePool *pool,
GAsyncReadyCallback callback,
gpointer user_data)
{
- GSimpleAsyncResult *res;
+ GTask *task;
g_return_if_fail(GVIR_IS_STORAGE_POOL(pool));
g_return_if_fail((cancellable == NULL) || G_IS_CANCELLABLE(cancellable));
- res = g_simple_async_result_new(G_OBJECT(pool),
- callback,
- user_data,
- gvir_storage_pool_refresh_async);
- g_simple_async_result_run_in_thread(res,
- gvir_storage_pool_refresh_helper,
- G_PRIORITY_DEFAULT,
- cancellable);
- g_object_unref(res);
+ task = g_task_new(G_OBJECT(pool),
+ cancellable,
+ callback,
+ user_data);
+ g_task_run_in_thread(task, gvir_storage_pool_refresh_helper);
+ g_object_unref(task);
}
/**
@@ -490,15 +488,10 @@ gboolean gvir_storage_pool_refresh_finish(GVirStoragePool *pool,
GError **err)
{
g_return_val_if_fail(GVIR_IS_STORAGE_POOL(pool), FALSE);
- g_return_val_if_fail(g_simple_async_result_is_valid(result, G_OBJECT(pool),
- gvir_storage_pool_refresh_async),
- FALSE);
-
- if (g_simple_async_result_propagate_error(G_SIMPLE_ASYNC_RESULT(result),
- err))
- return FALSE;
+ g_return_val_if_fail(g_task_is_valid(result, pool), FALSE);
+ g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
- return TRUE;
+ return g_task_propagate_boolean(G_TASK(result), err);
}
static void gvir_storage_vol_ref(gpointer obj, gpointer ignore G_GNUC_UNUSED)
@@ -658,24 +651,25 @@ typedef struct {
guint flags;
} StoragePoolBuildData;
+static void storage_pool_build_data_free(StoragePoolBuildData *data)
+{
+ g_slice_free(StoragePoolBuildData, data);
+}
+
static void
-gvir_storage_pool_build_helper(GSimpleAsyncResult *res,
- GObject *object,
+gvir_storage_pool_build_helper(GTask *task,
+ gpointer source_object,
+ gpointer task_data,
GCancellable *cancellable G_GNUC_UNUSED)
{
- GVirStoragePool *pool = GVIR_STORAGE_POOL(object);
- StoragePoolBuildData *data;
+ GVirStoragePool *pool = GVIR_STORAGE_POOL(source_object);
+ StoragePoolBuildData *data = (StoragePoolBuildData *) task_data;
GError *err = NULL;
- data = (StoragePoolBuildData *) g_object_get_data(G_OBJECT(res),
- "StoragePoolBuildData");
-
- if (!gvir_storage_pool_build(pool, data->flags, &err)) {
- g_simple_async_result_set_from_error(res, err);
- g_error_free(err);
- }
-
- g_slice_free (StoragePoolBuildData, data);
+ if (!gvir_storage_pool_build(pool, data->flags, &err))
+ g_task_return_error(task, err);
+ else
+ g_task_return_boolean(task, TRUE);
}
/**
@@ -692,7 +686,7 @@ void gvir_storage_pool_build_async (GVirStoragePool *pool,
GAsyncReadyCallback callback,
gpointer user_data)
{
- GSimpleAsyncResult *res;
+ GTask *task;
StoragePoolBuildData *data;
g_return_if_fail(GVIR_IS_STORAGE_POOL(pool));
@@ -701,16 +695,13 @@ void gvir_storage_pool_build_async (GVirStoragePool *pool,
data = g_slice_new0(StoragePoolBuildData);
data->flags = flags;
- res = g_simple_async_result_new(G_OBJECT(pool),
- callback,
- user_data,
- gvir_storage_pool_build_async);
- g_object_set_data(G_OBJECT(res), "StoragePoolBuildData", data);
- g_simple_async_result_run_in_thread(res,
- gvir_storage_pool_build_helper,
- G_PRIORITY_DEFAULT,
- cancellable);
- g_object_unref(res);
+ task = g_task_new(G_OBJECT(pool),
+ cancellable,
+ callback,
+ user_data);
+ g_task_set_task_data(task, data, (GDestroyNotify)storage_pool_build_data_free);
+ g_task_run_in_thread(task, gvir_storage_pool_build_helper);
+ g_object_unref(task);
}
/**
@@ -726,16 +717,10 @@ gboolean gvir_storage_pool_build_finish(GVirStoragePool *pool,
GError **err)
{
g_return_val_if_fail(GVIR_IS_STORAGE_POOL(pool), FALSE);
- g_return_val_if_fail(g_simple_async_result_is_valid(result, G_OBJECT(pool),
- gvir_storage_pool_build_async),
- FALSE);
+ g_return_val_if_fail(g_task_is_valid(result, pool), FALSE);
g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
- if (g_simple_async_result_propagate_error(G_SIMPLE_ASYNC_RESULT(result),
- err))
- return FALSE;
-
- return TRUE;
+ return g_task_propagate_boolean(G_TASK(result), err);
}
/**
@@ -762,17 +747,18 @@ gboolean gvir_storage_pool_undefine (GVirStoragePool *pool,
}
static void
-gvir_storage_pool_undefine_helper(GSimpleAsyncResult *res,
- GObject *object,
+gvir_storage_pool_undefine_helper(GTask *task,
+ gpointer source_object,
+ gpointer task_data G_GNUC_UNUSED,
GCancellable *cancellable G_GNUC_UNUSED)
{
- GVirStoragePool *pool = GVIR_STORAGE_POOL(object);
+ GVirStoragePool *pool = GVIR_STORAGE_POOL(source_object);
GError *err = NULL;
- if (!gvir_storage_pool_undefine(pool, &err)) {
- g_simple_async_result_set_from_error(res, err);
- g_error_free(err);
- }
+ if (!gvir_storage_pool_undefine(pool, &err))
+ g_task_return_error(task, err);
+ else
+ g_task_return_boolean(task, TRUE);
}
/**
@@ -787,20 +773,17 @@ void gvir_storage_pool_undefine_async (GVirStoragePool *pool,
GAsyncReadyCallback callback,
gpointer user_data)
{
- GSimpleAsyncResult *res;
+ GTask *task;
g_return_if_fail(GVIR_IS_STORAGE_POOL(pool));
g_return_if_fail((cancellable == NULL) || G_IS_CANCELLABLE(cancellable));
- res = g_simple_async_result_new(G_OBJECT(pool),
- callback,
- user_data,
- gvir_storage_pool_undefine_async);
- g_simple_async_result_run_in_thread(res,
- gvir_storage_pool_undefine_helper,
- G_PRIORITY_DEFAULT,
- cancellable);
- g_object_unref(res);
+ task = g_task_new(G_OBJECT(pool),
+ cancellable,
+ callback,
+ user_data);
+ g_task_run_in_thread(task, gvir_storage_pool_undefine_helper);
+ g_object_unref(task);
}
/**
@@ -816,16 +799,10 @@ gboolean gvir_storage_pool_undefine_finish(GVirStoragePool *pool,
GError **err)
{
g_return_val_if_fail(GVIR_IS_STORAGE_POOL(pool), FALSE);
- g_return_val_if_fail(g_simple_async_result_is_valid(result, G_OBJECT(pool),
- gvir_storage_pool_undefine_async),
- FALSE);
+ g_return_val_if_fail(g_task_is_valid(result, pool), FALSE);
g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
- if (g_simple_async_result_propagate_error(G_SIMPLE_ASYNC_RESULT(result),
- err))
- return FALSE;
-
- return TRUE;
+ return g_task_propagate_boolean(G_TASK(result), err);
}
/**
@@ -854,23 +831,21 @@ gboolean gvir_storage_pool_start (GVirStoragePool *pool,
}
static void
-gvir_storage_pool_start_helper(GSimpleAsyncResult *res,
- GObject *object,
+gvir_storage_pool_start_helper(GTask *task,
+ gpointer source_object,
+ gpointer task_data,
GCancellable *cancellable G_GNUC_UNUSED)
{
- GVirStoragePool *pool = GVIR_STORAGE_POOL(object);
+ GVirStoragePool *pool = GVIR_STORAGE_POOL(source_object);
StoragePoolBuildData *data;
GError *err = NULL;
- data = (StoragePoolBuildData *) g_object_get_data(G_OBJECT(res),
- "StoragePoolBuildData");
+ data = (StoragePoolBuildData *) task_data;
- if (!gvir_storage_pool_start(pool, data->flags, &err)) {
- g_simple_async_result_set_from_error(res, err);
- g_error_free(err);
- }
-
- g_slice_free (StoragePoolBuildData, data);
+ if (!gvir_storage_pool_start(pool, data->flags, &err))
+ g_task_return_error(task, err);
+ else
+ g_task_return_boolean(task, TRUE);
}
/**
@@ -887,7 +862,7 @@ void gvir_storage_pool_start_async (GVirStoragePool *pool,
GAsyncReadyCallback callback,
gpointer user_data)
{
- GSimpleAsyncResult *res;
+ GTask *task;
StoragePoolBuildData *data;
g_return_if_fail(GVIR_IS_STORAGE_POOL(pool));
@@ -896,16 +871,13 @@ void gvir_storage_pool_start_async (GVirStoragePool *pool,
data = g_slice_new0(StoragePoolBuildData);
data->flags = flags;
- res = g_simple_async_result_new(G_OBJECT(pool),
- callback,
- user_data,
- gvir_storage_pool_start_async);
- g_object_set_data(G_OBJECT(res), "StoragePoolBuildData", data);
- g_simple_async_result_run_in_thread(res,
- gvir_storage_pool_start_helper,
- G_PRIORITY_DEFAULT,
- cancellable);
- g_object_unref(res);
+ task = g_task_new(G_OBJECT(pool),
+ cancellable,
+ callback,
+ user_data);
+ g_task_set_task_data(task, data, (GDestroyNotify)storage_pool_build_data_free);
+ g_task_run_in_thread(task, gvir_storage_pool_start_helper);
+ g_object_unref(task);
}
/**
@@ -921,16 +893,10 @@ gboolean gvir_storage_pool_start_finish(GVirStoragePool *pool,
GError **err)
{
g_return_val_if_fail(GVIR_IS_STORAGE_POOL(pool), FALSE);
- g_return_val_if_fail(g_simple_async_result_is_valid(result, G_OBJECT(pool),
- gvir_storage_pool_start_async),
- FALSE);
+ g_return_val_if_fail(g_task_is_valid(result, pool), FALSE);
g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
- if (g_simple_async_result_propagate_error(G_SIMPLE_ASYNC_RESULT(result),
- err))
- return FALSE;
-
- return TRUE;
+ return g_task_propagate_boolean(G_TASK(result), err);
}
/**
@@ -957,17 +923,18 @@ gboolean gvir_storage_pool_stop (GVirStoragePool *pool,
}
static void
-gvir_storage_pool_stop_helper(GSimpleAsyncResult *res,
- GObject *object,
+gvir_storage_pool_stop_helper(GTask *task,
+ gpointer source_object,
+ gpointer task_data G_GNUC_UNUSED,
GCancellable *cancellable G_GNUC_UNUSED)
{
- GVirStoragePool *pool = GVIR_STORAGE_POOL(object);
+ GVirStoragePool *pool = GVIR_STORAGE_POOL(source_object);
GError *err = NULL;
- if (!gvir_storage_pool_stop(pool, &err)) {
- g_simple_async_result_set_from_error(res, err);
- g_error_free(err);
- }
+ if (!gvir_storage_pool_stop(pool, &err))
+ g_task_return_error(task, err);
+ else
+ g_task_return_boolean(task, TRUE);
}
/**
@@ -982,20 +949,17 @@ void gvir_storage_pool_stop_async (GVirStoragePool *pool,
GAsyncReadyCallback callback,
gpointer user_data)
{
- GSimpleAsyncResult *res;
+ GTask *task;
g_return_if_fail(GVIR_IS_STORAGE_POOL(pool));
g_return_if_fail((cancellable == NULL) || G_IS_CANCELLABLE(cancellable));
- res = g_simple_async_result_new(G_OBJECT(pool),
- callback,
- user_data,
- gvir_storage_pool_stop_async);
- g_simple_async_result_run_in_thread(res,
- gvir_storage_pool_stop_helper,
- G_PRIORITY_DEFAULT,
- cancellable);
- g_object_unref(res);
+ task = g_task_new(G_OBJECT(pool),
+ cancellable,
+ callback,
+ user_data);
+ g_task_run_in_thread(task, gvir_storage_pool_stop_helper);
+ g_object_unref(task);
}
/**
@@ -1011,16 +975,10 @@ gboolean gvir_storage_pool_stop_finish(GVirStoragePool *pool,
GError **err)
{
g_return_val_if_fail(GVIR_IS_STORAGE_POOL(pool), FALSE);
- g_return_val_if_fail(g_simple_async_result_is_valid(result, G_OBJECT(pool),
- gvir_storage_pool_stop_async),
- FALSE);
+ g_return_val_if_fail(g_task_is_valid(result, pool), FALSE);
g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
- if (g_simple_async_result_propagate_error(G_SIMPLE_ASYNC_RESULT(result),
- err))
- return FALSE;
-
- return TRUE;
+ return g_task_propagate_boolean(G_TASK(result), err);
}
/**
@@ -1100,23 +1058,21 @@ gboolean gvir_storage_pool_set_autostart(GVirStoragePool *pool,
}
static void
-gvir_storage_pool_delete_helper(GSimpleAsyncResult *res,
- GObject *object,
+gvir_storage_pool_delete_helper(GTask *task,
+ gpointer source_object,
+ gpointer task_data,
GCancellable *cancellable G_GNUC_UNUSED)
{
- GVirStoragePool *pool = GVIR_STORAGE_POOL(object);
+ GVirStoragePool *pool = GVIR_STORAGE_POOL(source_object);
StoragePoolBuildData *data;
GError *err = NULL;
- data = (StoragePoolBuildData *) g_object_get_data(G_OBJECT(res),
- "StoragePoolBuildData");
+ data = (StoragePoolBuildData *) task_data;
- if (!gvir_storage_pool_delete(pool, data->flags, &err)) {
- g_simple_async_result_set_from_error(res, err);
- g_error_free(err);
- }
-
- g_slice_free (StoragePoolBuildData, data);
+ if (!gvir_storage_pool_delete(pool, data->flags, &err))
+ g_task_return_error(task, err);
+ else
+ g_task_return_boolean(task, TRUE);
}
/**
@@ -1133,7 +1089,7 @@ void gvir_storage_pool_delete_async (GVirStoragePool *pool,
GAsyncReadyCallback callback,
gpointer user_data)
{
- GSimpleAsyncResult *res;
+ GTask *task;
StoragePoolBuildData *data;
g_return_if_fail(GVIR_IS_STORAGE_POOL(pool));
@@ -1142,16 +1098,13 @@ void gvir_storage_pool_delete_async (GVirStoragePool *pool,
data = g_slice_new0(StoragePoolBuildData);
data->flags = flags;
- res = g_simple_async_result_new(G_OBJECT(pool),
- callback,
- user_data,
- gvir_storage_pool_delete_async);
- g_object_set_data(G_OBJECT(res), "StoragePoolBuildData", data);
- g_simple_async_result_run_in_thread(res,
- gvir_storage_pool_delete_helper,
- G_PRIORITY_DEFAULT,
- cancellable);
- g_object_unref(res);
+ task = g_task_new(G_OBJECT(pool),
+ cancellable,
+ callback,
+ user_data);
+ g_task_set_task_data(task, data, (GDestroyNotify)storage_pool_build_data_free);
+ g_task_run_in_thread(task, gvir_storage_pool_delete_helper);
+ g_object_unref(task);
}
/**
@@ -1167,16 +1120,10 @@ gboolean gvir_storage_pool_delete_finish(GVirStoragePool *pool,
GError **err)
{
g_return_val_if_fail(GVIR_IS_STORAGE_POOL(pool), FALSE);
- g_return_val_if_fail(g_simple_async_result_is_valid(result, G_OBJECT(pool),
- gvir_storage_pool_delete_async),
- FALSE);
+ g_return_val_if_fail(g_task_is_valid(result, pool), FALSE);
g_return_val_if_fail(err == NULL || *err == NULL, FALSE);
- if (g_simple_async_result_propagate_error(G_SIMPLE_ASYNC_RESULT(result),
- err))
- return FALSE;
-
- return TRUE;
+ return g_task_propagate_boolean(G_TASK(result), err);
}
G_GNUC_INTERNAL void gvir_storage_pool_delete_vol(GVirStoragePool *pool,
diff --git a/libvirt-gobject/libvirt-gobject-stream.c b/libvirt-gobject/libvirt-gobject-stream.c
index 46dbd9a..2a8fbd5 100644
--- a/libvirt-gobject/libvirt-gobject-stream.c
+++ b/libvirt-gobject/libvirt-gobject-stream.c
@@ -129,14 +129,11 @@ static gboolean gvir_stream_close(GIOStream *io_stream,
return (i_ret && o_ret);
}
-
-static void gvir_stream_close_async(GIOStream *stream,
- int io_priority G_GNUC_UNUSED,
- GCancellable *cancellable,
- GAsyncReadyCallback callback,
- gpointer user_data)
+static gboolean close_in_idle (gpointer data)
{
- GSimpleAsyncResult *res;
+ GTask *task = G_TASK (data);
+ GIOStream *stream = G_IO_STREAM(g_task_get_source_object (task));
+ GCancellable *cancellable = g_task_get_cancellable (task);
GIOStreamClass *class;
GError *error;
@@ -146,27 +143,41 @@ static void gvir_stream_close_async(GIOStream *stream,
error = NULL;
if (class->close_fn &&
!class->close_fn(stream, cancellable, &error)) {
- g_simple_async_report_take_gerror_in_idle(G_OBJECT (stream),
- callback, user_data,
- error);
- return;
+ g_task_return_error(task, error);
+ return FALSE;
}
- res = g_simple_async_result_new(G_OBJECT (stream),
- callback,
- user_data,
- gvir_stream_close_async);
- g_simple_async_result_complete_in_idle(res);
- g_object_unref (res);
+ g_task_return_boolean(task, TRUE);
+
+ return FALSE;
}
+static void gvir_stream_close_async(GIOStream *stream,
+ int io_priority G_GNUC_UNUSED,
+ GCancellable *cancellable,
+ GAsyncReadyCallback callback,
+ gpointer user_data)
+{
+ GTask *task;
+
+ task = g_task_new(G_OBJECT(stream),
+ cancellable,
+ callback,
+ user_data);
+ g_idle_add(close_in_idle, task);
+ g_object_unref(task);
+}
static gboolean
-gvir_stream_close_finish(GIOStream *stream G_GNUC_UNUSED,
- GAsyncResult *result G_GNUC_UNUSED,
- GError **error G_GNUC_UNUSED)
+gvir_stream_close_finish(GIOStream *stream,
+ GAsyncResult *result,
+ GError **error)
{
- return TRUE;
+ g_return_val_if_fail(GVIR_IS_STREAM(stream), -1);
+ g_return_val_if_fail(g_task_is_valid(result, stream), -1);
+ g_return_val_if_fail(error == NULL || *error == NULL, -1);
+
+ return g_task_propagate_boolean(G_TASK(result), error);
}
--
2.5.0
[libvirt] [PATCH v3 0/7] qemu: Add support for -incoming defer
by Jiri Denemark
Traditionally, we pass incoming migration URI on QEMU command line,
which has some drawbacks. Depending on the URI QEMU may initialize its
migration state immediately without giving us a chance to set any
additional migration parameters (this applies mainly for fd: URIs). For
some URIs the monitor may be completely blocked from the beginning until
migration is finished, which means we may be stuck in qmp_capabilities
command without being able to send any QMP commands.
QEMU solved this by introducing "defer" parameter for -incoming command
line option. This will tell QEMU to prepare for an incoming migration
while the actual incoming URI is sent using migrate-incoming QMP
command. Before calling this command we can normally talk to the
monitor and even set any migration parameters which will be honored by
the incoming migration.
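To make the flow concrete, here is a rough sketch of the destination-side sequence when QEMU is started with "-incoming defer". The program only prints the QMP messages a monitor client could send; the specific capability and port shown are illustrative assumptions and are not taken from this series.
#include <stdio.h>
int main(void)
{
    /* 1. complete the usual QMP handshake */
    puts("{ \"execute\": \"qmp_capabilities\" }");
    /* 2. the monitor is fully usable, so migration capabilities and
     *    parameters can be configured before any migration state exists */
    puts("{ \"execute\": \"migrate-set-capabilities\", \"arguments\":"
         " { \"capabilities\": [ { \"capability\": \"xbzrle\","
         " \"state\": true } ] } }");
    /* 3. only now hand QEMU the actual incoming URI */
    puts("{ \"execute\": \"migrate-incoming\","
         " \"arguments\": { \"uri\": \"tcp:0.0.0.0:49152\" } }");
    return 0;
}
The ordering is the whole point: because the listening URI is only supplied by migrate-incoming, anything configured beforehand is guaranteed to apply to the incoming stream, which is exactly what fd: URIs on the command line could not offer.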
Jiri Denemark (7):
qemu: Introduce qemuProcessInit
qemu: Introduce qemuProcessLaunch
qemu: Introduce qemuProcessFinishStartup
qemu: Separate incoming URI generation from qemuMigrationPrepareAny
qemu: Kill QEMU process if Prepare phase fails
qemu: Skip starting NBD servers for offline migration
qemu: Use qemuProcessLaunch in migration Prepare phase
src/qemu/qemu_migration.c | 247 +++++++++++++----------
src/qemu/qemu_process.c | 489 +++++++++++++++++++++++++++++-----------------
src/qemu/qemu_process.h | 20 ++
3 files changed, 466 insertions(+), 290 deletions(-)
--
2.6.3
[libvirt] [PATCHv5 0/9] Hyper-v crash feature support
by Dmitry Andreev
A new Hyper-V CPU feature 'hv_crash' was added to QEMU. The feature
will become available in v2.5.0.
What is changed in v5:
* minor code fixes
* code was moved between patches
* patch sequence changed
Dmitry Andreev (9):
conf: refactor code for checking ABI stability of panic device
conf: add 'model' attribute for panic device with values isa, pseries,
hyperv
tests: add tests for the new panic device attribute - 'model'
qemu: add support for hv_crash feature as a panic device
tests: rework tests for panic devices
tests: add tests for the new 'hyperv' panic device model
Allow multiple panic devices
tests: add tests for multiple panic devices
conf: reject multiple panic devices of same model
docs/formatdomain.html.in | 19 +++-
docs/schemas/domaincommon.rng | 13 ++-
src/conf/domain_conf.c | 123 +++++++++++++--------
src/conf/domain_conf.h | 15 ++-
src/qemu/qemu_command.c | 80 ++++++++++++--
src/qemu/qemu_domain.c | 30 ++++-
tests/qemuargv2xmltest.c | 1 +
.../qemuxml2argv-hyperv-panic.args | 21 ++++
.../qemuxml2argvdata/qemuxml2argv-hyperv-panic.xml | 25 +++++
.../qemuxml2argv-panic-double.args | 21 ++++
.../qemuxml2argvdata/qemuxml2argv-panic-double.xml | 28 +++++
tests/qemuxml2argvdata/qemuxml2argv-panic-isa.xml | 31 ++++++
.../qemuxml2argv-panic-no-address.xml | 2 +-
.../qemuxml2argv-panic-pseries.xml | 30 +++++
.../qemuxml2argvdata/qemuxml2argv-pseries-disk.xml | 2 +-
.../qemuxml2argv-pseries-nvram.xml | 2 +-
tests/qemuxml2argvtest.c | 3 +
tests/qemuxml2xmloutdata/qemuxml2xmlout-panic.xml | 31 ++++++
.../qemuxml2xmlout-pseries-panic-missing.xml | 2 +-
.../qemuxml2xmlout-pseries-panic-no-address.xml | 30 +++++
tests/qemuxml2xmltest.c | 8 +-
21 files changed, 448 insertions(+), 69 deletions(-)
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-hyperv-panic.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-hyperv-panic.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-panic-double.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-panic-double.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-panic-isa.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-panic-pseries.xml
create mode 100644 tests/qemuxml2xmloutdata/qemuxml2xmlout-panic.xml
create mode 100644 tests/qemuxml2xmloutdata/qemuxml2xmlout-pseries-panic-no-address.xml
--
1.8.3.1
[libvirt] "migration_address must not be the address of the local machine: 127.0.0.1"
by Laszlo Ersek
I recently upgraded my laptop from RHEL-7.1 to RHEL-7.2.
I always pay attention to *.rpmnew config files, and I manually diff and
merge them with the ones I have in place.
I did the same with "/etc/libvirt/qemu.conf" this time.
Now libvirtd doesn't start for me. Systemd doesn't actually notice the
startup failure (insert bitter joke about systemd being so much better
than startup scripts); it only reports the service inactive/dead (=
unstarted), rather than failed.
But, the libvirtd log file gives the reason:
migration_address must not be the address of the local machine:
127.0.0.1
The error is easy to fix up in the config file, but my question is:
Why must migration_address not be the address of the local machine?
This comes from:
commit 5e0561e115de10da342296bb7c7361e91e368d9c
Author: Chen Fan <chen.fan.fnst(a)cn.fujitsu.com>
Date: Tue Oct 7 12:07:32 2014 +0800
conf: Check whether migration_address is localhost
When enabling the migration_address option, by default it is
set to "127.0.0.1", but it's not a valid address for migration.
so we should add verification and set the default migration_address
to "0.0.0.0".
Signed-off-by: Chen Fan <chen.fan.fnst(a)cn.fujitsu.com>
Signed-off-by: Ján Tomko <jtomko(a)redhat.com>
I checked the mailing list archive. AFAICS, the earliest appearance of
this check is:
https://www.redhat.com/archives/libvir-list/2014-August/msg01503.html
The validity of the proposed check was never questioned, nor explained.
What gives?
BTW, my purpose is not in-host migration (perhaps that's indeed
unsupported, I don't know); I just want to lock down the incoming
migration port (and not just with firewall rules).
If there's a way to disable incoming migration in libvirtd, I'd be
interested in that.
Thanks!
Laszlo
8 years, 12 months
[libvirt] ANNOUNCE: virt-manager 1.3.0 released
by Cole Robinson
I'm happy to announce the release of virt-manager 1.3.0!
virt-manager is a desktop application for managing KVM, Xen, and LXC
virtualization via libvirt.
The release can be downloaded from:
http://virt-manager.org/download/
This release includes:
- Git hosting moved to http://github.com/virt-manager/virt-manager
- Switch translation infrastructure from transifex to fedora.zanata.org
- Add dogtail UI tests and infrastructure
- Improved support for s390x kvm (Kevin Zhao)
- virt-install and virt-manager now remove created disk images if VM
install startup fails
- Replace urlgrabber usage with requests and urllib2
- virt-install: add --network virtualport support for openvswitch
(Daniel P. Berrange)
- virt-install: support multiple --security labels
- virt-install: support --features kvm_hidden=on|off (Pavel Hrdina)
- virt-install: add --features pmu=on|off
- virt-install: add --features pvspinlock=on|off (Abhijeet Kasurde)
- virt-install: add --events on_lockfailure=on|off (Abhijeet Kasurde)
- virt-install: add --network link_state=up|down
- virt-install: add --vcpu placement=static|auto
Thanks to everyone who has contributed to this release through testing,
bug reporting, submitting patches, and otherwise sending in feedback!
Thanks,
Cole
[libvirt] [PATCH v2 0/7] qemu: Add support for -incoming defer
by Jiri Denemark
Traditionally, we pass incoming migration URI on QEMU command line,
which has some drawbacks. Depending on the URI QEMU may initialize its
migration state immediately without giving us a chance to set any
additional migration parameters (this applies mainly for fd: URIs). For
some URIs the monitor may be completely blocked from the beginning until
migration is finished, which means we may be stuck in qmp_capabilities
command without being able to send any QMP commands.
QEMU solved this by introducing "defer" parameter for -incoming command
line option. This will tell QEMU to prepare for an incoming migration
while the actual incoming URI is sent using migrate-incoming QMP
command. Before calling this command we can normally talk to the
monitor and even set any migration parameters which will be honored by
the incoming migration.
The first 17 patches from the original series were already pushed. This
is the rest of the series. See individual patches for changes from
the previous version.
Jiri Denemark (7):
qemu: Introduce qemuProcessInit
qemu: Introduce qemuProcessLaunch
qemu: Introduce qemuProcessFinishStartup
qemu: Separate incoming URI generation from qemuMigrationPrepareAny
qemu: Kill QEMU process if Prepare phase fails
qemu: Skip starting NBD servers for offline migration
qemu: Use qemuProcessLaunch in migration Prepare phase
src/qemu/qemu_migration.c | 250 ++++++++++++++----------
src/qemu/qemu_process.c | 488 +++++++++++++++++++++++++++++-----------------
src/qemu/qemu_process.h | 19 ++
3 files changed, 467 insertions(+), 290 deletions(-)
--
2.6.3
[libvirt] [PATCH] apparmor: add missing qemu binaries
by Guido Günther
This adds the qemu binaries as of 1.2.4 in Debian. It also removes a
duplicate sparc64 entry.
---
examples/apparmor/libvirt-qemu | 21 ++++++++++++++++++++-
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/examples/apparmor/libvirt-qemu b/examples/apparmor/libvirt-qemu
index c80ece7..efb4873 100644
--- a/examples/apparmor/libvirt-qemu
+++ b/examples/apparmor/libvirt-qemu
@@ -75,9 +75,12 @@
/usr/bin/kvm rmix,
/usr/bin/qemu rmix,
/usr/bin/qemu-kvm rmix,
+ /usr/bin/qemu-system-aarch64 rmix,
+ /usr/bin/qemu-system-alpha rmix,
/usr/bin/qemu-system-arm rmix,
/usr/bin/qemu-system-cris rmix,
/usr/bin/qemu-system-i386 rmix,
+ /usr/bin/qemu-system-lm32 rmix,
/usr/bin/qemu-system-m68k rmix,
/usr/bin/qemu-system-microblaze rmix,
/usr/bin/qemu-system-microblazeel rmix,
@@ -85,14 +88,22 @@
/usr/bin/qemu-system-mips64 rmix,
/usr/bin/qemu-system-mips64el rmix,
/usr/bin/qemu-system-mipsel rmix,
+ /usr/bin/qemu-system-moxie rmix,
+ /usr/bin/qemu-system-or32 rmix,
/usr/bin/qemu-system-ppc rmix,
/usr/bin/qemu-system-ppc64 rmix,
/usr/bin/qemu-system-ppcemb rmix,
+ /usr/bin/qemu-system-s390x rmix,
/usr/bin/qemu-system-sh4 rmix,
/usr/bin/qemu-system-sh4eb rmix,
/usr/bin/qemu-system-sparc rmix,
/usr/bin/qemu-system-sparc64 rmix,
+ /usr/bin/qemu-system-tricore rmix,
+ /usr/bin/qemu-system-unicore32 rmix,
/usr/bin/qemu-system-x86_64 rmix,
+ /usr/bin/qemu-system-xtensa rmix,
+ /usr/bin/qemu-system-xtensaeb rmix,
+ /usr/bin/qemu-aarch64 rmix,
/usr/bin/qemu-alpha rmix,
/usr/bin/qemu-arm rmix,
/usr/bin/qemu-armeb rmix,
@@ -102,16 +113,24 @@
/usr/bin/qemu-microblaze rmix,
/usr/bin/qemu-microblazeel rmix,
/usr/bin/qemu-mips rmix,
+ /usr/bin/qemu-mips64 rmix,
+ /usr/bin/qemu-mips64el rmix,
/usr/bin/qemu-mipsel rmix,
+ /usr/bin/qemu-mipsn32 rmix,
+ /usr/bin/qemu-mipsn32el rmix,
+ /usr/bin/qemu-nbd rmix,
+ /usr/bin/qemu-or32 rmix,
/usr/bin/qemu-ppc rmix,
/usr/bin/qemu-ppc64 rmix,
/usr/bin/qemu-ppc64abi32 rmix,
+ /usr/bin/qemu-ppc64le rmix,
+ /usr/bin/qemu-s390x rmix,
/usr/bin/qemu-sh4 rmix,
/usr/bin/qemu-sh4eb rmix,
/usr/bin/qemu-sparc rmix,
- /usr/bin/qemu-sparc64 rmix,
/usr/bin/qemu-sparc32plus rmix,
/usr/bin/qemu-sparc64 rmix,
+ /usr/bin/qemu-unicore32 rmix,
/usr/bin/qemu-x86_64 rmix,
/usr/{lib,lib64}/qemu/block-curl.so mr,
/usr/{lib,lib64}/qemu/block-rbd.so mr,
--
2.6.2
[libvirt] how to know if PCI device has SR-IOV PF capability
by Moshe Levi
Hi,
I was looking at the output of virsh nodedev-dumpxml for a PCI device to see whether it has the SR-IOV PF capability.
It seems that if the virtual functions are enabled, the XML looks like [1], but if the PCI device has no VFs enabled, the
output looks like [2].
As you can see, for a PCI device which has no VFs the <capability type='virt_functions'> tag doesn't exist.
Is this by design?
I would expect that a <capability type='virt_functions'/> tag with empty elements would also be included in that case.
Thanks,
Moshe Levi.
[1] root@r-ufm152:~# virsh nodedev-dumpxml pci_0000_03_00_0
<device>
<name>pci_0000_03_00_0</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>mlx5_core</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>3</bus>
<slot>0</slot>
<function>0</function>
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
<vendor id='0x15b3'>Mellanox Technologies</vendor>
<capability type='virt_functions'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x2'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x3'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x4'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x5'/>
</capability>
<iommuGroup number='15'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
</iommuGroup>
<numa node='0'/>
<pci-express>
<link validity='cap' port='0' speed='8' width='16'/>
<link validity='sta' speed='8' width='16'/>
</pci-express>
</capability>
</device>
[2] root@r-ufm152:~# virsh nodedev-dumpxml pci_0000_03_00_1
<device>
<name>pci_0000_03_00_1</name>
<path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.1</path>
<parent>pci_0000_00_02_0</parent>
<driver>
<name>mlx5_core</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>3</bus>
<slot>0</slot>
<function>1</function>
<product id='0x1013'>MT27700 Family [ConnectX-4]</product>
<vendor id='0x15b3'>Mellanox Technologies</vendor>
<iommuGroup number='15'>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
<address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
</iommuGroup>
<numa node='0'/>
<pci-express>
<link validity='cap' port='0' speed='8' width='16'/>
<link validity='sta' speed='8' width='16'/>
</pci-express>
</capability>
</device>
[libvirt] [PATCH java] Add support for Libvirt Qemu Library
by Wido den Hollander
This allows us to send Qemu Guest Agent commands to running domains.
Signed-off-by: Wido den Hollander <wido(a)widodh.nl>
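For comparison, here is a hedged sketch of the underlying C entry point this JNA binding maps to, virDomainQemuAgentCommand() from <libvirt/libvirt-qemu.h>. The connection URI and domain name are only placeholders, and error handling is kept to a minimum.
#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>
#include <libvirt/libvirt-qemu.h>
int main(void)
{
    virConnectPtr conn = virConnectOpen("qemu:///system");   /* placeholder URI */
    virDomainPtr dom = conn ? virDomainLookupByName(conn, "demo") : NULL;
    char *reply = NULL;
    if (dom)
        reply = virDomainQemuAgentCommand(dom,
                                          "{\"execute\": \"guest-ping\"}",
                                          VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT,
                                          0);
    if (reply)
        printf("agent replied: %s\n", reply);
    free(reply);
    if (dom)
        virDomainFree(dom);
    if (conn)
        virConnectClose(conn);
    return 0;
}
Building this needs both libraries at link time (e.g. pkg-config --libs libvirt libvirt-qemu), which is why the Java side loads the separate "virt-qemu" library in LibvirtQemu.java below.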
---
src/main/java/org/libvirt/Domain.java | 36 ++++++++++++++++++++++++++
src/main/java/org/libvirt/Library.java | 3 +++
src/main/java/org/libvirt/jna/LibvirtQemu.java | 16 ++++++++++++
3 files changed, 55 insertions(+)
create mode 100644 src/main/java/org/libvirt/jna/LibvirtQemu.java
diff --git a/src/main/java/org/libvirt/Domain.java b/src/main/java/org/libvirt/Domain.java
index 83a500c..c24df48 100644
--- a/src/main/java/org/libvirt/Domain.java
+++ b/src/main/java/org/libvirt/Domain.java
@@ -8,6 +8,7 @@ import org.libvirt.jna.CString;
import org.libvirt.jna.DomainPointer;
import org.libvirt.jna.DomainSnapshotPointer;
import org.libvirt.jna.Libvirt;
+import org.libvirt.jna.LibvirtQemu;
import org.libvirt.jna.SizeT;
import org.libvirt.jna.virDomainBlockInfo;
import org.libvirt.jna.virDomainBlockStats;
@@ -22,6 +23,7 @@ import org.libvirt.event.LifecycleListener;
import org.libvirt.event.PMWakeupListener;
import org.libvirt.event.PMSuspendListener;
import static org.libvirt.Library.libvirt;
+import static org.libvirt.Library.libvirtqemu;
import static org.libvirt.ErrorHandler.processError;
import static org.libvirt.ErrorHandler.processErrorIfZero;
@@ -141,6 +143,23 @@ public class Domain {
public static final int NO_METADATA = (1 << 4);
}
+ static final class QemuAgentFlags {
+ /**
+ * Do not wait for a result
+ */
+ public static final int VIR_DOMAIN_QEMU_AGENT_COMMAND_NOWAIT = 0;
+
+ /**
+ * Use default timeout value
+ */
+ public static final int VIR_DOMAIN_QEMU_AGENT_COMMAND_DEFAULT = -1;
+
+ /**
+ * Block forever waiting for a result
+ */
+ public static final int VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK = -2;
+ }
+
/**
* the native virDomainPtr.
*/
@@ -1556,4 +1575,21 @@ public class Domain {
return processError(libvirt.virDomainUpdateDeviceFlags(VDP, xml, flags));
}
+ /**
+ * Send a Qemu Guest Agent command to a domain
+ *
+ * @param cmd
+ * The command which has to be send
+ * @param timeout
+ * The timeout for waiting on an answer. See QemuAgentFlags for more information.
+ * @param flags
+ * Flags to be send
+ * @return String
+ * @throws LibvirtException
+ */
+ public String qemuAgentCommand(String cmd, int timeout, int flags) throws LibvirtException {
+ CString result = libvirtqemu.virDomainQemuAgentCommand(this.VDP, cmd, timeout, flags);
+ processError(result);
+ return result.toString();
+ }
}
diff --git a/src/main/java/org/libvirt/Library.java b/src/main/java/org/libvirt/Library.java
index 8e054c6..30f15be 100644
--- a/src/main/java/org/libvirt/Library.java
+++ b/src/main/java/org/libvirt/Library.java
@@ -2,6 +2,7 @@ package org.libvirt;
import org.libvirt.jna.Libvirt;
import org.libvirt.jna.Libvirt.VirEventTimeoutCallback;
+import org.libvirt.jna.LibvirtQemu;
import org.libvirt.jna.CString;
import static org.libvirt.ErrorHandler.processError;
@@ -34,6 +35,7 @@ public final class Library {
};
final static Libvirt libvirt;
+ final static LibvirtQemu libvirtqemu;
// an empty string array constant
// prefer this over creating empty arrays dynamically.
@@ -47,6 +49,7 @@ public final class Library {
} catch (Exception e) {
e.printStackTrace();
}
+ libvirtqemu = LibvirtQemu.INSTANCE;
}
private Library() {}
diff --git a/src/main/java/org/libvirt/jna/LibvirtQemu.java b/src/main/java/org/libvirt/jna/LibvirtQemu.java
new file mode 100644
index 0000000..82213e9
--- /dev/null
+++ b/src/main/java/org/libvirt/jna/LibvirtQemu.java
@@ -0,0 +1,16 @@
+package org.libvirt.jna;
+
+import com.sun.jna.Library;
+import com.sun.jna.Native;
+import com.sun.jna.Platform;
+
+/**
+ * The libvirt Qemu interface which is exposed via JNA
+ */
+
+public interface LibvirtQemu extends Library {
+
+ LibvirtQemu INSTANCE = (LibvirtQemu) Native.loadLibrary(Platform.isWindows() ? "virt-qemu-0" : "virt-qemu", LibvirtQemu.class);
+
+ CString virDomainQemuAgentCommand(DomainPointer virDomainPtr, String cmd, int timeout, int flags);
+}
--
1.9.1