[libvirt] [test-API][PATCH v4] Add test module ownership_test
by Wayne Sun
* repos/domain/ownership_test.py
Set the dynamic_ownership value in /etc/libvirt/qemu.conf, then
check the ownership of the saved file after domain save and restore.
The test can run on a local filesystem or on root_squash NFS. The
default owner of the saved domain file is qemu:qemu in this case.
---
repos/domain/ownership_test.py | 316 ++++++++++++++++++++++++++++++++++++++++
1 files changed, 316 insertions(+), 0 deletions(-)
create mode 100644 repos/domain/ownership_test.py
diff --git a/repos/domain/ownership_test.py b/repos/domain/ownership_test.py
new file mode 100644
index 0000000..74620f4
--- /dev/null
+++ b/repos/domain/ownership_test.py
@@ -0,0 +1,316 @@
+#!/usr/bin/env python
+"""Set dynamic_ownership in /etc/libvirt/qemu.conf and check
+   the ownership of the saved domain file. The test can run on
+   a local filesystem or on root_squash NFS. The default owner
+   of the saved domain file is qemu:qemu in this case.
+   domain:ownership_test
+       guestname
+           #GUESTNAME#
+       dynamic_ownership
+           enable|disable
+       use_nfs
+           enable|disable
+"""
+
+__author__ = 'Wayne Sun: gsun(a)redhat.com'
+__date__ = 'Mon Jul 25, 2011'
+__version__ = '0.1.0'
+__credits__ = 'Copyright (C) 2011 Red Hat, Inc.'
+__all__ = ['ownership_test']
+
+import os
+import re
+import sys
+
+QEMU_CONF = "/etc/libvirt/qemu.conf"
+SAVE_FILE = "/mnt/test.save"
+TEMP_FILE = "/tmp/test.save"
+
+def append_path(path):
+    """Append root path of package"""
+    if path not in sys.path:
+        sys.path.append(path)
+
+pwd = os.getcwd()
+result = re.search('(.*)libvirt-test-API', pwd)
+append_path(result.group(0))
+
+from lib import connectAPI
+from lib import domainAPI
+from utils.Python import utils
+from exception import LibvirtAPI
+
+def return_close(conn, logger, ret):
+    """Close the hypervisor connection and return the given value"""
+    conn.close()
+    logger.info("closed hypervisor connection")
+    return ret
+
+def check_params(params):
+    """Verify the input parameter dictionary"""
+    logger = params['logger']
+    keys = ['guestname', 'dynamic_ownership', 'use_nfs']
+    for key in keys:
+        if key not in params:
+            logger.error("%s is required" % key)
+            return 1
+    return 0
+
+def check_domain_running(domobj, guestname, logger):
+    """Check that the domain exists and is running"""
+    guest_names = domobj.get_list()
+
+    if guestname not in guest_names:
+        logger.error("%s doesn't exist or is not running" % guestname)
+        return 1
+    else:
+        return 0
+
+def nfs_setup(util, logger):
+    """Set up NFS on localhost
+    """
+    logger.info("set nfs service")
+    cmd = "echo /tmp *\(rw,root_squash\) >> /etc/exports"
+    ret, out = util.exec_cmd(cmd, shell=True)
+    if ret:
+        logger.error("failed to configure the nfs export")
+        return 1
+
+    logger.info("restart nfs service")
+    cmd = "service nfs restart"
+    ret, out = util.exec_cmd(cmd, shell=True)
+    if ret:
+        logger.error("failed to restart the nfs service")
+        return 1
+    else:
+        for i in range(len(out)):
+            logger.info(out[i])
+
+    return 0
+
+def chown_file(util, filepath, logger):
+    """Create a file and set its ownership and mode
+    """
+    if os.path.exists(filepath):
+        os.remove(filepath)
+
+    touch_cmd = "touch %s" % filepath
+    logger.info(touch_cmd)
+    ret, out = util.exec_cmd(touch_cmd, shell=True)
+    if ret:
+        logger.error("failed to touch a new file")
+        logger.error(out[0])
+        return 1
+
+    logger.info("set the ownership of %s to 107:107" % filepath)
+    chown_cmd = "chown 107:107 %s" % filepath
+    ret, out = util.exec_cmd(chown_cmd, shell=True)
+    if ret:
+        logger.error("failed to set the ownership of %s" % filepath)
+        return 1
+
+    logger.info("set the mode of %s to 664" % filepath)
+    cmd = "chmod 664 %s" % filepath
+    ret, out = util.exec_cmd(cmd, shell=True)
+    if ret:
+        logger.error("failed to set the mode of %s" % filepath)
+        return 1
+
+    return 0
+
+def prepare_env(util, dynamic_ownership, use_nfs, logger):
+    """Configure dynamic_ownership in /etc/libvirt/qemu.conf and
+    set the ownership of the target save file
+    """
+    if dynamic_ownership == 'enable':
+        d_ownership = 1
+    elif dynamic_ownership == 'disable':
+        d_ownership = 0
+    else:
+        logger.error("wrong dynamic_ownership value")
+        return 1
+
+    logger.info("set dynamic_ownership in %s to %s" % \
+                (QEMU_CONF, d_ownership))
+    set_cmd = "echo dynamic_ownership = %s >> %s" % \
+              (d_ownership, QEMU_CONF)
+    ret, out = util.exec_cmd(set_cmd, shell=True)
+    if ret:
+        logger.error("failed to set dynamic ownership")
+        return 1
+
+    logger.info("restart libvirtd")
+    restart_cmd = "service libvirtd restart"
+    ret, out = util.exec_cmd(restart_cmd, shell=True)
+    if ret:
+        logger.error("failed to restart libvirtd")
+        return 1
+    else:
+        for i in range(len(out)):
+            logger.info(out[i])
+
+    if use_nfs == 'enable':
+        filepath = TEMP_FILE
+    elif use_nfs == 'disable':
+        filepath = SAVE_FILE
+    else:
+        logger.error("wrong use_nfs value")
+        return 1
+
+    ret = chown_file(util, filepath, logger)
+    if ret:
+        return 1
+
+    if use_nfs == 'enable':
+        ret = nfs_setup(util, logger)
+        if ret:
+            return 1
+
+        cmd = "setsebool virt_use_nfs 1"
+        logger.info(cmd)
+        ret, out = util.exec_cmd(cmd, shell=True)
+        if ret:
+            logger.error("Failed to setsebool virt_use_nfs")
+            return 1
+
+        logger.info("mount the nfs path to /mnt")
+        mount_cmd = "mount -o vers=3 127.0.0.1:/tmp /mnt"
+        ret, out = util.exec_cmd(mount_cmd, shell=True)
+        if ret:
+            logger.error("Failed to mount the nfs path")
+            for i in range(len(out)):
+                logger.info(out[i])
+            return 1
+
+    return 0
+
+def ownership_get(logger):
+    """Check the ownership of the save file"""
+
+    statinfo = os.stat(SAVE_FILE)
+    uid = statinfo.st_uid
+    gid = statinfo.st_gid
+
+    logger.info("the uid and gid of %s are %s:%s" % (SAVE_FILE, uid, gid))
+
+    return 0, uid, gid
+
+def ownership_test(params):
+    """Save a domain to a file, then check the ownership of
+    the file after save and restore
+    """
+    # Initiate and check parameters
+    params_check_result = check_params(params)
+    if params_check_result:
+        return 1
+
+    logger = params['logger']
+    guestname = params['guestname']
+    dynamic_ownership = params['dynamic_ownership']
+    use_nfs = params['use_nfs']
+
+    util = utils.Utils()
+
+    # set env
+    logger.info("prepare the environment")
+    ret = prepare_env(util, dynamic_ownership, use_nfs, logger)
+    if ret:
+        logger.error("failed to prepare the environment")
+        return 1
+
+    # Connect to local hypervisor connection URI
+    uri = util.get_uri('127.0.0.1')
+    conn = connectAPI.ConnectAPI()
+    virconn = conn.open(uri)
+
+    # save domain to the file
+    logger.info("save domain %s to the file %s" % (guestname, SAVE_FILE))
+    domobj = domainAPI.DomainAPI(virconn)
+
+    logger.info("check the domain state")
+    ret = check_domain_running(domobj, guestname, logger)
+    if ret:
+        return return_close(conn, logger, 1)
+
+    try:
+        domobj.save(guestname, SAVE_FILE)
+        logger.info("Successfully saved domain %s to %s" % (guestname, SAVE_FILE))
+    except LibvirtAPI, e:
+        logger.error("API error message: %s, error code is %s" % \
+                     (e.response()['message'], e.response()['code']))
+        logger.error("Error: failed to save domain %s to %s" % \
+                     (guestname, SAVE_FILE))
+        return return_close(conn, logger, 1)
+
+    logger.info("check the ownership of %s after save" % SAVE_FILE)
+    ret, uid, gid = ownership_get(logger)
+    if use_nfs == 'enable':
+        if uid == 107 and gid == 107:
+            logger.info("As expected, the ownership did not change.")
+        else:
+            logger.error("The ownership of %s is %s:%s, not as expected" % \
+                         (SAVE_FILE, uid, gid))
+            return return_close(conn, logger, 1)
+    elif use_nfs == 'disable':
+        if dynamic_ownership == 'enable':
+            if uid == 0 and gid == 0:
+                logger.info("As expected, the ownership changed to root:root")
+            else:
+                logger.error("The ownership of %s is %s:%s, not as expected" % \
+                             (SAVE_FILE, uid, gid))
+                return return_close(conn, logger, 1)
+        elif dynamic_ownership == 'disable':
+            if uid == 107 and gid == 107:
+                logger.info("As expected, the ownership did not change.")
+            else:
+                logger.error("The ownership of %s is %s:%s, not as expected" % \
+                             (SAVE_FILE, uid, gid))
+                return return_close(conn, logger, 1)
+
+    # restore domain from file
+    logger.info("restore the domain from the file")
+    try:
+        domobj.restore(guestname, SAVE_FILE)
+        logger.info("Successfully restored domain %s from %s" % \
+                    (guestname, SAVE_FILE))
+    except LibvirtAPI, e:
+        logger.error("API error message: %s, error code is %s" % \
+                     (e.response()['message'], e.response()['code']))
+        logger.error("Error: failed to restore domain %s from %s" % \
+                     (guestname, SAVE_FILE))
+        return return_close(conn, logger, 1)
+
+    logger.info("check the ownership of %s after restore" % SAVE_FILE)
+    ret, uid, gid = ownership_get(logger)
+    if uid == 107 and gid == 107:
+        logger.info("As expected, the ownership did not change.")
+    else:
+        logger.error("The ownership of %s is %s:%s, did not change back as expected" % \
+                     (SAVE_FILE, uid, gid))
+        return return_close(conn, logger, 1)
+
+    return return_close(conn, logger, 0)
+
+def ownership_test_clean(params):
+    """Clean up the testing environment"""
+    logger = params['logger']
+    use_nfs = params['use_nfs']
+
+    util = utils.Utils()
+
+    if use_nfs == 'enable':
+        if os.path.ismount("/mnt"):
+            umount_cmd = "umount /mnt"
+            ret, out = util.exec_cmd(umount_cmd, shell=True)
+            if ret:
+                logger.error("Failed to unmount the nfs path")
+                for i in range(len(out)):
+                    logger.error(out[i])
+
+        filepath = TEMP_FILE
+    elif use_nfs == 'disable':
+        filepath = SAVE_FILE
+
+    if os.path.exists(filepath):
+        os.remove(filepath)
+
--
1.7.1
[libvirt] [PATCH] Fix typos for snapshot-create-as in virsh help cmd and doc
by Nan Zhang
* tools/virsh.c: Use VSH_OT_STRING instead of VSH_OT_ARGV for diskspec
* tools/virsh.pod: Fix missing -- tag for snapshot-create-as in docs
---
tools/virsh.c | 2 +-
tools/virsh.pod | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/tools/virsh.c b/tools/virsh.c
index 3c6e65a..5c4610e 100644
--- a/tools/virsh.c
+++ b/tools/virsh.c
@@ -12630,7 +12630,7 @@ static const vshCmdOptDef opts_snapshot_create_as[] = {
{"no-metadata", VSH_OT_BOOL, 0, N_("take snapshot but create no metadata")},
{"halt", VSH_OT_BOOL, 0, N_("halt domain after snapshot is created")},
{"disk-only", VSH_OT_BOOL, 0, N_("capture disk state but not vm state")},
- {"diskspec", VSH_OT_ARGV, 0,
+ {"diskspec", VSH_OT_STRING, 0,
N_("disk attributes: disk[,snapshot=type][,driver=type][,file=name]")},
{NULL, 0, 0, NULL}
};
diff --git a/tools/virsh.pod b/tools/virsh.pod
index e82567d..c82cc45 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -1773,7 +1773,7 @@ by command such as B<destroy> or by internal guest action).
 
 =item B<snapshot-create-as> I<domain> {[I<--print-xml>]
 | [I<--no-metadata>] [I<--halt>]} [I<name>] [I<description>]
-[I<--disk-only> [I<diskspec>]...]
+[I<--disk-only>] [I<--diskspec>]...
 
 Create a snapshot for domain I<domain> with the given <name> and
 <description>; if either value is omitted, libvirt will choose a
@@ -1784,8 +1784,8 @@ inactive state after the snapshot is created, and if I<--disk-only>
 is specified, the snapshot will not include vm state.
 
 The I<--disk-only> flag is used to request a disk-only snapshot. When
-this flag is in use, the command can also take additional I<diskspec>
-arguments to add <disk> elements to the xml. Each <diskspec> is in the
+this flag is in use, the command can also take additional I<--diskspec>
+arguments to add <disk> elements to the xml. Each I<--diskspec> is in the
 form B<disk[,snapshot=type][,driver=type][,file=name]>. To include a
 literal comma in B<disk> or in B<file=name>, escape it with a second
 comma. For example, a diskspec of "vda,snapshot=external,file=/path/to,,new"
--
1.7.4.4
[libvirt] [PATCH] doc: Add statement about permissions needed to do a core dump
by Peter Krempa
The documentation did not specify that certain permissions on the
target path are required for the user running the hypervisor to write
a core dump.
---
src/libvirt.c | 4 +++-
tools/virsh.pod | 3 +++
2 files changed, 6 insertions(+), 1 deletions(-)
diff --git a/src/libvirt.c b/src/libvirt.c
index c32c7a6..ee88d87 100644
--- a/src/libvirt.c
+++ b/src/libvirt.c
@@ -2777,7 +2777,9 @@ error:
  *
  * This method will dump the core of a domain on a given file for analysis.
  * Note that for remote Xen Daemon the file path will be interpreted in
- * the remote host.
+ * the remote host. Hypervisors may require execute and/or write permissions
+ * on the destination path specified by the @to argument for the user
+ * running the hypervisor.
  *
  * If @flags includes VIR_DUMP_CRASH, then leave the guest shut off with
  * a crashed state after the dump completes. If @flags includes
diff --git a/tools/virsh.pod b/tools/virsh.pod
index e82567d..ba7af15 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -621,6 +621,9 @@ rather than merely left in a paused state.
 
 If I<--bypass-cache> is specified, the save will avoid the file system
 cache, although this may slow down the operation.
 
+NOTE: Some hypervisors may require execute and/or write permissions on
+I<corefilepath> for the user running the hypervisor.
+
 =item B<dumpxml> I<domain-id> [I<--inactive>] [I<--security-info>]
 [I<--update-cpu>]
--
1.7.3.4
[libvirt] [PATCH] python: Fix libvirt.py generation to include virterror info
by Cole Robinson
Recent generator refactoring broke libvirt.py. With this patch, libvirt.py
is generated exactly the same as before the offending commit.
---
python/generator.py | 7 +++++--
1 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/python/generator.py b/python/generator.py
index 327e1d5..d855d6b 100755
--- a/python/generator.py
+++ b/python/generator.py
@@ -120,7 +120,8 @@ class docParser(xml.sax.handler.ContentHandler):
                 if attrs.has_key('field'):
                     self.function_return_field = attrs['field']
         elif tag == 'enum':
-            if attrs['file'] == "libvirt":
+            if (attrs['file'] == "libvirt" or
+                attrs['file'] == "virterror"):
                 enum(attrs['type'],attrs['name'],attrs['value'])
             elif attrs['file'] == "libvirt-qemu":
                 qemu_enum(attrs['type'],attrs['name'],attrs['value'])
@@ -130,7 +131,9 @@
             print "end %s" % tag
         if tag == 'function':
             if self.function != None:
-                if self.function_module == "libvirt":
+                if (self.function_module == "libvirt" or
+                    self.function_module == "event" or
+                    self.function_module == "virterror"):
                     function(self.function, self.function_descr,
                              self.function_return, self.function_args,
                              self.function_file, self.function_module,
--
1.7.4.4
[libvirt] [PATCH 0/7] Expose QEMU APIs to Python binding
by Osier Yang
This patchset exposes the QEMU APIs to the Python binding. As we
don't intend to support the QEMU APIs officially, they are exposed
separately from the general libvirt APIs via a standalone
libvirt_qemu.py and libvirtmod_qemu.so. There is no class for the
QEMU APIs; they are written directly as functions in
libvirt_qemu.py.
How to use the APIs:
#! /usr/bin/python -u
import libvirt
import libvirt_qemu
conn = libvirt.open(None)
dom = conn.lookupByName('test')
print libvirt_qemu.qemuMonitorCommand(dom, 'info blockstats', 1)
libvirt_qemu.qemuAttach(conn, 2307, 0)
PS: make check, make dist, and make rpm all pass.
[PATCH 1/7] qemu_api: Modify apibuild.py to generate docs for QEMU
[PATCH 2/7] qemu_api: Update Makefile for subdir docs
[PATCH 3/7] qemu_api: Add comments for API
[PATCH 4/7] qemu_api: Add override XML and C files for QEMU APIs
[PATCH 5/7] qemu_api: Update Py binding generator to generate files
[PATCH 6/7] qemu_api: Update Makefile to generate libvirtmod_qemu
[PATCH 7/7] qemu_api: Update libvirt spec file
Regards,
Osier
[libvirt] [PATCH] doc: virsh: Fix command name in man page
by Peter Krempa
Fix cut&paste error having command named domif-setlink instead of
domif-getlink.
---
tools/virsh.pod | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/tools/virsh.pod b/tools/virsh.pod
index e82567d..27d8f42 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -518,7 +518,7 @@ Modify link state of the domain's virtual interface. Possible values for
state are "up" and "down. If --persistent is specified, only the persistent
configuration of the domain is modified.
-=item B<domif-setlink> I<domain> I<interface-MAC> I<--persistent>
+=item B<domif-getlink> I<domain> I<interface-MAC> I<--persistent>
Query link state of the domain's virtual interface. If --persistent
is specified, query the persistent configuration.
--
1.7.3.4
[libvirt] [PATCH] sanlock: add missing test command in virt-sanlock-cleanup.in
by ajia@redhat.com
From: Alex Jia <ajia(a)redhat.com>
* tools/virt-sanlock-cleanup.in: add the missing test command to the
second condition.
https://bugzilla.redhat.com/show_bug.cgi?id=738534
Signed-off-by: Alex Jia <ajia(a)redhat.com>
---
tools/virt-sanlock-cleanup.in | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/tools/virt-sanlock-cleanup.in b/tools/virt-sanlock-cleanup.in
index d8a40a5..72cd5e0 100644
--- a/tools/virt-sanlock-cleanup.in
+++ b/tools/virt-sanlock-cleanup.in
@@ -29,7 +29,7 @@ cd "$LOCKDIR" || exit 1
 
 for MD5 in *
 do
-  if test $MD5 != '*' && $MD5 != $LOCKSPACE ; then
+  if test $MD5 != '*' && test $MD5 != $LOCKSPACE ; then
     RESOURCE="$LOCKSPACE:$MD5:$LOCKDIR/$MD5:0"
     notify -n "Cleanup: $RESOURCE "
     sanlock client command -r $RESOURCE -c /bin/rm -f "$LOCKDIR/$MD5" 2>/dev/null
--
1.7.1
[libvirt] libvirt(-java): virDomainMigrateSetMaxDowntime
by Thomas Treutner
Hi,
I'm having some trouble with virDomainMigrate and
virDomainMigrateSetMaxDowntime. The core problem is that KVM's default
value for the maximum allowed downtime is 30ms (max_downtime in
migration.c; it's nanoseconds there; 0.12.3), which is too low for my VMs
when they're busy (~50% CPU util and above). Migrations then take
literally forever; I had to abort them after 15 minutes or so. I'm using
GBit Ethernet, so plenty of bandwidth should be available. Increasing the
allowed downtime to 50ms seems to help, but I have not tested situations
where the VM is completely utilized. Anyway, the default value is too
low for me, so I tried virDomainMigrateSetMaxDowntime and its Java
wrapper function.
Here I'm facing a problem I can overcome only with a quite crude hack:
org.libvirt.Domain.migrate(..) blocks until the migration is done, which
is of course reasonable. So I tried calling migrateSetMaxDowntime(..)
before migrating, which causes an error:
"Requested operation is not valid: domain is not being migrated"
This tells me that calling migrateSetMaxDowntime is only allowed during
a migration. As I'm migrating VMs automatically and without any user
intervention, I'd need to create some glue code that runs in an extra
thread, waits "some time" in the hope that the migration has been kicked
off in the main thread, and then calls migrateSetMaxDowntime. I'd like to
avoid such quirks in the long run, if possible.
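For illustration, a rough Python sketch of that glue code (the
libvirt-java calls have the same shape; the fixed two-second sleep is
exactly the "wait some time" guess, and every name apart from the
libvirt binding calls is made up):

import threading
import time

import libvirt

def migrate_with_downtime(dom, dest_uri, downtime_ms):
    # Raise the allowed downtime from a second thread, because
    # migrate() blocks in the calling thread until the migration
    # is done.
    def bump():
        time.sleep(2)  # hope the migration job has started by now
        try:
            dom.migrateSetMaxDowntime(downtime_ms, 0)
        except libvirt.libvirtError:
            pass  # called too early, or the migration already finished
    helper = threading.Thread(target=bump)
    helper.start()
    dconn = libvirt.open(dest_uri)
    try:
        dom.migrate(dconn, libvirt.VIR_MIGRATE_LIVE, None, None, 0)
    finally:
        helper.join()
        dconn.close()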
So my question: would it be possible to extend the migrate() method,
or rather the underlying virDomainMigrate() function, with an optional
maxDowntime parameter that is passed down as
QEMU_JOB_SIGNAL_MIGRATE_DOWNTIME so that
qemuDomainWaitForMigrationComplete would set the value? Or are there
easier ways?
Thanks and regards,
-t
[libvirt] [PATCH] qemu: Prevent disk corruption on domain shutdown
by Jiri Denemark
Ever since we introduced fake reboot, we call qemuProcessKill as a
reaction to the SHUTDOWN event. Unfortunately, qemu doesn't guarantee it
has flushed all internal buffers before sending SHUTDOWN, in which case
killing the process forcibly may result in (virtual) disk corruption.
By sending SIGQUIT instead of SIGTERM followed by SIGKILL, we tell qemu
to flush all buffers and exit. Once qemu exits, we will see an EOF on
the monitor connection and tear down the domain. In case qemu ignores
SIGQUIT or just hangs there, the process stays running, but that's not
any different from a possible hang at any other point during the
shutdown process, so I think it's just fine.
---
src/qemu/qemu_process.c | 21 +++++++++++++++++++--
src/qemu/qemu_process.h | 1 +
2 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index f8a8475..8a12e2a 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -445,12 +445,12 @@ qemuProcessHandleShutdown(qemuMonitorPtr mon ATTRIBUTE_UNUSED,
                             qemuProcessFakeReboot,
                             vm) < 0) {
             VIR_ERROR(_("Failed to create reboot thread, killing domain"));
-            qemuProcessKill(vm);
+            qemuProcessQuit(vm);
             if (virDomainObjUnref(vm) == 0)
                 vm = NULL;
         }
     } else {
-        qemuProcessKill(vm);
+        qemuProcessQuit(vm);
     }
     if (vm)
         virDomainObjUnlock(vm);
@@ -3182,6 +3182,23 @@ cleanup:
 }
 
 
+void qemuProcessQuit(virDomainObjPtr vm)
+{
+    VIR_DEBUG("vm=%s pid=%d", vm->def->name, vm->pid);
+
+    if (!virDomainObjIsActive(vm)) {
+        VIR_DEBUG("VM '%s' not active", vm->def->name);
+        return;
+    }
+
+    if (virKillProcess(vm->pid, SIGQUIT) < 0 && errno != ESRCH) {
+        char ebuf[1024];
+        VIR_WARN("Failed to kill process %d: %s",
+                 vm->pid, virStrerror(errno, ebuf, sizeof(ebuf)));
+    }
+}
+
+
 void qemuProcessKill(virDomainObjPtr vm)
 {
     int i;
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index 96ba3f3..ad14cf7 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -68,6 +68,7 @@ int qemuProcessAttach(virConnectPtr conn,
                       virDomainChrSourceDefPtr monConfig,
                       bool monJSON);
 
+void qemuProcessQuit(virDomainObjPtr vm);
 void qemuProcessKill(virDomainObjPtr vm);
 
 int qemuProcessAutoDestroyInit(struct qemud_driver *driver);
--
1.7.6.1
[libvirt] [PATCH v2 0/2] add blkio.weight_device support
by Hu Tao
This series adds support for blkio.weight_device.
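For illustration, a guess at how this might look from the Python
binding once exposed; the parameter name 'device_weight' and the
"<path>,<weight>" string format are assumptions here, not taken from
this series:

#!/usr/bin/python -u
import libvirt

conn = libvirt.open(None)
dom = conn.lookupByName('test')
# one string-typed parameter per device, "<path>,<weight>";
# string-typed parameters are what patch 1 (VIR_TYPED_PARAM_STRING)
# makes possible
dom.setBlkioParameters({'weight': 500,
                        'device_weight': '/dev/sda,500'}, 0)
print dom.blkioParameters(0)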
changes from v1:
- update remote_protocol-structs to make `make check` pass
- fix some memleaks
- compared the sizes of remote_typed_param before and after
patch 1 using pdwtags; the size doesn't change
- an unpatched libvirtd returns the error message
error: Unable to decode message payload
if a patched virsh connects to it and sends a patched
remote_typed_param
Hu Tao (2):
Add VIR_TYPED_PARAM_STRING
add interface for blkio.weight_device
daemon/remote.c | 20 +++++
include/libvirt/libvirt.h.in | 13 +++-
src/conf/domain_conf.c | 142 ++++++++++++++++++++++++++++++++++-
src/conf/domain_conf.h | 15 ++++
src/libvirt_private.syms | 1 +
src/qemu/qemu_cgroup.c | 22 ++++++
src/qemu/qemu_driver.c | 170 +++++++++++++++++++++++++++++++++++++++++-
src/remote/remote_driver.c | 15 ++++
src/remote/remote_protocol.x | 2 +
src/remote_protocol-structs | 2 +
src/util/cgroup.c | 33 ++++++++
src/util/cgroup.h | 3 +
tools/virsh.c | 31 ++++++++
tools/virsh.pod | 5 +-
14 files changed, 467 insertions(+), 7 deletions(-)
--
1.7.3.1