[libvirt] [PATCH v3] virsh: Add more human-friendly output of domblkstat command
by Peter Krempa
Users of virsh complain that the output of the domblkstat command
is not intuitive enough. This patch adds an explanation of the
fields returned by this command to the help section for domblkstat
and to the virsh man page. It also adds a --human switch to
domblkstat that prints the fields with more descriptive
labels.
https://bugzilla.redhat.com/show_bug.cgi?id=731656
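For illustration, the two output styles could look like this (a
sketch only; the device name and numbers are invented):

  virsh # domblkstat guest vda
  vda rd_req 12189
  vda rd_bytes 58744320
  vda wr_req 948
  vda wr_bytes 8208384
  vda errs 0

  virsh # domblkstat guest vda --human
  Device: vda
  number of read operations:   12189
  number of read bytes:        58744320
  number of write operations:  948
  number of bytes written:     8208384
  error count:                 0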
Changes to v2:
- Modify for new fields in virDomainBlockStatsFlags
Changes to v1:
- Rebase to current head
---
tools/virsh.c | 114 ++++++++++++++++++++++++++++++++++++++++++++++--------
tools/virsh.pod | 16 +++++++-
2 files changed, 112 insertions(+), 18 deletions(-)
diff --git a/tools/virsh.c b/tools/virsh.c
index 629233f..458252b 100644
--- a/tools/virsh.c
+++ b/tools/virsh.c
@@ -1054,16 +1054,46 @@ cleanup:
*/
static const vshCmdInfo info_domblkstat[] = {
{"help", N_("get device block stats for a domain")},
- {"desc", N_("Get device block stats for a running domain.")},
+ {"desc", N_("Get device block stats for a running domain.\n\n"
+ " Explanation of fields:\n"
+ " rd_req - count of read operations (old format)\n"
+ " rd_operations - count of read operations (new format)\n"
+ " rd_bytes - count of read bytes\n"
+ " rd_total_times - total time read operations took\n"
+ " wr_req - count of write operations (old format)\n"
+ " wr_operations - count of write operations (new format)\n"
+ " wr_bytes - count of written bytes\n"
+ " flush_operations - count of flush operations\n"
+ " flush_total_times - total time flush operations took\n"
+ " errs - error count")},
{NULL,NULL}
};
static const vshCmdOptDef opts_domblkstat[] = {
{"domain", VSH_OT_DATA, VSH_OFLAG_REQ, N_("domain name, id or uuid")},
{"device", VSH_OT_DATA, VSH_OFLAG_REQ, N_("block device")},
+ {"human", VSH_OT_BOOL, 0, N_("print a more human readable output")},
{NULL, 0, 0, NULL}
};
+struct _domblkstat_human_readable {
+ const char *field;
+ const char *human;
+};
+
+static const struct _domblkstat_human_readable domblkstat_translate[] = {
+ { VIR_DOMAIN_BLOCK_STATS_READ_BYTES, N_("number of read bytes: ") }, /* 0 */
+ { VIR_DOMAIN_BLOCK_STATS_READ_REQ, N_("number of read operations: ") }, /* 1 */
+ { VIR_DOMAIN_BLOCK_STATS_READ_TOTAL_TIMES, N_("total duration of reads: ") }, /* 2 */
+ { VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES, N_("number of bytes written: ") }, /* 3 */
+ { VIR_DOMAIN_BLOCK_STATS_WRITE_REQ, N_("number of write operations:") }, /* 4 */
+ { VIR_DOMAIN_BLOCK_STATS_WRITE_TOTAL_TIMES, N_("total duration of writes: ") }, /* 5 */
+ { VIR_DOMAIN_BLOCK_STATS_FLUSH_REQ, N_("number of flush operations:") }, /* 6 */
+ { VIR_DOMAIN_BLOCK_STATS_FLUSH_TOTAL_TIMES, N_("total duration of flushes: ") }, /* 7 */
+ { VIR_DOMAIN_BLOCK_STATS_ERRS, N_("error count: ") }, /* 8 */
+ { NULL, NULL }
+};
+
static bool
cmdDomblkstat (vshControl *ctl, const vshCmd *cmd)
{
@@ -1071,8 +1101,11 @@ cmdDomblkstat (vshControl *ctl, const vshCmd *cmd)
const char *name = NULL, *device = NULL;
struct _virDomainBlockStats stats;
virTypedParameterPtr params = NULL;
+ const char *field_name = NULL;
+ int j = 0;
int rc, nparams = 0;
bool ret = false;
+ bool human = vshCommandOptBool(cmd, "human"); /* enable human readable output */
if (!vshConnectionUsability (ctl, ctl->conn))
return false;
@@ -1104,20 +1137,41 @@ cmdDomblkstat (vshControl *ctl, const vshCmd *cmd)
goto cleanup;
}
- if (stats.rd_req >= 0)
- vshPrint (ctl, "%s rd_req %lld\n", device, stats.rd_req);
+ if (human) {
+ /* human friendly output */
+ vshPrint(ctl, _("Device: %s\n"), device);
+
+ if (stats.rd_req >= 0)
+ vshPrint (ctl, "%s %lld\n", _(domblkstat_translate[1].human), stats.rd_req);
+
+ if (stats.rd_bytes >= 0)
+ vshPrint (ctl, "%s %lld\n", _(domblkstat_translate[0].human), stats.rd_bytes);
+
+ if (stats.wr_req >= 0)
+ vshPrint (ctl, "%s %lld\n", _(domblkstat_translate[4].human), stats.wr_req);
+
+ if (stats.wr_bytes >= 0)
+ vshPrint (ctl, "%s %lld\n", _(domblkstat_translate[3].human), stats.wr_bytes);
+
+ if (stats.errs >= 0)
+ vshPrint (ctl, "%s %lld\n", _(domblkstat_translate[8].human), stats.errs);
+ } else {
+
+ if (stats.rd_req >= 0)
+ vshPrint (ctl, "%s rd_req %lld\n", device, stats.rd_req);
- if (stats.rd_bytes >= 0)
- vshPrint (ctl, "%s rd_bytes %lld\n", device, stats.rd_bytes);
+ if (stats.rd_bytes >= 0)
+ vshPrint (ctl, "%s rd_bytes %lld\n", device, stats.rd_bytes);
- if (stats.wr_req >= 0)
- vshPrint (ctl, "%s wr_req %lld\n", device, stats.wr_req);
+ if (stats.wr_req >= 0)
+ vshPrint (ctl, "%s wr_req %lld\n", device, stats.wr_req);
- if (stats.wr_bytes >= 0)
- vshPrint (ctl, "%s wr_bytes %lld\n", device, stats.wr_bytes);
+ if (stats.wr_bytes >= 0)
+ vshPrint (ctl, "%s wr_bytes %lld\n", device, stats.wr_bytes);
- if (stats.errs >= 0)
- vshPrint (ctl, "%s errs %lld\n", device, stats.errs);
+ if (stats.errs >= 0)
+ vshPrint (ctl, "%s errs %lld\n", device, stats.errs);
+ }
}
} else {
params = vshMalloc(ctl, sizeof(*params) * nparams);
@@ -1129,32 +1183,58 @@ cmdDomblkstat (vshControl *ctl, const vshCmd *cmd)
}
int i;
+
+ /* set for prettier output */
+ if (human) {
+ vshPrint(ctl, N_("Device: %s\n"), device);
+ device = "";
+ }
+
/* XXX: The output sequence will be different. */
for (i = 0; i < nparams; i++) {
+ /* translate messages into a human readable form, if requested */
+ if (human) {
+ /* try to look up the translation into a more human readable form */
+ field_name = NULL;
+ for (j = 0; domblkstat_translate[j].field != NULL; j++) {
+ if (STREQ(params[i].field, domblkstat_translate[j].field)) {
+ field_name = _(domblkstat_translate[j].human);
+
+ break;
+ }
+ }
+
+ /* translation not found, stick with the default field name */
+ if (!field_name)
+ field_name = params[i].field;
+ } else {
+ field_name = params[i].field;
+ }
+
switch(params[i].type) {
case VIR_TYPED_PARAM_INT:
vshPrint (ctl, "%s %s %d\n", device,
- params[i].field, params[i].value.i);
+ field_name, params[i].value.i);
break;
case VIR_TYPED_PARAM_UINT:
vshPrint (ctl, "%s %s %u\n", device,
- params[i].field, params[i].value.ui);
+ field_name, params[i].value.ui);
break;
case VIR_TYPED_PARAM_LLONG:
vshPrint (ctl, "%s %s %lld\n", device,
- params[i].field, params[i].value.l);
+ field_name, params[i].value.l);
break;
case VIR_TYPED_PARAM_ULLONG:
vshPrint (ctl, "%s %s %llu\n", device,
- params[i].field, params[i].value.ul);
+ field_name, params[i].value.ul);
break;
case VIR_TYPED_PARAM_DOUBLE:
vshPrint (ctl, "%s %s %f\n", device,
- params[i].field, params[i].value.d);
+ field_name, params[i].value.d);
break;
case VIR_TYPED_PARAM_BOOLEAN:
vshPrint (ctl, "%s %s %s\n", device,
- params[i].field, params[i].value.b ? _("yes") : _("no"));
+ field_name, params[i].value.b ? _("yes") : _("no"));
break;
default:
vshError(ctl, _("unimplemented block statistics parameter type"));
diff --git a/tools/virsh.pod b/tools/virsh.pod
index d826997..c8d88c9 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -501,13 +501,27 @@ be lost once the guest stops running, but the snapshot contents still
exist, and a new domain with the same name and UUID can restore the
snapshot metadata with B<snapshot-create>.
-=item B<domblkstat> I<domain> I<block-device>
+=item B<domblkstat> I<domain> I<block-device> [I<--human>]
Get device block stats for a running domain. A I<block-device> corresponds
to a unique target name (<target dev='name'/>) or source file (<source
file='name'/>) for one of the disk devices attached to I<domain> (see
also B<domblklist> for listing these names).
+Use I<--human> for a more human readable output.
+
+B<Explanation of fields:>
+ rd_req - count of read operations (old format)
+ rd_operations - count of read operations (new format)
+ rd_bytes - count of read bytes
+ rd_total_times - total time read operations took
+ wr_req - count of write operations (old format)
+ wr_operations - count of write operations (new format)
+ wr_bytes - count of written bytes
+ wr_total_times - total time write operations took
+ flush_operations - count of flush operations
+ flush_total_times - total time flush operations took
+ errs - error count
+
=item B<domifstat> I<domain> I<interface-device>
Get network interface stats for a running domain.
--
1.7.3.4
[libvirt] [PATCH] Threadpool: Initialize new dynamic workers
by Michal Privoznik
Although we were initializing worker threads during pool creation,
we missed doing so in virThreadPoolSendJob. This bug led to a
segmentation fault, as the worker thread calls free() on the
argument it is given.
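The crash pattern, as a minimal C sketch (the worker-data shape is
taken from this patch; the surrounding details are assumed):

  struct virThreadPoolWorkerData {
      virThreadPoolPtr pool;
      virCond *cond;
  };

  static void virThreadPoolWorker(void *opaque)
  {
      struct virThreadPoolWorkerData *data = opaque;
      /* ... serve jobs via data->pool ... */
      VIR_FREE(data); /* the worker owns and frees its argument */
  }

  /* Before this fix, virThreadPoolSendJob started dynamic workers
   * with the pool itself as the argument: */
  virThreadCreate(&pool->workers[pool->nWorkers - 1], true,
                  virThreadPoolWorker, pool); /* worker frees pool! */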
---
src/util/threadpool.c | 13 ++++++++++++-
1 files changed, 12 insertions(+), 1 deletions(-)
diff --git a/src/util/threadpool.c b/src/util/threadpool.c
index 70a75c0..6210b00 100644
--- a/src/util/threadpool.c
+++ b/src/util/threadpool.c
@@ -286,6 +286,7 @@ int virThreadPoolSendJob(virThreadPoolPtr pool,
void *jobData)
{
virThreadPoolJobPtr job;
+ struct virThreadPoolWorkerData *data = NULL;
virMutexLock(&pool->mutex);
if (pool->quit)
@@ -298,10 +299,19 @@ int virThreadPoolSendJob(virThreadPoolPtr pool,
goto error;
}
+ if (VIR_ALLOC(data) < 0) {
+ pool->nWorkers--;
+ virReportOOMError();
+ goto error;
+ }
+
+ data->pool = pool;
+ data->cond = &pool->cond;
+
if (virThreadCreate(&pool->workers[pool->nWorkers - 1],
true,
virThreadPoolWorker,
- pool) < 0) {
+ data) < 0) {
pool->nWorkers--;
goto error;
}
@@ -336,6 +346,7 @@ int virThreadPoolSendJob(virThreadPoolPtr pool,
return 0;
error:
+ VIR_FREE(data);
virMutexUnlock(&pool->mutex);
return -1;
}
--
1.7.3.4
[libvirt] [test-API][PATCH] unify similar types of the test file name in repos
by Nan Zhang
---
repos/domain/blkstats.py | 103 ---------------------------------------
repos/domain/domain_blkstats.py | 103 +++++++++++++++++++++++++++++++++++++++
2 files changed, 103 insertions(+), 103 deletions(-)
delete mode 100644 repos/domain/blkstats.py
create mode 100644 repos/domain/domain_blkstats.py
diff --git a/repos/domain/blkstats.py b/repos/domain/blkstats.py
deleted file mode 100644
index 67592f0..0000000
--- a/repos/domain/blkstats.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/evn python
-"""this test case is used for testing domain block
- device statistics
- mandatory arguments: guestname
-"""
-
-__author__ = 'Alex Jia: ajia@redhat.com'
-__date__ = 'Wed Jan 27, 2010'
-__version__ = '0.1.0'
-__credits__ = 'Copyright (C) 2009 Red Hat, Inc.'
-__all__ = ['usage', 'check_guest_status', 'check_blkstats',
- 'blkstats']
-
-import os
-import sys
-import time
-
-dir = os.path.dirname(sys.modules[__name__].__file__)
-absdir = os.path.abspath(dir)
-rootdir = os.path.split(os.path.split(absdir)[0])[0]
-sys.path.append(rootdir)
-
-import exception
-from lib import connectAPI
-from lib import domainAPI
-from utils.Python import utils
-
-def usage(params):
- """Verify inputing parameter dictionary"""
- logger = params['logger']
- keys = ['guestname']
- for key in keys:
- if key not in params:
- logger.error("%s is required" %key)
- return 1
-
-def check_guest_status(guestname, domobj):
- """Check guest current status"""
- state = domobj.get_state(guestname)
- if state == "shutoff" or state == "shutdown":
- # add check function
- return False
- else:
- return True
-
-def check_blkstats():
- """Check block device statistic result"""
- pass
-
-def blkstats(params):
- """Domain block device statistic"""
- # Initiate and check parameters
- usage(params)
- logger = params['logger']
- guestname = params['guestname']
- test_result = False
-
- # Connect to local hypervisor connection URI
- util = utils.Utils()
- uri = util.get_uri('127.0.0.1')
- conn = connectAPI.ConnectAPI()
- virconn = conn.open(uri)
-
- caps = conn.get_caps()
- logger.debug(caps)
-
- # Check domain block status
- domobj = domainAPI.DomainAPI(virconn)
- if check_guest_status(guestname, domobj):
- pass
- else:
- domobj.start(guestname)
- time.sleep(90)
- try:
- try:
- (blkstats, path) = domobj.get_block_stats(guestname)
- except exception.LibvirtAPI, e:
- logger.error("libvirt error: error code - %s; error message - %s" %(e.code, e.message))
- return 1;
- finally:
- conn.close()
- logger.info("closed hypervisor connection")
-
- if blkstats:
- # check_blkstats()
- logger.debug(blkstats)
- logger.info("%s rd_req %s" %(path, blkstats[0]))
- logger.info("%s rd_bytes %s" %(path, blkstats[1]))
- logger.info("%s wr_req %s" %(path, blkstats[2]))
- logger.info("%s wr_bytes %s" %(path, blkstats[3]))
- test_result = True
- else:
- logger.error("fail to get domain block statistics\n")
- test_result = False
-
- if test_result:
- return 0
- else:
- return 1
-
-def blkstats_clean(params):
- """ clean testing environment """
- pass
diff --git a/repos/domain/domain_blkstats.py b/repos/domain/domain_blkstats.py
new file mode 100644
index 0000000..79ea99a
--- /dev/null
+++ b/repos/domain/domain_blkstats.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+"""this test case is used for testing domain block
+ device statistics
+ mandatory arguments: guestname
+"""
+
+__author__ = 'Alex Jia: ajia@redhat.com'
+__date__ = 'Wed Jan 27, 2010'
+__version__ = '0.1.0'
+__credits__ = 'Copyright (C) 2009 Red Hat, Inc.'
+__all__ = ['usage', 'check_guest_status', 'check_blkstats',
+ 'domain_blkstats']
+
+import os
+import sys
+import time
+
+dir = os.path.dirname(sys.modules[__name__].__file__)
+absdir = os.path.abspath(dir)
+rootdir = os.path.split(os.path.split(absdir)[0])[0]
+sys.path.append(rootdir)
+
+import exception
+from lib import connectAPI
+from lib import domainAPI
+from utils.Python import utils
+
+def usage(params):
+ """Verify inputing parameter dictionary"""
+ logger = params['logger']
+ keys = ['guestname']
+ for key in keys:
+ if key not in params:
+ logger.error("%s is required" %key)
+ return 1
+
+def check_guest_status(guestname, domobj):
+ """Check guest current status"""
+ state = domobj.get_state(guestname)
+ if state == "shutoff" or state == "shutdown":
+ # add check function
+ return False
+ else:
+ return True
+
+def check_blkstats():
+ """Check block device statistic result"""
+ pass
+
+def domain_blkstats(params):
+ """Domain block device statistic"""
+ # Initiate and check parameters
+ usage(params)
+ logger = params['logger']
+ guestname = params['guestname']
+ test_result = False
+
+ # Connect to local hypervisor connection URI
+ util = utils.Utils()
+ uri = util.get_uri('127.0.0.1')
+ conn = connectAPI.ConnectAPI()
+ virconn = conn.open(uri)
+
+ caps = conn.get_caps()
+ logger.debug(caps)
+
+ # Check domain block status
+ domobj = domainAPI.DomainAPI(virconn)
+ if check_guest_status(guestname, domobj):
+ pass
+ else:
+ domobj.start(guestname)
+ time.sleep(90)
+ try:
+ try:
+ (blkstats, path) = domobj.get_block_stats(guestname)
+ except exception.LibvirtAPI, e:
+ logger.error("libvirt error: error code - %s; error message - %s" %(e.code, e.message))
+ return 1
+ finally:
+ conn.close()
+ logger.info("closed hypervisor connection")
+
+ if blkstats:
+ # check_blkstats()
+ logger.debug(blkstats)
+ logger.info("%s rd_req %s" %(path, blkstats[0]))
+ logger.info("%s rd_bytes %s" %(path, blkstats[1]))
+ logger.info("%s wr_req %s" %(path, blkstats[2]))
+ logger.info("%s wr_bytes %s" %(path, blkstats[3]))
+ test_result = True
+ else:
+ logger.error("fail to get domain block statistics\n")
+ test_result = False
+
+ if test_result:
+ return 0
+ else:
+ return 1
+
+def domain_blkstats_clean(params):
+ """ clean testing environment """
+ pass
--
1.7.4.4
[libvirt] [test-API][PATCH] Add test case for start domain on nfs storage
by Wayne Sun
* This test starts a domain whose image file is on NFS storage.
With the SELinux boolean virt_use_nfs on or off, combined with
the dynamic_ownership setting in /etc/libvirt/qemu.conf, it
checks whether the guest can be started or not. The NFS export
can be root_squash or no_root_squash. SELinux should be enabled
and enforcing on the host.
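For reference, here is the outcome matrix the test below encodes
("denied" means the start is expected to fail):

  root_squash | virt_use_nfs | dynamic_ownership | before chown | after chown to qemu
  yes         | on           | enable            | denied       | starts
  yes         | on           | disable           | denied       | starts
  yes         | off          | enable or disable | denied       | denied
  no          | on           | enable            | starts       | starts
  no          | on           | disable           | denied       | starts
  no          | off          | enable or disable | denied       | denied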
---
repos/sVirt/domain_nfs_start.py | 476 +++++++++++++++++++++++++++++++++++++++
1 files changed, 476 insertions(+), 0 deletions(-)
create mode 100644 repos/sVirt/__init__.py
create mode 100644 repos/sVirt/domain_nfs_start.py
diff --git a/repos/sVirt/__init__.py b/repos/sVirt/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/repos/sVirt/domain_nfs_start.py b/repos/sVirt/domain_nfs_start.py
new file mode 100644
index 0000000..0b66128
--- /dev/null
+++ b/repos/sVirt/domain_nfs_start.py
@@ -0,0 +1,476 @@
+#!/usr/bin/env python
+"""This test is for start a guest with img file on nfs storage.
+ Under SElinux boolean virt_use_nfs on or off, combine with
+ setting the dynamic_ownership in /etc/libvirt/qemu.conf,
+ check whether the guest can be started or not. The nfs could
+ be root_squash or no_root_squash. SElinux should be enabled
+ and enforcing on host.
+ sVirt:domain_nfs_start
+ guestname
+ #GUESTNAME#
+ dynamic_ownership
+ enable|disable
+ virt_use_nfs
+ on|off
+ root_squash
+ yes|no
+"""
+
+__author__ = 'Wayne Sun: gsun@redhat.com'
+__date__ = 'Mon Sep 2, 2011'
+__version__ = '0.1.0'
+__credits__ = 'Copyright (C) 2011 Red Hat, Inc.'
+__all__ = ['domain_nfs_start']
+
+import os
+import re
+import sys
+
+QEMU_CONF = "/etc/libvirt/qemu.conf"
+
+def append_path(path):
+ """Append root path of package"""
+ if path in sys.path:
+ pass
+ else:
+ sys.path.append(path)
+
+pwd = os.getcwd()
+result = re.search('(.*)libvirt-test-API', pwd)
+append_path(result.group(0))
+
+from lib import connectAPI
+from lib import domainAPI
+from utils.Python import utils
+from exception import LibvirtAPI
+
+def return_close(conn, logger, ret):
+ """close hypervisor connection and return the given value"""
+ conn.close()
+ logger.info("closed hypervisor connection")
+ return ret
+
+def check_params(params):
+ """Verify inputing parameter dictionary"""
+ logger = params['logger']
+ keys = ['guestname', 'dynamic_ownership', 'virt_use_nfs', 'root_squash']
+ for key in keys:
+ if key not in params:
+ logger.error("%s is required" %key)
+ return 1
+ return 0
+
+def nfs_setup(util, root_squash, logger):
+ """setup nfs on localhost
+ """
+ logger.info("set nfs service")
+ if root_squash == "yes":
+ option = "root_squash"
+ elif root_squash == "no":
+ option = "no_root_squash"
+
+ cmd = "echo /tmp *\(rw,%s\) >> /etc/exports" % option
+ ret, out = util.exec_cmd(cmd, shell=True)
+ if ret:
+ logger.error("failed to config nfs export")
+ return 1
+
+ logger.info("restart nfs service")
+ cmd = "service nfs restart"
+ ret, out = util.exec_cmd(cmd, shell=True)
+ if ret:
+ logger.error("failed to restart nfs service")
+ return 1
+ else:
+ for i in range(len(out)):
+ logger.info(out[i])
+
+ return 0
+
+def prepare_env(util, d_ownership, virt_use_nfs, guestname, root_squash, \
+ disk_file, img_dir, logger):
+ """set virt_use_nfs SElinux boolean, configure
+ dynamic_ownership in /etc/libvirt/qemu.conf
+ """
+ logger.info("set virt_use_nfs selinux boolean")
+ cmd = "setsebool virt_use_nfs %s" % virt_use_nfs
+ ret, out = util.exec_cmd(cmd, shell=True)
+ if ret:
+ logger.error("failed to set virt_use_nfs SElinux boolean")
+ return 1
+
+ logger.info("set the dynamic ownership in %s as %s" % \
+ (QEMU_CONF, d_ownership))
+ if d_ownership == "enable":
+ option = 1
+ elif d_ownership == "disable":
+ option = 0
+ set_cmd = "echo dynamic_ownership = %s >> %s" % \
+ (option, QEMU_CONF)
+ ret, out = util.exec_cmd(set_cmd, shell=True)
+ if ret:
+ logger.error("failed to set dynamic ownership")
+ return 1
+
+ logger.info("restart libvirtd")
+ restart_cmd = "service libvirtd restart"
+ ret, out = util.exec_cmd(restart_cmd, shell=True)
+ if ret:
+ logger.error("failed to restart libvirtd")
+ for i in range(len(out)):
+ logger.info(out[i])
+ return 1
+ else:
+ for i in range(len(out)):
+ logger.info(out[i])
+
+ logger.info("copy %s img file to nfs path" %guestname)
+ cmd = "cp %s /tmp" % disk_file
+ ret, out = util.exec_cmd(cmd, shell=True)
+ if ret:
+ logger.error("failed to cp %s img file to nfs path" % guestname)
+ return 1
+
+ logger.info("set up nfs service on localhost")
+ ret = nfs_setup(util, root_squash, logger)
+ if ret:
+ return 1
+
+ logger.info("mount nfs to img dir path")
+ mount_cmd = "mount -o vers=3 127.0.0.1:/tmp %s" % img_dir
+ ret, out = util.exec_cmd(mount_cmd, shell=True)
+ if ret:
+ logger.error("Failed to mount the nfs path")
+ for i in range(len(out)):
+ logger.info(out[i])
+ return 1
+
+ return 0
+
+def domain_nfs_start(params):
+ """start domain with img on nfs"""
+ # Initiate and check parameters
+ params_check_result = check_params(params)
+ if params_check_result:
+ return 1
+
+ logger = params['logger']
+ guestname = params['guestname']
+ dynamic_ownership = params['dynamic_ownership']
+ virt_use_nfs = params['virt_use_nfs']
+ root_squash = params['root_squash']
+
+ util = utils.Utils()
+
+ # Connect to local hypervisor connection URI
+ uri = util.get_uri('127.0.0.1')
+ conn = connectAPI.ConnectAPI()
+ virconn = conn.open(uri)
+ domobj = domainAPI.DomainAPI(virconn)
+
+ logger.info("get the domain state")
+ try:
+ state = domobj.get_state(guestname)
+ logger.info("domain %s is %s" % (guestname, state))
+ except LibvirtAPI, e:
+ logger.error("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.error("Error: fail to get domain %s state" % guestname)
+ return return_close(conn, logger, 1)
+
+ if state != "shutoff":
+ logger.info("shut down the domain %s" % guestname)
+ try:
+ domobj.destroy(guestname)
+ except LibvirtAPI, e:
+ logger.error("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.error("Error: fail to destroy domain %s" % guestname)
+ return return_close(conn, logger, 1)
+
+ logger.info("get guest img file path")
+ try:
+ dom_xml = domobj.get_xml_desc(guestname)
+ disk_file = util.get_disk_path(dom_xml)
+ logger.info("%s disk file path is %s" % (guestname, disk_file))
+ img_dir = os.path.dirname(disk_file)
+ except LibvirtAPI, e:
+ logger.error("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.error("Error: fail to get domain %s xml" % guestname)
+ return return_close(conn, logger, 1)
+
+ # set env
+ logger.info("prepare the environment")
+ ret = prepare_env(util, dynamic_ownership, virt_use_nfs, guestname,\
+ root_squash, disk_file, img_dir, logger)
+ if ret:
+ logger.error("failed to prepare the environment")
+ return return_close(conn, logger, 1)
+
+ # some operation causes the connection to break, so reconnect here.
+ # Unsure of the reason; this needs fixing.
+ conn.close()
+ conn = connectAPI.ConnectAPI()
+ virconn = conn.open(uri)
+ domobj = domainAPI.DomainAPI(virconn)
+
+ logger.info("begin to test start domain from nfs storage")
+ logger.info("First, start the domain without chown the img file to qemu")
+ logger.info("start domain %s" % guestname)
+ if root_squash == "yes":
+ if virt_use_nfs == "on":
+ if dynamic_ownership == "enable":
+ try:
+ domobj.start(guestname)
+ logger.error("Domain %s started, this is not expected" % \
+ guestname)
+ return return_close(conn, logger, 1)
+ except LibvirtAPI, e:
+ logger.info("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.info("Fail to start domain %s, this is expected" % \
+ guestname)
+
+ elif dynamic_ownership == "disable":
+ try:
+ domobj.start(guestname)
+ logger.error("Domain %s started, this is not expected" % \
+ guestname)
+ return return_close(conn, logger, 1)
+ except LibvirtAPI, e:
+ logger.info("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.info("Fail to start domain %s, this is expected" % \
+ guestname)
+ elif virt_use_nfs == "off":
+ if dynamic_ownership == "enable":
+ try:
+ domobj.start(guestname)
+ logger.error("Domain %s started, this is not expected" % \
+ guestname)
+ return return_close(conn, logger, 1)
+ except LibvirtAPI, e:
+ logger.info("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.info("Fail to start domain %s, this is expected" % \
+ guestname)
+
+ elif dynamic_ownership == "disable":
+ try:
+ domobj.start(guestname)
+ logger.error("Domain %s started, this is not expected" % \
+ guestname)
+ return return_close(conn, logger, 1)
+ except LibvirtAPI, e:
+ logger.info("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.info("Fail to start domain %s, this is expected" % \
+ guestname)
+ elif root_squash == "no":
+ if virt_use_nfs == "on":
+ if dynamic_ownership == "enable":
+ try:
+ domobj.start(guestname)
+ logger.info("Success start domain %s" % guestname)
+ except LibvirtAPI, e:
+ logger.error("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.error("Fail to start domain %s" % guestname)
+ return return_close(conn, logger, 1)
+
+ elif dynamic_ownership == "disable":
+ try:
+ domobj.start(guestname)
+ logger.error("Domain %s started, this is not expected" % \
+ guestname)
+ return return_close(conn, logger, 1)
+ except LibvirtAPI, e:
+ logger.info("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.info("Fail to start domain %s, this is expected" % \
+ guestname)
+ elif virt_use_nfs == "off":
+ if dynamic_ownership == "enable":
+ try:
+ domobj.start(guestname)
+ logger.error("Domain %s started, this is not expected" % \
+ guestname)
+ return return_close(conn, logger, 1)
+ except LibvirtAPI, e:
+ logger.info("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.info("Fail to start domain %s, this is expected" % \
+ guestname)
+
+ elif dynamic_ownership == "disable":
+ try:
+ domobj.start(guestname)
+ logger.error("Domain %s started, this is not expected" % \
+ guestname)
+ return return_close(conn, logger, 1)
+ except LibvirtAPI, e:
+ logger.info("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.info("Fail to start domain %s, this is expected" % \
+ guestname)
+
+ logger.info("get the domain state")
+ try:
+ state = domobj.get_state(guestname)
+ logger.info("domain %s is %s" % (guestname, state))
+ except LibvirtAPI, e:
+ logger.error("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.error("Error: fail to get domain %s state" % guestname)
+ return return_close(conn, logger, 1)
+
+ if state != "shutoff":
+ logger.info("shut down the domain %s" % guestname)
+ try:
+ domobj.destroy(guestname)
+ except LibvirtAPI, e:
+ logger.error("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.error("Error: fail to destroy domain %s" % guestname)
+ return return_close(conn, logger, 1)
+
+ logger.info("Second, start the domain after chown the img file to qemu")
+
+ file_name = os.path.basename(disk_file)
+ filepath = "/tmp/%s" % file_name
+ logger.info("set chown of %s as 107:107" % filepath)
+ chown_cmd = "chown 107:107 %s" % filepath
+ ret, out = util.exec_cmd(chown_cmd, shell=True)
+ if ret:
+ logger.error("failed to chown %s to qemu:qemu" % filepath)
+ return return_close(conn, logger, 1)
+
+ logger.info("start domain %s" % guestname)
+ if root_squash == "yes":
+ if virt_use_nfs == "on":
+ if dynamic_ownership == "enable":
+ try:
+ domobj.start(guestname)
+ logger.info("Success start domain %s" % guestname)
+ except LibvirtAPI, e:
+ logger.error("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.error("Fail to start domain %s" % guestname)
+ return return_close(conn, logger, 1)
+
+ elif dynamic_ownership == "disable":
+ try:
+ domobj.start(guestname)
+ logger.info("Success start domain %s" % guestname)
+ except LibvirtAPI, e:
+ logger.error("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.error("Fail to start domain %s" % guestname)
+ return return_close(conn, logger, 1)
+
+ elif virt_use_nfs == "off":
+ if dynamic_ownership == "enable":
+ try:
+ domobj.start(guestname)
+ logger.error("Domain %s started, this is not expected" % \
+ guestname)
+ return return_close(conn, logger, 1)
+ except LibvirtAPI, e:
+ logger.info("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.info("Fail to start domain %s, this is expected" % \
+ guestname)
+
+ elif dynamic_ownership == "disable":
+ try:
+ domobj.start(guestname)
+ logger.error("Domain %s started, this is not expected" % \
+ guestname)
+ return return_close(conn, logger, 1)
+ except LibvirtAPI, e:
+ logger.info("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.info("Fail to start domain %s, this is expected" % \
+ guestname)
+ elif root_squash == "no":
+ if virt_use_nfs == "on":
+ if dynamic_ownership == "enable":
+ try:
+ domobj.start(guestname)
+ logger.info("Success start domain %s" % guestname)
+ except LibvirtAPI, e:
+ logger.error("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.error("Fail to start domain %s" % guestname)
+ return return_close(conn, logger, 1)
+
+ elif dynamic_ownership == "disable":
+ try:
+ domobj.start(guestname)
+ logger.info("Success start Domain %s" % guestname)
+ except LibvirtAPI, e:
+ logger.error("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.error("Fail to start domain %s" % guestname)
+ return return_close(conn, logger, 1)
+
+ elif virt_use_nfs == "off":
+ if dynamic_ownership == "enable":
+ try:
+ domobj.start(guestname)
+ logger.error("Domain %s started, this is not expected" % \
+ guestname)
+ return return_close(conn, logger, 1)
+ except LibvirtAPI, e:
+ logger.info("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.info("Fail to start domain %s, this is expected" % \
+ guestname)
+
+ elif dynamic_ownership == "disable":
+ try:
+ domobj.start(guestname)
+ logger.error("Domain %s started, this is not expected" % \
+ guestname)
+ return return_close(conn, logger, 1)
+ except LibvirtAPI, e:
+ logger.info("API error message: %s, error code is %s" % \
+ (e.response()['message'], e.response()['code']))
+ logger.info("Fail to start domain %s, this is expected" % \
+ guestname)
+
+ return return_close(conn, logger, 0)
+
+def domain_nfs_start_clean(params):
+ """clean testing environment"""
+ logger = params['logger']
+ guestname = params['guestname']
+
+ util = utils.Utils()
+
+ # Connect to local hypervisor connection URI
+ uri = util.get_uri('127.0.0.1')
+ conn = connectAPI.ConnectAPI()
+ virconn = conn.open(uri)
+ domobj = domainAPI.DomainAPI(virconn)
+
+ if domobj.get_state(guestname) != "shutoff":
+ domobj.destroy(guestname)
+
+ dom_xml = domobj.get_xml_desc(guestname)
+ disk_file = util.get_disk_path(dom_xml)
+ img_dir = os.path.dirname(disk_file)
+ file_name = os.path.basename(disk_file)
+ temp_file = "/tmp/%s" % file_name
+
+ umount_cmd = "umount -f %s" % img_dir
+ ret, out = util.exec_cmd(umount_cmd, shell=True)
+ if ret:
+ logger.error("failed to umount %s" % img_dir)
+
+ if os.path.exists(temp_file):
+ os.remove(temp_file)
+
+ conn.close()
+
--
1.7.1
[libvirt] [PATCH v1] domain_conf: add the support for disk I/O throttle setting
by Zhi Yong Wu
This first patch is only meant to explore whether it is suitable to extend blkiotune to implement disk I/O throttling.
As you know, when blkiotune is issued without options, it displays the current tuning parameters. If we extend it, what should it display without options? Both sets of information? Or should a new option be added to display them separately?
Signed-off-by: Zhi Yong Wu <wuzhy@linux.vnet.ibm.com>
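For reference, the disk XML this patch formats would look like this
(reconstructed from the virDomainDiskDefFormat hunk below; the
numbers are invented):

  <disk type='file' device='disk'>
    ...
    <blkiothrottle>
      <bps>1000000</bps>
      <bps_rd>0</bps_rd>
      <bps_wr>0</bps_wr>
      <iops>100</iops>
      <iops_rd>0</iops_rd>
      <iops_wr>0</iops_wr>
    </blkiothrottle>
  </disk>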
---
src/conf/domain_conf.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++-
src/conf/domain_conf.h | 11 +++++++
2 files changed, 79 insertions(+), 2 deletions(-)
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index cce9955..d9108fa 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -2225,6 +2225,7 @@ cleanup:
static virDomainDiskDefPtr
virDomainDiskDefParseXML(virCapsPtr caps,
xmlNodePtr node,
+ xmlXPathContextPtr ctxt,
virBitmapPtr bootMap,
unsigned int flags)
{
@@ -2266,7 +2267,9 @@ virDomainDiskDefParseXML(virCapsPtr caps,
}
cur = node->children;
+ xmlNodePtr oldnode = ctxt->node;
while (cur != NULL) {
+ ctxt->node = cur;
if (cur->type == XML_ELEMENT_NODE) {
if ((source == NULL && hosts == NULL) &&
(xmlStrEqual(cur->name, BAD_CAST "source"))) {
@@ -2362,6 +2365,36 @@ virDomainDiskDefParseXML(virCapsPtr caps,
iotag = virXMLPropString(cur, "io");
ioeventfd = virXMLPropString(cur, "ioeventfd");
event_idx = virXMLPropString(cur, "event_idx");
+ } else if (xmlStrEqual(cur->name, BAD_CAST "blkiothrottle")) {
+ if (virXPathULongLong("string(./blkiothrottle/bps)", ctxt,
+ &def->blkiothrottle.bps) < 0) {
+ def->blkiothrottle.bps = 0;
+ }
+
+ if (virXPathULongLong("string(./blkiothrottle/bps_rd)", ctxt,
+ &def->blkiothrottle.bps_rd) < 0) {
+ def->blkiothrottle.bps_rd = 0;
+ }
+
+ if (virXPathULongLong("string(./blkiothrottle/bps_wr)", ctxt,
+ &def->blkiothrottle.bps_wr) < 0) {
+ def->blkiothrottle.bps_wr = 0;
+ }
+
+ if (virXPathULongLong("string(./blkiothrottle/iops)", ctxt,
+ &def->blkiothrottle.iops) < 0) {
+ def->blkiothrottle.iops = 0;
+ }
+
+ if (virXPathULongLong("string(./blkiothrottle/iops_rd)", ctxt,
+ &def->blkiothrottle.iops_rd) < 0) {
+ def->blkiothrottle.iops_rd = 0;
+ }
+
+ if (virXPathULongLong("string(./blkiothrottle/iops_wr)", ctxt,
+ &def->blkiothrottle.iops_wr) < 0) {
+ def->blkiothrottle.iops_wr = 0;
+ }
} else if (xmlStrEqual(cur->name, BAD_CAST "readonly")) {
def->readonly = 1;
} else if (xmlStrEqual(cur->name, BAD_CAST "shareable")) {
@@ -2387,6 +2420,7 @@ virDomainDiskDefParseXML(virCapsPtr caps,
}
cur = cur->next;
}
+ ctxt->node = oldnode;
device = virXMLPropString(node, "device");
if (device) {
@@ -5684,9 +5718,13 @@ virDomainDeviceDefPtr virDomainDeviceDefParse(virCapsPtr caps,
if (xmlStrEqual(node->name, BAD_CAST "disk")) {
dev->type = VIR_DOMAIN_DEVICE_DISK;
- if (!(dev->data.disk = virDomainDiskDefParseXML(caps, node,
- NULL, flags)))
+ xmlNodePtr oldnode = ctxt->node;
+ if (!(dev->data.disk = virDomainDiskDefParseXML(caps, node, ctxt,
+ NULL, flags))) {
+ ctxt->node = oldnode;
goto error;
+ }
+ ctxt->node = oldnode;
} else if (xmlStrEqual(node->name, BAD_CAST "lease")) {
dev->type = VIR_DOMAIN_DEVICE_LEASE;
if (!(dev->data.lease = virDomainLeaseDefParseXML(node)))
@@ -6725,11 +6763,16 @@ static virDomainDefPtr virDomainDefParseXML(virCapsPtr caps,
}
if (n && VIR_ALLOC_N(def->disks, n) < 0)
goto no_memory;
+
+ xmlNodePtr oldnode = ctxt->node;
for (i = 0 ; i < n ; i++) {
+ ctxt->node = nodes[i];
virDomainDiskDefPtr disk = virDomainDiskDefParseXML(caps,
nodes[i],
+ ctxt,
bootMap,
flags);
+ ctxt->node = oldnode;
if (!disk)
goto error;
@@ -9065,6 +9108,29 @@ virDomainDiskDefFormat(virBufferPtr buf,
virBufferAsprintf(buf, " <target dev='%s' bus='%s'/>\n",
def->dst, bus);
+ /* disk I/O throttling */
+ if (def->blkiothrottle.bps
+ || def->blkiothrottle.bps_rd
+ || def->blkiothrottle.bps_wr
+ || def->blkiothrottle.iops
+ || def->blkiothrottle.iops_rd
+ || def->blkiothrottle.iops_wr) {
+ virBufferAsprintf(buf, " <blkiothrottle>\n");
+ virBufferAsprintf(buf, " <bps>%llu</bps>\n",
+ def->blkiothrottle.bps);
+ virBufferAsprintf(buf, " <bps_rd>%llu</bps_rd>\n",
+ def->blkiothrottle.bps_rd);
+ virBufferAsprintf(buf, " <bps_wr>%llu</bps_wr>\n",
+ def->blkiothrottle.bps_wr);
+ virBufferAsprintf(buf, " <iops>%llu</iops>\n",
+ def->blkiothrottle.iops);
+ virBufferAsprintf(buf, " <iops_rd>%llu</iops_rd>\n",
+ def->blkiothrottle.iops_rd);
+ virBufferAsprintf(buf, " <iops_wr>%llu</iops_wr>\n",
+ def->blkiothrottle.iops_wr);
+ virBufferAsprintf(buf, " </blkiothrottle>\n");
+ }
+
if (def->bootIndex)
virBufferAsprintf(buf, " <boot order='%d'/>\n", def->bootIndex);
if (def->readonly)
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index e218a30..5902377 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -258,6 +258,17 @@ struct _virDomainDiskDef {
virDomainDiskHostDefPtr hosts;
char *driverName;
char *driverType;
+
+ /*disk I/O throttling*/
+ struct {
+ unsigned long long bps;
+ unsigned long long bps_rd;
+ unsigned long long bps_wr;
+ unsigned long long iops;
+ unsigned long long iops_rd;
+ unsigned long long iops_wr;
+ } blkiothrottle;
+
char *serial;
int cachemode;
int error_policy;
--
1.7.6
[libvirt] [PATCH v1] domain_conf: add the support for disk I/O throttle setting
by Zhi Yong Wu
This first patch is only meant to explore whether it is suitable to extend blkiotune to implement disk I/O throttling.
As you know, when blkiotune is issued without options, it displays the current tuning parameters. If we extend it, what should it display without options? Both sets of information? Or should a new option be added to display them separately?
Signed-off-by: Zhi Yong Wu <wuzhy@linux.vnet.ibm.com>
---
src/conf/domain_conf.c | 18 ++++++++++++++++++
src/conf/domain_conf.h | 11 +++++++++++
2 files changed, 29 insertions(+), 0 deletions(-)
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index cce9955..7dd350a 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -9065,6 +9065,24 @@ virDomainDiskDefFormat(virBufferPtr buf,
virBufferAsprintf(buf, " <target dev='%s' bus='%s'/>\n",
def->dst, bus);
+ /* disk I/O throttling */
+ if (def->blkiothrottle.bps
+ || def->blkiothrottle.bps_rd
+ || def->blkiothrottle.bps_wr
+ || def->blkiothrottle.iops
+ || def->blkiothrottle.iops_rd
+ || def->blkiothrottle.iops_wr) {
+ virBufferAsprintf(buf, " <blkiothrottle>\n");
+ virBufferAsprintf(buf, " <bps>%llu</bps>\n",
+ def->blkiothrottle.bps);
+ virBufferAsprintf(buf, " <bps_rd>%llu</bps_rd>\n",
+ def->blkiothrottle.bps_rd);
+ virBufferAsprintf(buf, " <bps_wr>%llu</bps_wr>\n",
+ def->blkiothrottle.bps_wr);
+ virBufferAsprintf(buf, " <iops>%llu</iops>\n",
+ def->blkiothrottle.iops);
+ virBufferAsprintf(buf, " <iops_rd>%llu</iops_rd>\n",
+ def->blkiothrottle.iops_rd);
+ virBufferAsprintf(buf, " <iops_wr>%llu</iops_wr>\n",
+ def->blkiothrottle.iops_wr);
+ virBufferAsprintf(buf, " </blkiothrottle>\n");
+ }
+
if (def->bootIndex)
virBufferAsprintf(buf, " <boot order='%d'/>\n", def->bootIndex);
if (def->readonly)
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index e218a30..5902377 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -258,6 +258,17 @@ struct _virDomainDiskDef {
virDomainDiskHostDefPtr hosts;
char *driverName;
char *driverType;
+
+ /*disk I/O throttling*/
+ struct {
+ unsigned long long bps;
+ unsigned long long bps_rd;
+ unsigned long long bps_wr;
+ unsigned long long iops;
+ unsigned long long iops_rd;
+ unsigned long long iops_wr;
+ } blkiothrottle;
+
char *serial;
int cachemode;
int error_policy;
--
1.7.6
[libvirt] [RFC] netlink monitor thread
by D. Herrendoerfer
Hello,
I'm trying to get libvirt to recognize port profile disassociations,
i.e. when the switch removes a port profile because lldpad has
unexpectedly quit, the profile has been changed in the switch's
profile database, or something else went wrong.
My take on this for testing was to have an external daemon log
port profiles and changes to them, and tell lldpad to set them
again if needed. But this is not really practical.
I would like to find a way to have libvirtd monitor netlink
and take appropriate measures if, for example, a port profile
is removed unexpectedly, that is: to call the VM's network setup
again if macvtap/VEPA is used.
There would be other benefits, like detecting when a link goes
down on a used interface, or when a device is unexpectedly configured
down by another component.
Looking at the daemon code I'm unclear on how to do this properly;
this is a very Linux-specific thing, and the asynchronous nature of the
request handling makes starting the monitor thread later on difficult.
Thoughts, ideas?
D. Herrendoerfer
[libvirt] [PATCH] Add test case update_devflag.py for update device flag
by Nan Zhang
---
repos/domain/update_devflag.py | 163 ++++++++++++++++++++++++++++++++++++++++
1 files changed, 163 insertions(+), 0 deletions(-)
create mode 100644 repos/domain/update_devflag.py
diff --git a/repos/domain/update_devflag.py b/repos/domain/update_devflag.py
new file mode 100644
index 0000000..30f75b0
--- /dev/null
+++ b/repos/domain/update_devflag.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python
+"""Update virtual device to guest from an XML file
+"""
+
+__author__ = 'Nan Zhang: nzhang@redhat.com'
+__date__ = 'Fri Sep 2, 2011'
+__version__ = '0.1.0'
+__credits__ = 'Copyright (C) 2011 Red Hat, Inc.'
+__all__ = ['usage', 'update_devflag']
+
+import os
+import re
+import sys
+import commands
+from xml.dom import minidom
+
+def append_path(path):
+ """Append root path of package"""
+ if path in sys.path:
+ pass
+ else:
+ sys.path.append(path)
+
+pwd = os.getcwd()
+result = re.search('(.*)libvirt-test-API', pwd)
+append_path(result.group(0))
+
+from lib import connectAPI
+from lib import domainAPI
+from utils.Python import utils
+from utils.Python import xmlbuilder
+from exception import LibvirtAPI
+
+def usage():
+ print '''usage: mandatory arguments:
+ guestname
+ devtype
+ '''
+
+def check_params(params):
+ """Verify inputing parameter dictionary"""
+ logger = params['logger']
+ keys = ['guestname', 'devtype']
+ for key in keys:
+ if key not in params:
+ logger.error("%s is required" %key)
+ usage()
+ return 1
+ return 0
+
+def create_image(params, img_name, img_size):
+ """Create an image file"""
+ logger = params['logger']
+ stat, ret = commands.getstatusoutput("dd if=/dev/zero of=%s bs=1 \
+ count=1 seek=%s" % (img_name, img_size))
+ if stat == 0:
+ logger.debug("create image result:\n%s" % ret)
+ return True
+ else:
+ return False
+
+def check_updated_device(params, guestname, domobj, srcfile):
+ """Check if the device is updated"""
+ logger = params['logger']
+ xmlobj = domobj.get_xml_desc(guestname)
+ domxml = minidom.parseString(xmlobj)
+
+ upfile = None
+ for diskTag in domxml.getElementsByTagName("source"):
+ if diskTag.parentNode.getAttribute("device") == 'cdrom':
+ upfile = diskTag.getAttribute("file")
+ elif diskTag.parentNode.getAttribute('device') == 'floppy':
+ upfile = diskTag.getAttribute("file")
+
+ # the update succeeded if the domain XML now points at srcfile
+ if upfile == srcfile:
+ return True, upfile
+ else:
+ return False, upfile
+
+def update_devflag(params):
+ """Update virtual device to a domain from xml"""
+
+ # Initiate and check parameters
+ params_check_result = check_params(params)
+ if params_check_result:
+ return 1
+ logger = params['logger']
+ guestname = params['guestname']
+ devtype = params['devtype']
+ if devtype == 'cdrom':
+ xmlargs = {}
+ xmlargs['guestname'] = guestname
+ xmlargs['guesttype'] = 'kvm'
+ xmlargs['hdmodel'] = 'ide'
+ xmlargs['bootcd'] = '/var/lib/libvirt/boot/cdrom.img'
+ srcfile = xmlargs['bootcd']
+ create_image(params, srcfile, '100M')
+ elif devtype == 'floppy':
+ xmlargs = {}
+ xmlargs['guestname'] = guestname
+ xmlargs['floppysource'] = '/var/lib/libvirt/boot/floppy.img'
+ srcfile = xmlargs['floppysource']
+ create_image(params, srcfile, '2M')
+ else:
+ srcfile = None
+ logger.error("Wrong device type was specified.")
+ return 1
+
+ if params.has_key('flag'):
+ flag = params['flag']
+ else:
+ flag = domainAPI.VIR_DOMAIN_AFFECT_CONFIG
+
+ # Connect to local hypervisor connection URI
+ util = utils.Utils()
+ uri = util.get_uri('127.0.0.1')
+ conn = connectAPI.ConnectAPI()
+ virconn = conn.open(uri)
+
+ caps = conn.get_caps()
+ logger.debug(caps)
+
+ # Generate device XML for updating
+ domobj = domainAPI.DomainAPI(virconn)
+ newxmlobj = xmlbuilder.XmlBuilder()
+
+ if devtype == 'cdrom':
+ newdevxml = newxmlobj.build_cdrom(xmlargs)
+ elif devtype == 'floppy':
+ newdevxml = newxmlobj.build_floppy(xmlargs)
+
+ logger.debug("block device xml desc:\n%s" %newdevxml)
+
+ try:
+ try:
+ domobj.update_device_flag(guestname, newdevxml, flag)
+ res, upfile = check_updated_device(params, guestname, \
+ domobj, srcfile)
+ if res:
+ logger.info("success to update '%s' device: %s\n" % \
+ (devtype, upfile))
+ else:
+ logger.error("fail to update '%s' device: %s\n" % \
+ (devtype, upfile))
+ except LibvirtAPI, e:
+ logger.error("API error message: %s, error code is %s" %
+ (e.response()['message'], e.response()['code']))
+ conn.close()
+ logger.info("closed hypervisor connection")
+ return 1
+ finally:
+ conn.close()
+ logger.info("closed hypervisor connection")
+
+ return 0
+
+def update_devflag_clean(params):
+ """Clean testing environment"""
+ logger = params['logger']
+
+ if params['devtype'] == 'cdrom':
+ os.system('rm -f /var/lib/libvirt/boot/cdrom.img')
+ elif params['devtype'] == 'floppy':
+ os.system('rm -f /var/lib/libvirt/boot/floppy.img')
+ else:
+ logger.debug("image file was not found.")
--
1.7.4.4
[libvirt] [PATCH] esx: Support folders in the path of vpx:// connection URIs
by Matthias Bolte
Allow the datacenter and compute resource parts of the path
to be prefixed with folders. Therefore, the way the path is
parsed has changed. Before, it was split into 2 or 3 items and
the items' meanings were determined by their positions. Now
the path can have 2 or more items, and the vCenter server
is asked whether a folder, datacenter, or compute resource
with the specified name exists at the current hierarchy level.
Before, the datacenter and compute resource lookup automatically
traversed folders. This logic got removed, and folders
now have to be specified explicitly.
The proper datacenter path including folders is now used when
accessing a datastore over HTTPS. This makes virsh dumpxml
and define work for datacenters in folders.
https://bugzilla.redhat.com/show_bug.cgi?id=732676
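As a worked example against a hypothetical inventory, parsing the
path of vpx://vcenter.example.com/f1/dc1/f2/cluster1/esx1.example.com
proceeds as follows: starting at the root folder, "f1" is looked up
and found to be a folder, so it becomes the new lookup root; "dc1"
is found to be the datacenter; below its hostFolder, "f2" is again
a folder and "cluster1" a ClusterComputeResource; because the
compute resource is a cluster, the final item "esx1.example.com"
must name the host system.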
---
docs/drvesx.html.in | 10 ++-
src/esx/esx_driver.c | 14 ++--
src/esx/esx_util.c | 19 +---
src/esx/esx_util.h | 1 +
src/esx/esx_vi.c | 223 ++++++++++++++++++++++++++++++++++------
src/esx/esx_vi_generator.input | 4 +
6 files changed, 215 insertions(+), 56 deletions(-)
diff --git a/docs/drvesx.html.in b/docs/drvesx.html.in
index da9d2a1..aa8ecd4 100644
--- a/docs/drvesx.html.in
+++ b/docs/drvesx.html.in
@@ -56,7 +56,7 @@ esx://example-esx.com/?no_verify=1 (ESX over HTTPS, but doesn't verify the s
URIs have this general form (<code>[...]</code> marks an optional part).
</p>
<pre>
-type://[username@]hostname[:port]/[datacenter[/cluster]/server][?extraparameters]
+type://[username@]hostname[:port]/[[folder/...]datacenter/[folder/...][cluster/]server][?extraparameters]
</pre>
<p>
The <code>type://</code> is either <code>esx://</code> or
@@ -80,6 +80,14 @@ type://[username@]hostname[:port]/[datacenter[/cluster]/server][?extraparameters
<pre>
vpx://example-vcenter.com/dc1/cluster1/example-esx.com
</pre>
+ <p>
+ Datacenters and clusters can be organized in folders; those have to be
+ specified as well. The driver can handle folders
+ <span class="since">since 0.9.5</span>.
+ </p>
+<pre>
+vpx://example-vcenter.com/folder1/dc1/folder2/example-esx.com
+</pre>
<h4><a name="extraparams">Extra parameters</a></h4>
diff --git a/src/esx/esx_driver.c b/src/esx/esx_driver.c
index f1102ea..fd83a9c 100644
--- a/src/esx/esx_driver.c
+++ b/src/esx/esx_driver.c
@@ -802,8 +802,7 @@ esxConnectToVCenter(esxPrivate *priv, virConnectAuthPtr auth,
char *url = NULL;
if (hostSystemIpAddress == NULL &&
- (priv->parsedUri->path_datacenter == NULL ||
- priv->parsedUri->path_computeResource == NULL)) {
+ (priv->parsedUri->path == NULL || STREQ(priv->parsedUri->path, "/"))) {
ESX_ERROR(VIR_ERR_INVALID_ARG, "%s",
_("Path has to specify the datacenter and compute resource"));
return -1;
@@ -890,8 +889,8 @@ esxConnectToVCenter(esxPrivate *priv, virConnectAuthPtr auth,
/*
- * URI format: {vpx|esx|gsx}://[<username>@]<hostname>[:<port>]/[<path>][?<query parameter> ...]
- * <path> = <datacenter>/<computeresource>[/<hostsystem>]
+ * URI format: {vpx|esx|gsx}://[<username>@]<hostname>[:<port>]/[<path>][?<query parameter>...]
+ * <path> = [<folder>/...]<datacenter>/[<folder>/...]<computeresource>[/<hostsystem>]
*
* If no port is specified the default port is set dependent on the scheme and
* transport parameter:
@@ -905,7 +904,8 @@ esxConnectToVCenter(esxPrivate *priv, virConnectAuthPtr auth,
* For a vpx:// connection <path> references a host managed by the vCenter.
* In case the host is part of a cluster then <computeresource> is the cluster
* name. Otherwise <computeresource> and <hostsystem> are equal and the later
- * can be omitted.
+ * can be omitted. As datacenters and computeresources can be organized in
+ * folders those have to be included in <path>.
*
* Optional query parameters:
* - transport={http|https}
@@ -2744,7 +2744,7 @@ esxDomainGetXMLDesc(virDomainPtr domain, unsigned int flags)
domain->conn->uri->server, domain->conn->uri->port);
virBufferURIEncodeString(&buffer, directoryAndFileName);
virBufferAddLit(&buffer, "?dcPath=");
- virBufferURIEncodeString(&buffer, priv->primary->datacenter->name);
+ virBufferURIEncodeString(&buffer, priv->parsedUri->path_datacenter);
virBufferAddLit(&buffer, "&dsName=");
virBufferURIEncodeString(&buffer, datastoreName);
@@ -3212,7 +3212,7 @@ esxDomainDefineXML(virConnectPtr conn, const char *xml)
virBufferURIEncodeString(&buffer, escapedName);
virBufferAddLit(&buffer, ".vmx?dcPath=");
- virBufferURIEncodeString(&buffer, priv->primary->datacenter->name);
+ virBufferURIEncodeString(&buffer, priv->parsedUri->path_datacenter);
virBufferAddLit(&buffer, "&dsName=");
virBufferURIEncodeString(&buffer, datastoreName);
diff --git a/src/esx/esx_util.c b/src/esx/esx_util.c
index c14179d..5243a0e 100644
--- a/src/esx/esx_util.c
+++ b/src/esx/esx_util.c
@@ -51,7 +51,6 @@ esxUtil_ParseUri(esxUtil_ParsedUri **parsedUri, xmlURIPtr uri)
int noVerify;
int autoAnswer;
char *tmp;
- char *saveptr;
if (parsedUri == NULL || *parsedUri != NULL) {
ESX_VI_ERROR(VIR_ERR_INTERNAL_ERROR, "%s", _("Invalid argument"));
@@ -184,26 +183,13 @@ esxUtil_ParseUri(esxUtil_ParsedUri **parsedUri, xmlURIPtr uri)
}
}
- /* Expected format: [/]<datacenter>/<computeresource>[/<hostsystem>] */
if (uri->path != NULL) {
- tmp = strdup(uri->path);
+ (*parsedUri)->path = strdup(uri->path);
- if (tmp == NULL) {
+ if ((*parsedUri)->path == NULL) {
virReportOOMError();
goto cleanup;
}
-
- if (esxVI_String_DeepCopyValue(&(*parsedUri)->path_datacenter,
- strtok_r(tmp, "/", &saveptr)) < 0 ||
- esxVI_String_DeepCopyValue(&(*parsedUri)->path_computeResource,
- strtok_r(NULL, "/", &saveptr)) < 0 ||
- esxVI_String_DeepCopyValue(&(*parsedUri)->path_hostSystem,
- strtok_r(NULL, "", &saveptr)) < 0) {
- VIR_FREE(tmp);
- goto cleanup;
- }
-
- VIR_FREE(tmp);
}
if ((*parsedUri)->transport == NULL) {
@@ -242,6 +228,7 @@ esxUtil_FreeParsedUri(esxUtil_ParsedUri **parsedUri)
VIR_FREE((*parsedUri)->transport);
VIR_FREE((*parsedUri)->vCenter);
VIR_FREE((*parsedUri)->proxy_hostname);
+ VIR_FREE((*parsedUri)->path);
VIR_FREE((*parsedUri)->path_datacenter);
VIR_FREE((*parsedUri)->path_computeResource);
VIR_FREE((*parsedUri)->path_hostSystem);
diff --git a/src/esx/esx_util.h b/src/esx/esx_util.h
index 39fdb6d..4d7dccb 100644
--- a/src/esx/esx_util.h
+++ b/src/esx/esx_util.h
@@ -37,6 +37,7 @@ struct _esxUtil_ParsedUri {
int proxy_type;
char *proxy_hostname;
int proxy_port;
+ char *path;
char *path_datacenter;
char *path_computeResource;
char *path_hostSystem;
diff --git a/src/esx/esx_vi.c b/src/esx/esx_vi.c
index f4033eb..6f4f9ee 100644
--- a/src/esx/esx_vi.c
+++ b/src/esx/esx_vi.c
@@ -770,55 +770,214 @@ int
esxVI_Context_LookupObjectsByPath(esxVI_Context *ctx,
esxUtil_ParsedUri *parsedUri)
{
- char *hostSystemName = NULL;
+ int result = -1;
+ char *path = NULL;
+ char *saveptr = NULL;
+ char *previousItem = NULL;
+ char *item = NULL;
+ virBuffer buffer = VIR_BUFFER_INITIALIZER;
+ esxVI_ManagedObjectReference *root = NULL;
+ esxVI_Folder *folder = NULL;
+
+ path = strdup(parsedUri->path);
+
+ if (path == NULL) {
+ virReportOOMError();
+ goto cleanup;
+ }
+
/* Lookup Datacenter */
- if (esxVI_LookupDatacenter(ctx, parsedUri->path_datacenter,
- ctx->service->rootFolder, NULL, &ctx->datacenter,
- esxVI_Occurrence_RequiredItem) < 0) {
- return -1;
+ item = strtok_r(path, "/", &saveptr);
+
+ if (item == NULL) {
+ ESX_VI_ERROR(VIR_ERR_INVALID_ARG,
+ _("Path '%s' does not specify a datacenter"),
+ parsedUri->path);
+ goto cleanup;
+ }
+
+ root = ctx->service->rootFolder;
+
+ while (ctx->datacenter == NULL && item != NULL) {
+ esxVI_Folder_Free(&folder);
+
+ /* Try to lookup item as a folder */
+ if (esxVI_LookupFolder(ctx, item, root, NULL, &folder,
+ esxVI_Occurrence_OptionalItem) < 0) {
+ goto cleanup;
+ }
+
+ if (folder != NULL) {
+ /* It's a folder, use it as new lookup root */
+ if (root != ctx->service->rootFolder) {
+ esxVI_ManagedObjectReference_Free(&root);
+ }
+
+ root = folder->_reference;
+ folder->_reference = NULL;
+ } else {
+ /* Try to lookup item as a datacenter */
+ if (esxVI_LookupDatacenter(ctx, item, root, NULL, &ctx->datacenter,
+ esxVI_Occurrence_OptionalItem) < 0) {
+ goto cleanup;
+ }
+ }
+
+ /* Build path_datacenter */
+ if (virBufferUse(&buffer) > 0) {
+ virBufferAddChar(&buffer, '/');
+ }
+
+ virBufferAdd(&buffer, item, -1);
+
+ previousItem = item;
+ item = strtok_r(NULL, "/", &saveptr);
+ }
+
+ if (ctx->datacenter == NULL) {
+ ESX_VI_ERROR(VIR_ERR_INTERNAL_ERROR,
+ _("Could not find datacenter specified in '%s'"),
+ parsedUri->path);
+ goto cleanup;
}
+ if (virBufferError(&buffer)) {
+ virReportOOMError();
+ goto cleanup;
+ }
+
+ parsedUri->path_datacenter = virBufferContentAndReset(&buffer);
+
/* Lookup (Cluster)ComputeResource */
- if (esxVI_LookupComputeResource(ctx, parsedUri->path_computeResource,
- ctx->datacenter->hostFolder, NULL,
- &ctx->computeResource,
- esxVI_Occurrence_RequiredItem) < 0) {
- return -1;
+ if (item == NULL) {
+ ESX_VI_ERROR(VIR_ERR_INVALID_ARG,
+ _("Path '%s' does not specify a compute resource"),
+ parsedUri->path);
+ goto cleanup;
+ }
+
+ if (root != ctx->service->rootFolder) {
+ esxVI_ManagedObjectReference_Free(&root);
+ }
+
+ root = ctx->datacenter->hostFolder;
+
+ while (ctx->computeResource == NULL && item != NULL) {
+ esxVI_Folder_Free(&folder);
+
+ /* Try to lookup item as a folder */
+ if (esxVI_LookupFolder(ctx, item, root, NULL, &folder,
+ esxVI_Occurrence_OptionalItem) < 0) {
+ goto cleanup;
+ }
+
+ if (folder != NULL) {
+ /* It's a folder, use it as new lookup root */
+ if (root != ctx->datacenter->hostFolder) {
+ esxVI_ManagedObjectReference_Free(&root);
+ }
+
+ root = folder->_reference;
+ folder->_reference = NULL;
+ } else {
+ /* Try to lookup item as a compute resource */
+ if (esxVI_LookupComputeResource(ctx, item, root, NULL,
+ &ctx->computeResource,
+ esxVI_Occurrence_OptionalItem) < 0) {
+ goto cleanup;
+ }
+ }
+
+ /* Build path_computeResource */
+ if (virBufferUse(&buffer) > 0) {
+ virBufferAddChar(&buffer, '/');
+ }
+
+ virBufferAdd(&buffer, item, -1);
+
+ previousItem = item;
+ item = strtok_r(NULL, "/", &saveptr);
+ }
+
+ if (ctx->computeResource == NULL) {
+ ESX_VI_ERROR(VIR_ERR_INTERNAL_ERROR,
+ _("Could not find compute resource specified in '%s'"),
+ parsedUri->path);
+ goto cleanup;
}
if (ctx->computeResource->resourcePool == NULL) {
ESX_VI_ERROR(VIR_ERR_INTERNAL_ERROR, "%s",
_("Could not retrieve resource pool"));
- return -1;
+ goto cleanup;
+ }
+
+ if (virBufferError(&buffer)) {
+ virReportOOMError();
+ goto cleanup;
}
+ parsedUri->path_computeResource = virBufferContentAndReset(&buffer);
+
/* Lookup HostSystem */
- if (parsedUri->path_hostSystem == NULL &&
- STREQ(ctx->computeResource->_reference->type,
+ if (STREQ(ctx->computeResource->_reference->type,
"ClusterComputeResource")) {
- ESX_VI_ERROR(VIR_ERR_INVALID_ARG, "%s",
- _("Path has to specify the host system"));
- return -1;
+ if (item == NULL) {
+ ESX_VI_ERROR(VIR_ERR_INVALID_ARG,
+ _("Path '%s' does not specify a host system"),
+ parsedUri->path);
+ goto cleanup;
+ }
+
+ /* The path specified a cluster, it has to specify a host system too */
+ previousItem = item;
+ item = strtok_r(NULL, "/", &saveptr);
}
- if (parsedUri->path_hostSystem != NULL ||
- (parsedUri->path_computeResource != NULL &&
- parsedUri->path_hostSystem == NULL)) {
- if (parsedUri->path_hostSystem != NULL) {
- hostSystemName = parsedUri->path_hostSystem;
- } else {
- hostSystemName = parsedUri->path_computeResource;
- }
+ if (item != NULL) {
+ ESX_VI_ERROR(VIR_ERR_INVALID_ARG,
+ _("Path '%s' ends with an excess item"),
+ parsedUri->path);
+ goto cleanup;
+ }
+
+ parsedUri->path_hostSystem = strdup(previousItem);
+
+ if (parsedUri->path_hostSystem == NULL) {
+ virReportOOMError();
+ goto cleanup;
}
- if (esxVI_LookupHostSystem(ctx, hostSystemName,
+ if (esxVI_LookupHostSystem(ctx, parsedUri->path_hostSystem,
ctx->computeResource->_reference, NULL,
&ctx->hostSystem,
- esxVI_Occurrence_RequiredItem) < 0) {
- return -1;
+ esxVI_Occurrence_OptionalItem) < 0) {
+ goto cleanup;
}
- return 0;
+ if (ctx->hostSystem == NULL) {
+ ESX_VI_ERROR(VIR_ERR_INTERNAL_ERROR,
+ _("Could not find host system specified in '%s'"),
+ parsedUri->path);
+ goto cleanup;
+ }
+
+ result = 0;
+
+ cleanup:
+ if (result < 0) {
+ virBufferFreeAndReset(&buffer);
+ }
+
+ if (root != ctx->service->rootFolder &&
+ (ctx->datacenter == NULL || root != ctx->datacenter->hostFolder)) {
+ esxVI_ManagedObjectReference_Free(&root);
+ }
+
+ VIR_FREE(path);
+ esxVI_Folder_Free(&folder);
+
+ return result;
}
int
@@ -1469,8 +1628,7 @@ esxVI_BuildSelectSetCollection(esxVI_Context *ctx)
/* Folder -> childEntity (ManagedEntity) */
if (esxVI_BuildSelectSet(&ctx->selectSet_folderToChildEntity,
"folderToChildEntity",
- "Folder", "childEntity",
- "folderToChildEntity\0") < 0) {
+ "Folder", "childEntity", NULL) < 0) {
return -1;
}
@@ -1667,9 +1825,10 @@ esxVI_LookupObjectContentByType(esxVI_Context *ctx,
objectSpec->obj = root;
objectSpec->skip = esxVI_Boolean_False;
- if (STRNEQ(root->type, type)) {
+ if (STRNEQ(root->type, type) || STREQ(root->type, "Folder")) {
if (STREQ(root->type, "Folder")) {
- if (STREQ(type, "Datacenter") || STREQ(type, "ComputeResource") ||
+ if (STREQ(type, "Folder") || STREQ(type, "Datacenter") ||
+ STREQ(type, "ComputeResource") ||
STREQ(type, "ClusterComputeResource")) {
objectSpec->selectSet = ctx->selectSet_folderToChildEntity;
} else {
diff --git a/src/esx/esx_vi_generator.input b/src/esx/esx_vi_generator.input
index 361a6e7..1a67a8c 100644
--- a/src/esx/esx_vi_generator.input
+++ b/src/esx/esx_vi_generator.input
@@ -755,6 +755,10 @@ managed object Datacenter extends ManagedEntity
end
+managed object Folder extends ManagedEntity
+end
+
+
managed object HostSystem extends ManagedEntity
HostConfigManager configManager r
end
--
1.7.4.1
[libvirt] [PATCH] mingw: Don't use interface as an identifier
by Matthias Bolte
Because it's a define used in MS COM headers, and its usage
as an identifier results in a compile error.
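The clash, in a minimal sketch (quoting the Windows/MinGW COM
headers from memory, so treat the exact location as an assumption):

  /* basetyps.h effectively contains: */
  #define interface struct

  /* so a declaration in virsh.c like */
  const char *interface;
  /* preprocesses to "const char *struct;", a syntax error. */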
---
tools/virsh.c | 24 ++++++++++--------------
1 files changed, 10 insertions(+), 14 deletions(-)
diff --git a/tools/virsh.c b/tools/virsh.c
index 629233f..0f00463 100644
--- a/tools/virsh.c
+++ b/tools/virsh.c
@@ -1256,9 +1256,8 @@ static const vshCmdOptDef opts_domif_setlink[] = {
static bool
cmdDomIfSetLink (vshControl *ctl, const vshCmd *cmd)
{
-
virDomainPtr dom;
- const char *interface;
+ const char *iface;
const char *state;
const char *mac;
const char *desc;
@@ -1266,21 +1265,19 @@ cmdDomIfSetLink (vshControl *ctl, const vshCmd *cmd)
bool ret = false;
unsigned int flags = 0;
int i;
-
xmlDocPtr xml = NULL;
xmlXPathContextPtr ctxt = NULL;
xmlXPathObjectPtr obj = NULL;
xmlNodePtr cur = NULL;
xmlBufferPtr xml_buf = NULL;
-
if (!vshConnectionUsability(ctl, ctl->conn))
return false;
if (!(dom = vshCommandOptDomain(ctl, cmd, NULL)))
return false;
- if (vshCommandOptString(cmd, "interface", &interface) <= 0)
+ if (vshCommandOptString(cmd, "interface", &iface) <= 0)
goto cleanup;
if (vshCommandOptString(cmd, "state", &state) <= 0)
@@ -1332,7 +1329,7 @@ cmdDomIfSetLink (vshControl *ctl, const vshCmd *cmd)
xmlStrEqual(cur->name, BAD_CAST "mac")) {
mac = virXMLPropString(cur, "address");
- if (STRCASEEQ(mac, interface)) {
+ if (STRCASEEQ(mac, iface)) {
VIR_FREE(mac);
goto hit;
}
@@ -1342,7 +1339,7 @@ cmdDomIfSetLink (vshControl *ctl, const vshCmd *cmd)
}
}
- vshError(ctl, _("interface with address '%s' not found"), interface);
+ vshError(ctl, _("interface with address '%s' not found"), iface);
goto cleanup;
hit:
@@ -1424,13 +1421,12 @@ static bool
cmdDomIfGetLink (vshControl *ctl, const vshCmd *cmd)
{
virDomainPtr dom;
- const char *interface = NULL;
+ const char *iface = NULL;
int flags = 0;
char *state = NULL;
char *mac = NULL;
bool ret = false;
int i;
-
char *desc;
xmlDocPtr xml = NULL;
xmlXPathContextPtr ctxt = NULL;
@@ -1443,7 +1439,7 @@ cmdDomIfGetLink (vshControl *ctl, const vshCmd *cmd)
if (!(dom = vshCommandOptDomain (ctl, cmd, NULL)))
return false;
- if (vshCommandOptString (cmd, "interface", &interface) <= 0) {
+ if (vshCommandOptString (cmd, "interface", &iface) <= 0) {
virDomainFree(dom);
return false;
}
@@ -1481,7 +1477,7 @@ cmdDomIfGetLink (vshControl *ctl, const vshCmd *cmd)
mac = virXMLPropString(cur, "address");
- if (STRCASEEQ(mac, interface)){
+ if (STRCASEEQ(mac, iface)){
VIR_FREE(mac);
goto hit;
}
@@ -1490,7 +1486,7 @@ cmdDomIfGetLink (vshControl *ctl, const vshCmd *cmd)
}
}
- vshError(ctl, _("Interface with address '%s' not found."), interface);
+ vshError(ctl, _("Interface with address '%s' not found."), iface);
goto cleanup;
hit:
@@ -1500,7 +1496,7 @@ hit:
xmlStrEqual(cur->name, BAD_CAST "link")) {
state = virXMLPropString(cur, "state");
- vshPrint(ctl, "%s %s", interface, state);
+ vshPrint(ctl, "%s %s", iface, state);
VIR_FREE(state);
goto cleanup;
@@ -1509,7 +1505,7 @@ hit:
}
/* attribute not found */
- vshPrint(ctl, "%s default", interface);
+ vshPrint(ctl, "%s default", iface);
cleanup:
xmlXPathFreeObject(obj);
--
1.7.4.1