[libvirt] [PATCH] qemu: Adapt to new log format
by Michal Privoznik
Since qemu commit 586502189edf9fd0f89a83de96717a2ea826fdb0, the log
lines reporting a chardev's path have changed from:
$ ./x86_64-softmmu/qemu-system-x86_64 -serial pty -serial pty -monitor pty
char device redirected to /dev/pts/5
char device redirected to /dev/pts/6
char device redirected to /dev/pts/7
to:
$ ./x86_64-softmmu/qemu-system-x86_64 -serial pty -serial pty -monitor pty
char device compat_monitor0 redirected to /dev/pts/5
char device serial0 redirected to /dev/pts/6
char device serial1 redirected to /dev/pts/7
However, with current code we are not prepared for such change, which
results in us being unable to start any domain.
---
src/qemu/qemu_process.c | 33 +++++++++++++++++++++++++++++----
1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index eac6553..29bd082 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1431,22 +1431,43 @@ cleanup:
*
* char device redirected to /dev/pts/3
*
+ * However, since 1.4 the line we are looking for has changed to:
+ *
+ * char device <alias> redirected to /some/path
+ *
* Returns -1 for error, 0 success, 1 continue reading
*/
static int
qemuProcessExtractTTYPath(const char *haystack,
size_t *offset,
+ const char *alias,
char **path)
{
- static const char needle[] = "char device redirected to";
- char *tmp, *dev;
+ static const char *needle[] = {"char device", "redirected to"};
+ const char *tmp, *dev;
VIR_FREE(*path);
/* First look for our magic string */
- if (!(tmp = strstr(haystack + *offset, needle))) {
+ if (!(tmp = strstr(haystack + *offset, needle[0])))
return 1;
+
+ tmp += strlen(needle[0]);
+ virSkipSpaces(&tmp);
+
+ if (STRPREFIX(tmp, "char")) {
+ /* we are dealing with new style */
+ tmp += strlen("char");
+ if (!STRPREFIX(tmp, alias))
+ return 1;
+
+ tmp += strlen(alias);
+ virSkipSpaces(&tmp);
}
- tmp += sizeof(needle);
+
+ if (!STRPREFIX(tmp, needle[1]))
+ return 1;
+
+ tmp += strlen(needle[1]);
dev = tmp;
/*
@@ -1569,6 +1590,7 @@ qemuProcessFindCharDevicePTYs(virDomainObjPtr vm,
virDomainChrDefPtr chr = vm->def->serials[i];
if (chr->source.type == VIR_DOMAIN_CHR_TYPE_PTY) {
if ((ret = qemuProcessExtractTTYPath(output, &offset,
+ chr->info.alias,
&chr->source.data.file.path)) != 0)
return ret;
}
@@ -1579,6 +1601,7 @@ qemuProcessFindCharDevicePTYs(virDomainObjPtr vm,
virDomainChrDefPtr chr = vm->def->parallels[i];
if (chr->source.type == VIR_DOMAIN_CHR_TYPE_PTY) {
if ((ret = qemuProcessExtractTTYPath(output, &offset,
+ chr->info.alias,
&chr->source.data.file.path)) != 0)
return ret;
}
@@ -1589,6 +1612,7 @@ qemuProcessFindCharDevicePTYs(virDomainObjPtr vm,
virDomainChrDefPtr chr = vm->def->channels[i];
if (chr->source.type == VIR_DOMAIN_CHR_TYPE_PTY) {
if ((ret = qemuProcessExtractTTYPath(output, &offset,
+ chr->info.alias,
&chr->source.data.file.path)) != 0)
return ret;
}
@@ -1608,6 +1632,7 @@ qemuProcessFindCharDevicePTYs(virDomainObjPtr vm,
if (chr->source.type == VIR_DOMAIN_CHR_TYPE_PTY &&
chr->targetType == VIR_DOMAIN_CHR_CONSOLE_TARGET_TYPE_VIRTIO) {
if ((ret = qemuProcessExtractTTYPath(output, &offset,
+ chr->info.alias,
&chr->source.data.file.path)) != 0)
return ret;
}
--
1.8.0.2
11 years, 12 months
[libvirt] [PATCH] ESX: append CURL headers to fix serviceContent entities
by Ata E Husain Bohra
Append the "SOAPAction" header to the CURL headers to populate the
serviceContent object entities that are otherwise missing.
---
src/esx/esx_vi.c | 2 ++
src/esx/esx_vi_generator.input | 3 +++
2 files changed, 5 insertions(+)
diff --git a/src/esx/esx_vi.c b/src/esx/esx_vi.c
index 99c1eb1..a379183 100644
--- a/src/esx/esx_vi.c
+++ b/src/esx/esx_vi.c
@@ -322,6 +322,8 @@ esxVI_CURL_Connect(esxVI_CURL *curl, esxUtil_ParsedUri *parsedUri)
* approx. 2 sec per POST operation.
*/
curl->headers = curl_slist_append(curl->headers, "Expect:");
+ curl->headers = curl_slist_append(curl->headers,
+ _("SOAPAction: \"urn:vim25\""));
if (curl->headers == NULL) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
diff --git a/src/esx/esx_vi_generator.input b/src/esx/esx_vi_generator.input
index 22c114e..236dcb3 100644
--- a/src/esx/esx_vi_generator.input
+++ b/src/esx/esx_vi_generator.input
@@ -1008,6 +1008,8 @@ object ServiceContent
ManagedObjectReference clusterProfileManager o
ManagedObjectReference complianceManager o
ManagedObjectReference localizationManager o
+ ManagedObjectReference storageResourceManager o
+ ManagedObjectReference guestOperationsManager o
end
@@ -1073,6 +1075,7 @@ object UserSession
DateTime lastActiveTime r
String locale r
String messageLocale r
+ Boolean extensionSession o
end
--
1.7.9.5
11 years, 12 months
[libvirt] [PATCH] sanlock: Chown lease files as well
by Michal Privoznik
Since sanlock doesn't run under root:root, we have chown()'ed the
__LIBVIRT__DISKS__ lease file to the user:group defined in the
sanlock config. However, when writing the patch I forgot about the
lease files for each disk (this is the
/var/lib/libvirt/sanlock/<md5>) file.
---
src/locking/lock_driver_sanlock.c | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/src/locking/lock_driver_sanlock.c b/src/locking/lock_driver_sanlock.c
index 75ced84..c955003 100644
--- a/src/locking/lock_driver_sanlock.c
+++ b/src/locking/lock_driver_sanlock.c
@@ -679,6 +679,17 @@ static int virLockManagerSanlockCreateLease(struct sanlk_resource *res)
}
VIR_DEBUG("Someone else just created lockspace %s", res->disks[0].path);
} else {
+ /* chown() the path to make sure sanlock can access it */
+ if ((driver->user != -1 || driver->group != -1) &&
+ (fchown(fd, driver->user, driver->group) < 0)) {
+ virReportSystemError(errno,
+ _("cannot chown '%s' to (%u, %u)"),
+ res->disks[0].path,
+ (unsigned int) driver->user,
+ (unsigned int) driver->group);
+ goto error_unlink;
+ }
+
if ((rv = sanlock_align(&res->disks[0])) < 0) {
if (rv <= -200)
virReportError(VIR_ERR_INTERNAL_ERROR,
--
1.8.0.2
11 years, 12 months
[libvirt] [test-API][PATCH V2] managedsave: change the checking method about bypass cache
by hongming
Check whether the O_DIRECT flag is in use on the managed save file
during the duration of the managed save, since that is the real effect
of the current implementation of the --bypass-cache flag.
Add break condition into the while loop of get_fileflags()
method in the PATCH V2
---
repos/managedsave/managedsave.py | 61 ++++++++++++++++++++------------------
1 files changed, 32 insertions(+), 29 deletions(-)
diff --git a/repos/managedsave/managedsave.py b/repos/managedsave/managedsave.py
index 4a92151..70d4ebc 100644
--- a/repos/managedsave/managedsave.py
+++ b/repos/managedsave/managedsave.py
@@ -2,6 +2,8 @@
import os
import math
+import thread
+import time
import libvirt
from libvirt import libvirtError
@@ -39,39 +41,43 @@ def check_savefile_create(*args):
logger.info("managed save file exists")
return True
-def compare_cachedfile(cachebefore, cacheafter):
- """Compare cached value before managed save and its value after
- managed save """
+def get_fileflags():
+ """Get the file flags of managed save file"""
+ cmds = "cat /proc/$(lsof -w /var/lib/libvirt/qemu/save/"+guestname+".save"\
+ "|awk '/libvirt_i/{print $2}')/fdinfo/1|grep flags|awk '{print $NF}'"
+ global fileflags
+ while True:
+ (status, output) = utils.exec_cmd(cmds, shell=True)
+ if status == 0:
+ if len(output) == 1:
+ logger.info("The flags of saved file %s " % output[0])
+ fileflags = output[0][-5]
+ break
+ else:
+ logger.error("Fail to get the flags of saved file")
+ return 1
+
+ thread.exit_thread()
- diff = cacheafter - cachebefore
- logger.info("diff is %s " % diff)
- percent = math.fabs(diff)/cachebefore
- logger.info("diff percent is %s " % percent)
- if percent < 0.05:
+def check_fileflag(fileflags):
+ """Check the file flags of managed save file if include O_DIRECT"""
+ if int(fileflags) == 4:
+ logger.info("file flags include O_DIRECT")
return True
else:
+ logger.error("file flags doesn't include O_DIRECT")
return False
-def get_cachevalue():
- """Get the file system cached value """
-
- cmds = "head -n4 /proc/meminfo|grep Cached|awk '{print $2}'"
- (status, output) = utils.exec_cmd(cmds, shell=True)
- if status != 0:
- logger.error("Fail to run cmd line to get cache")
- return 1
- else:
- logger.debug(output[0])
- cachevalue= int(output[0])
- return cachevalue
-
def managedsave(params):
"""Managed save a running domain"""
global logger
logger = params['logger']
+ global guestname
guestname = params['guestname']
flags = params ['flags']
+ global fileflags
+ fileflags = ''
#Save given flags to sharedmod.data
sharedmod.data['flagsave'] = flags
@@ -122,17 +128,14 @@ def managedsave(params):
#If given flags include bypass-cache,check if bypass file system cache
if flagn % 2 == 1:
- logger.info("Given flags include --bypass-cache")
- os.system('echo 3 > /proc/sys/vm/drop_caches')
- cache_before = get_cachevalue()
- logger.info("Cached value before managedsave is %s" % cache_before)
+ logger.info("Given flags include --bypass-cache")
+ thread.start_new_thread(get_fileflags,())
+ # Guarantee get_fileflags shell has run before managed save
+ time.sleep(5)
domobj.managedSave(flagn)
- cache_after = get_cachevalue()
- logger.info("Cached value after managedsave is %s" % cache_after)
-
- if compare_cachedfile(cache_before, cache_after):
+ if check_fileflag(fileflags):
logger.info("Bypass file system cache successfully")
else:
logger.error("Bypass file system cache failed")
--
1.7.7.6
11 years, 12 months
[libvirt] virsh "net-create" explanation
by Bilal Ahmad
Hi all,
I am new to libvirt and started looking at the source code. While tracing
back the virsh command "net-create", I got stuck into a loop and I would
really like someone to explain how this works.
In the virsh-network.c, from:
network = virNetworkCreateXML(ctl->conn, buffer);
I traced back to:
if (conn->networkDriver && conn->networkDriver->networkCreateXML) {
virNetworkPtr ret;
ret = conn->networkDriver->networkCreateXML(conn, xmlDesc);
Then I traced back to the following struct to find how networkCreateXML is
working:
struct _virNetworkDriver {
const char * name; /* the name of the driver */
virDrvOpen open;
virDrvClose close;
virDrvNumOfNetworks numOfNetworks;
virDrvListNetworks listNetworks;
virDrvNumOfDefinedNetworks numOfDefinedNetworks;
virDrvListDefinedNetworks listDefinedNetworks;
virDrvListAllNetworks listAllNetworks;
virDrvNetworkLookupByUUID networkLookupByUUID;
virDrvNetworkLookupByName networkLookupByName;
virDrvNetworkCreateXML networkCreateXML;
From the above code, I located the definition of virDrvNetworkCreateXML and
found the following:
typedef virNetworkPtr
(*virDrvNetworkCreateXML) (virConnectPtr conn,
const char *xmlDesc);
This is where I am unable to trace back to any other code. Can someone
please explain the code with typedef above and where is the definition of
the function this function pointer is pointing to? I want to get to the
root of the networkCreateXML function and how it exactly works. Can someone
please explain?
Thanks a lot,
Bilal
12 years
[libvirt] [PATCH] python: Adapt to virevent rename
by Michal Privoznik
With our recent renames under src/util/* we forgot to adapt
the Python wrapper code generator. This results in some methods being
not exposed:
$ python examples/domain-events/events-python/event-test.py
Using uri:qemu:///system
Traceback (most recent call last):
File "examples/domain-events/events-python/event-test.py", line 585, in <module>
main()
File "examples/domain-events/events-python/event-test.py", line 543, in main
virEventLoopPureStart()
File "examples/domain-events/events-python/event-test.py", line 416, in virEventLoopPureStart
virEventLoopPureRegister()
File "examples/domain-events/events-python/event-test.py", line 397, in virEventLoopPureRegister
libvirt.virEventRegisterImpl(virEventAddHandleImpl,
AttributeError: 'module' object has no attribute 'virEventRegisterImpl'
---
Pushed under trivial rule.
python/generator.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/python/generator.py b/python/generator.py
index e9b9270..bae4edc 100755
--- a/python/generator.py
+++ b/python/generator.py
@@ -132,7 +132,7 @@ class docParser(xml.sax.handler.ContentHandler):
if tag == 'function':
if self.function != None:
if (self.function_module == "libvirt" or
- self.function_module == "event" or
+ self.function_module == "virevent" or
self.function_module == "virterror"):
function(self.function, self.function_descr,
self.function_return, self.function_args,
--
1.8.0.2
12 years
[libvirt] [test-API][PATCH] Add volume upload and download cases
by Wayne Sun
* test download storage volumes using storage download API.
* test upload storage volumes using storage upload API.
For upload case, only raw volume format is supported, other
format will fail.
The offset and length value should be chosen from 0 and
1048576, because upload size is set as 1M.
* both case use blocking stream.
* sample conf is added.
Signed-off-by: Wayne Sun <gsun(a)redhat.com>
---
cases/storage_vol_upload_download.conf | 127 ++++++++++++++++++++++
repos/storage/vol_download.py | 157 +++++++++++++++++++++++++++
repos/storage/vol_upload.py | 183 ++++++++++++++++++++++++++++++++
3 files changed, 467 insertions(+), 0 deletions(-)
create mode 100644 cases/storage_vol_upload_download.conf
create mode 100644 repos/storage/vol_download.py
create mode 100644 repos/storage/vol_upload.py
diff --git a/cases/storage_vol_upload_download.conf b/cases/storage_vol_upload_download.conf
new file mode 100644
index 0000000..b393814
--- /dev/null
+++ b/cases/storage_vol_upload_download.conf
@@ -0,0 +1,127 @@
+storage:create_dir_pool
+ poolname
+ $defaultpoolname
+
+storage:vol_upload
+ poolname
+ $defaultpoolname
+ volname
+ $defaultvolumename
+ capacity
+ 10M
+ volformat
+ raw
+ offset
+ 0
+ length
+ 0
+clean
+
+storage:vol_upload
+ poolname
+ $defaultpoolname
+ volname
+ $defaultvolumename
+ capacity
+ 10M
+ volformat
+ raw
+ offset
+ 1048576
+ length
+ 0
+clean
+
+storage:vol_upload
+ poolname
+ $defaultpoolname
+ volname
+ $defaultvolumename
+ capacity
+ 10M
+ volformat
+ raw
+ offset
+ 0
+ length
+ 1048576
+clean
+
+storage:vol_upload
+ poolname
+ $defaultpoolname
+ volname
+ $defaultvolumename
+ capacity
+ 10M
+ volformat
+ raw
+ offset
+ 1048576
+ length
+ 1048576
+clean
+
+storage:vol_download
+ poolname
+ $defaultpoolname
+ volname
+ $defaultvolumename
+ capacity
+ 50M
+ volformat
+ raw
+ offset
+ 0
+ length
+ 0
+clean
+
+storage:vol_download
+ poolname
+ $defaultpoolname
+ volname
+ $defaultvolumename
+ capacity
+ 50M
+ volformat
+ qcow2
+ offset
+ 1048576
+ length
+ 0
+clean
+
+storage:vol_download
+ poolname
+ $defaultpoolname
+ volname
+ $defaultvolumename
+ capacity
+ 50M
+ volformat
+ qed
+ offset
+ 0
+ length
+ 1048576
+clean
+
+storage:vol_download
+ poolname
+ $defaultpoolname
+ volname
+ $defaultvolumename
+ capacity
+ 50M
+ volformat
+ raw
+ offset
+ 1048576
+ length
+ 1048576
+clean
+
+storage:destroy_pool
+ poolname
+ $defaultpoolname
diff --git a/repos/storage/vol_download.py b/repos/storage/vol_download.py
new file mode 100644
index 0000000..839bc8a
--- /dev/null
+++ b/repos/storage/vol_download.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+# storage volume download testing
+
+import os
+import string
+import hashlib
+from xml.dom import minidom
+
+import libvirt
+from libvirt import libvirtError
+
+from src import sharedmod
+from utils import utils
+
+required_params = ('poolname', 'volname', 'volformat', 'capacity', 'offset',
+ 'length',)
+optional_params = {'xml' : 'xmls/dir_volume.xml',
+ }
+
+def get_pool_path(poolobj):
+ """ get pool xml description
+ """
+ poolxml = poolobj.XMLDesc(0)
+
+ logger.debug("the xml description of pool is %s" % poolxml)
+
+ doc = minidom.parseString(poolxml)
+ path_element = doc.getElementsByTagName('path')[0]
+ textnode = path_element.childNodes[0]
+ path_value = textnode.data
+
+ return path_value
+
+def write_file(path, capacity):
+ """write test data to file
+ """
+ logger.info("write %s data into file %s" % (capacity, path))
+ out = utils.get_capacity_suffix_size(capacity)
+ f = open(path, 'w')
+ datastr = ''.join(string.lowercase + string.uppercase
+ + string.digits + '.' + '\n')
+ repeat = out['capacity_byte'] / 64
+ data = ''.join(repeat * datastr)
+ f.write(data)
+ f.close()
+
+def digest(path, offset, length):
+ """read data from file with length bytes, begin at offset
+ and return md5 hexdigest
+ """
+ f = open(path, 'r')
+ f.seek(offset)
+ m = hashlib.md5()
+ done = 0
+
+ while True:
+ want = 1024
+ if length and length - done < want:
+ want = length - done
+ outstr = f.read(want)
+ got = len(outstr)
+ if got == 0:
+ break
+ done += got
+ m.update(outstr)
+
+ logger.debug("total %s bytes data is readed" % done)
+
+ f.close()
+ return m.hexdigest()
+
+def handler(stream, data, file_):
+ return file_.write(data)
+
+def vol_download(params):
+ """test volume download and check"""
+
+ global logger
+ logger = params['logger']
+ poolname = params['poolname']
+ volname = params['volname']
+ volformat = params['volformat']
+ offset = int(params['offset'])
+ length = int(params['length'])
+ capacity = params['capacity']
+ xmlstr = params['xml']
+
+ logger.info("the poolname is %s, volname is %s, volformat is %s" %
+ (poolname, volname, volformat))
+ logger.info("download offset is: %s" % offset)
+ logger.info("the data length to download is: %s" % length)
+
+ conn = sharedmod.libvirtobj['conn']
+ try:
+ poolobj = conn.storagePoolLookupByName(poolname)
+ path_value = get_pool_path(poolobj)
+ volume_path = path_value + "/" + volname
+
+ xmlstr = xmlstr.replace('VOLPATH', volume_path)
+ xmlstr = xmlstr.replace('SUFFIX', capacity[-1])
+ xmlstr = xmlstr.replace('CAP', capacity[:-1])
+ logger.debug("volume xml:\n%s" % xmlstr)
+
+ logger.info("create %s raw volume" % volname)
+ vol = poolobj.createXML(xmlstr, 0)
+
+ write_file(volume_path, capacity)
+ origdigest = digest(volume_path, offset, length)
+ logger.debug("the md5 hex digest of data read from %s is: %s" %
+ (volume_path, origdigest))
+
+ st = conn.newStream(0)
+
+ test_path = path_value + "/" + "vol_test"
+
+ f = open(test_path, 'w')
+ logger.info("start download")
+ vol.download(st, offset, length, 0)
+ logger.info("downloaded all data")
+ st.recvAll(handler, f)
+ logger.info("finished stream")
+ st.finish()
+ f.close()
+
+ newdigest = digest(test_path, 0, 0)
+ logger.debug("the md5 hex digest of data read from %s is: %s" %
+ (test_path, newdigest))
+
+ if origdigest == newdigest:
+ logger.info("file digests match, download succeed")
+ else:
+ logger.error("file digests not match, download failed")
+ return 1
+
+ except libvirtError, e:
+ logger.error("libvirt call failed: " + str(e))
+ return 1
+
+ return 0
+
+def vol_download_clean(params):
+ """clean testing environment"""
+ poolname = params['poolname']
+ volname = params['volname']
+
+ conn = sharedmod.libvirtobj['conn']
+ poolobj = conn.storagePoolLookupByName(poolname)
+ path_value = get_pool_path(poolobj)
+ test_path = path_value + "/" + "vol_test"
+
+ vol = poolobj.storageVolLookupByName(volname)
+ vol.delete(0)
+
+ if os.path.exists(test_path):
+ os.unlink(test_path)
+
+ return 0
diff --git a/repos/storage/vol_upload.py b/repos/storage/vol_upload.py
new file mode 100644
index 0000000..42af06f
--- /dev/null
+++ b/repos/storage/vol_upload.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python
+# storage volume upload testing, only raw format volume is
+# supported, other format might fail. offset and length can
+# only be chosen in 0 and 1048576.
+
+import os
+import string
+import hashlib
+from xml.dom import minidom
+
+import libvirt
+from libvirt import libvirtError
+
+from src import sharedmod
+
+required_params = ('poolname', 'volname', 'volformat', 'capacity',
+ 'offset', 'length',)
+optional_params = {'xml' : 'xmls/dir_volume.xml',
+ }
+
+def get_pool_path(poolobj):
+ """ get pool xml description
+ """
+ poolxml = poolobj.XMLDesc(0)
+
+ logger.debug("the xml description of pool is %s" % poolxml)
+
+ doc = minidom.parseString(poolxml)
+ path_element = doc.getElementsByTagName('path')[0]
+ textnode = path_element.childNodes[0]
+ path_value = textnode.data
+
+ return path_value
+
+def write_file(path):
+ """write 1M test data to file
+ """
+ logger.info("write data into file %s" % path)
+ f = open(path, 'w')
+ datastr = ''.join(string.lowercase + string.uppercase
+ + string.digits + '.' + '\n')
+ data = ''.join(16384 * datastr)
+ f.write(data)
+ f.close()
+
+def digest(path, offset, length):
+ """read data from file with length bytes, begin at offset
+ and return md5 hexdigest
+ """
+ f = open(path, 'r')
+ f.seek(offset)
+ m = hashlib.md5()
+ done = 0
+
+ while True:
+ want = 1024
+ if length and length - done < want:
+ want = length - done
+ outstr = f.read(want)
+ got = len(outstr)
+ if got == 0:
+ break
+ done += got
+ m.update(outstr)
+
+ logger.debug("total %s bytes data is readed" % done)
+
+ f.close()
+ return m.hexdigest()
+
+def handler(stream, data, file_):
+ return file_.read(data)
+
+def vol_upload(params):
+ """test volume download and check"""
+
+ global logger
+ logger = params['logger']
+ poolname = params['poolname']
+ volname = params['volname']
+ volformat = params['volformat']
+ offset = int(params['offset'])
+ length = int(params['length'])
+ capacity = params['capacity']
+ xmlstr = params['xml']
+
+ logger.info("the poolname is %s, volname is %s, volformat is %s" %
+ (poolname, volname, volformat))
+ logger.info("upload offset is: %s" % offset)
+ logger.info("the data length to upload is: %s" % length)
+
+ conn = sharedmod.libvirtobj['conn']
+ try:
+ poolobj = conn.storagePoolLookupByName(poolname)
+ path_value = get_pool_path(poolobj)
+ volume_path = path_value + "/" + volname
+
+ xmlstr = xmlstr.replace('VOLPATH', volume_path)
+ xmlstr = xmlstr.replace('SUFFIX', capacity[-1])
+ xmlstr = xmlstr.replace('CAP', capacity[:-1])
+ logger.debug("volume xml:\n%s" % xmlstr)
+
+ logger.info("create %s raw volume" % volname)
+ vol = poolobj.createXML(xmlstr, 0)
+
+ test_path = path_value + "/" + "vol_test"
+ write_file(test_path)
+ olddigest = digest(test_path, 0, 0)
+ logger.debug("the old file digest is: %s" % olddigest)
+
+ if offset:
+ origdigestpre = digest(volume_path, 0, offset)
+ else:
+ origdigestpre = ''
+ logger.debug("the original pre region digest is: %s" % origdigestpre)
+
+ origdigestpost = digest(volume_path, offset + 1024 * 1024, 0)
+ logger.debug("the original post region digest is: %s" % origdigestpost)
+
+ st = conn.newStream(0)
+
+ f = open(test_path, 'r')
+ logger.info("start upload")
+ vol.upload(st, offset, length, 0)
+ logger.info("sent all data")
+ st.sendAll(handler, f)
+ logger.info("finished stream")
+ st.finish()
+ f.close()
+
+ newdigest = digest(volume_path, offset, 1024 * 1024)
+ logger.debug("the new file digest is: %s" % olddigest)
+
+ if offset:
+ newdigestpre = digest(volume_path, 0, offset)
+ else:
+ newdigestpre = ''
+ logger.debug("the new pre region digest is: %s" % origdigestpre)
+
+ newdigestpost = digest(volume_path, offset + 1024 * 1024, 0)
+ logger.debug("the new post region digest is: %s" % origdigestpost)
+
+ if newdigestpre == origdigestpre:
+ logger.info("file pre region digests match")
+ else:
+ logger.error("file pre region digests not match")
+ return 1
+
+ if olddigest == newdigest:
+ logger.info("file digests match")
+ else:
+ logger.error("file digests not match")
+ return 1
+
+ if newdigestpost == origdigestpost:
+ logger.info("file post region digests match")
+ else:
+ logger.error("file post region digests not match")
+ return 1
+
+ except libvirtError, e:
+ logger.error("libvirt call failed: " + str(e))
+ return 1
+
+ return 0
+
+def vol_upload_clean(params):
+ """clean testing environment"""
+ poolname = params['poolname']
+ volname = params['volname']
+
+ conn = sharedmod.libvirtobj['conn']
+ poolobj = conn.storagePoolLookupByName(poolname)
+ path_value = get_pool_path(poolobj)
+ test_path = path_value + "/" + "vol_test"
+
+ vol = poolobj.storageVolLookupByName(volname)
+ vol.delete(0)
+
+ if os.path.exists(test_path):
+ os.unlink(test_path)
+
+ return 0
--
1.7.1
12 years
[libvirt] [PATCH 0/6] Fix multiple problems while dealing with transient networks
by Peter Krempa
This series fixes a few issues when dealing with transient networks:
1) Check for multiple DHCP sections is added also to transient networks
2) Create and clean up dnsmasq config files when dealing with transient networks
3) Add support for making network transient with undefine
4) a few cleanups
Peter Krempa (6):
conf: net: Fix helper for applying new network definition
net: Change argument type of virNetworkObjIsDuplicate()
net: Move creation of dnsmasq hosts file to function starting dnsmasq
net: Remove dnsmasq and radvd files also when destroying transient
nets
net: Re-use checks when creating transient networks
net: Add support for changing persistent networks to transient
src/conf/network_conf.c | 4 +-
src/conf/network_conf.h | 2 +-
src/network/bridge_driver.c | 261 +++++++++++++++++++++++---------------------
3 files changed, 138 insertions(+), 129 deletions(-)
--
1.7.12.4
12 years
Re: [libvirt] [Users] Vdsm/libvir error during deploy
by Dan Kenigsberg
On Mon, Dec 24, 2012 at 02:18:48PM +0100, Joop wrote:
> Dan Kenigsberg wrote:
> >Which version of libvirt is installed on your host?
> >
> libvirt-1.0.1-2.fc17.x86_64
> libvirt-client-1.0.1-2.fc17.x86_64
> libvirt-daemon-1.0.1-2.fc17.x86_64
> libvirt-daemon-config-network-1.0.1-2.fc17.x86_64
> libvirt-daemon-config-nwfilter-1.0.1-2.fc17.x86_64
> libvirt-daemon-driver-interface-1.0.1-2.fc17.x86_64
> libvirt-daemon-driver-lxc-1.0.1-2.fc17.x86_64
> libvirt-daemon-driver-network-1.0.1-2.fc17.x86_64
> libvirt-daemon-driver-nodedev-1.0.1-2.fc17.x86_64
> libvirt-daemon-driver-nwfilter-1.0.1-2.fc17.x86_64
> libvirt-daemon-driver-qemu-1.0.1-2.fc17.x86_64
> libvirt-daemon-driver-secret-1.0.1-2.fc17.x86_64
> libvirt-daemon-driver-storage-1.0.1-2.fc17.x86_64
> libvirt-daemon-driver-uml-1.0.1-2.fc17.x86_64
> libvirt-daemon-driver-xen-1.0.1-2.fc17.x86_64
> libvirt-lock-sanlock-1.0.1-2.fc17.x86_64
> libvirt-python-1.0.1-2.fc17.x86_64
>
> From virt-preview repo
> [fedora-virt-preview]
> name=Virtualization packages from Rawhide built for latest Fedora
> baseurl=http://fedorapeople.org/groups/virt/virt-preview/fedora-$releasev...
> enabled=1
> skip_if_unavailable=1
> gpgcheck=0
>
> >What is the output of the following python script on your machine? Mine
> >says "1". Could it be that your libvirt says "0"?
> >
> >=============
> >
> >from vdsm import libvirtconnection
> >
> >conn = libvirtconnection.get()
> >netXml = """<network>
> > <name>test</name>
> > <forward mode='passthrough'>
> > <interface dev='em1'/>
> > </forward>
> ></network>
> >"""
> >
> >net = conn.networkDefineXML(netXml)
> >net.create()
> >print net.isPersistent()
> >net.destroy()
> >net.undefine()
> >
> >
> 0
> libvir: Network Driver error : Network not found: no network with
> matching uuid
> Traceback (most recent call last):
> File "test.py", line 16, in <module>
> net.undefine()
> File "/usr/lib64/python2.7/site-packages/libvirt.py", line 2154, in
> undefine
> if ret == -1: raise libvirtError ('virNetworkUndefine() failed',
> net=self)
> libvirt.libvirtError: Network not found: no network with matching uuid
>
> So '0' :-((
> Suppose that error isn't good either.
> Should I downgrade to an earlier version of libvirt?
Please try. My guess is that this is a libvirt bug added by
http://libvirt.org/git/?p=libvirt.git;a=commitdiff;h=0211fd6e04cdc402da20...
in libvirt 1.0.1. More authoritative answer is expected by those added
to the CC line.
12 years
[libvirt] [PATCH] ESX: append CURL headers to fix serviceContent entities
by Ata E Husain Bohra
It seems with current SOAP header the call to retrieve "serviceContent" object
does not populate all available references (for instance: ovfManager). The host
exposes details of these Managed Object References (MoBs) if header is appended
with the "SOAPAction" field.
Ata E Husain Bohra (1):
ESX: append CURL headers to fix serviceContent entities
src/esx/esx_vi.c | 2 ++
src/esx/esx_vi_generator.input | 3 +++
2 files changed, 5 insertions(+)
--
1.7.9.5
12 years