[libvirt] [PATCH] Add the vol-* commands to the virsh man page.
by Justin Clift
---
tools/virsh.pod | 95 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 95 insertions(+), 0 deletions(-)
diff --git a/tools/virsh.pod b/tools/virsh.pod
index 422ae7f..cab931c 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -755,6 +755,101 @@ Returns the UUID of the named I<pool>.
=back
+=head1 VOLUME COMMANDS
+
+=over 4
+
+=item B<vol-create> I<pool-or-uuid> I<FILE>
+
+Create a volume from an XML <file>.
+I<pool-or-uuid> is the name or UUID of the storage pool to create the volume in.
+I<FILE> is the XML <file> with the volume definition. An easy way to create the
+XML <file> is to use the B<vol-dumpxml> command to obtain the definition of a
+pre-existing volume.
+
+B<Example>
+
+ virsh vol-dumpxml --pool storagepool1 appvolume1 > newvolume.xml
+ edit newvolume.xml
+ virsh vol-create differentstoragepool newvolume.xml
+
+=item B<vol-create-from> I<pool-or-uuid> I<FILE> [optional I<--inputpool>
+I<pool-or-uuid>] I<vol-name-or-key>
+
+Create a volume, using another volume as input.
+I<pool-or-uuid> is the name or UUID of the storage pool to create the volume in.
+I<FILE> is the XML <file> with the volume definition.
+I<--inputpool> I<pool-or-uuid> is the name or UUID of the storage pool the
+source volume is in.
+I<vol-name-or-key> is the name or key of the source volume.
+
+=item B<vol-create-as> I<pool-or-uuid> I<name> I<capacity> [optional
+I<--allocation> I<size>] [optional I<--format> I<string>]
+
+Create a volume from a set of arguments.
+I<pool-or-uuid> is the name or UUID of the storage pool to create the volume in.
+I<name> is the name of the new volume.
+I<capacity> is the size of the volume to be created, with optional k, M, G, or
+T suffix.
+I<--allocation> I<size> is the initial size to be allocated in the volume, with
+optional k, M, G, or T suffix.
+I<--format> I<string> is used in file-based storage pools to specify the volume
+file format to use: raw, bochs, qcow, qcow2, vmdk.
+
+=item B<vol-clone> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-key> I<name>
+
+Clone an existing volume. Less powerful, but easier to type, version of
+B<vol-create-from>.
+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool to create the volume in.
+I<vol-name-or-key> is the name or key of the source volume.
+I<name> is the name of the new volume.
+
+=item B<vol-delete> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-key>
+
+Delete a given volume.
+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool the volume is in.
+I<vol-name-or-key> is the name or key of the volume to delete.
+
+=item B<vol-wipe> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-key>
+
+Wipe a volume, ensuring that data previously on the volume is not accessible
+to future reads.
+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool the volume is in.
+I<vol-name-or-key> is the name or key of the volume to wipe.
+
+=item B<vol-dumpxml> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-key>
+
+Output the volume information as an XML dump to stdout.
+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool the volume is in.
+I<vol-name-or-key> is the name or key of the volume to output the XML of.
+
+=item B<vol-info> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-key>
+
+Returns basic information about the given storage volume.
+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool the volume is in.
+I<vol-name-or-key> is the name or key of the volume to return information for.
+
+=item B<vol-list> I<--pool> I<pool-or-uuid>
+
+Return the list of volumes in the given storage pool.
+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool.
+
+=item B<vol-path> [optional I<--pool> I<pool-or-uuid>] I<vol-name-or-key>
+
+Return the path for a given volume.
+I<--pool> I<pool-or-uuid> is the name or UUID of the storage pool the volume is in.
+I<vol-name-or-key> is the name or key of the volume to return the path for.
+
+=item B<vol-name> I<vol-key-or-path>
+
+Return the name for a given volume.
+I<vol-key-or-path> is the key or path of the volume to return the name for.
+
+=item B<vol-key> I<vol-uuid>
+
+Return the volume key for a given volume UUID.
+I<vol-uuid> is the UUID of the volume to return the volume key for.
+
+=back
+
=head1 SECRET COMMANDS
The following commands manipulate "secrets" (e.g. passwords, passphrases and
--
1.7.0.1
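For reference, here is a short illustrative session exercising several of the commands documented above; the pool name (default) and volume name (scratchvol) are invented for the example, and the output will vary by setup.

  # Create a 10G raw volume in a hypothetical "default" pool.
  virsh vol-create-as default scratchvol 10G --format raw

  # Inspect it: list the pool's volumes, show basic info, and dump its XML.
  virsh vol-list --pool default
  virsh vol-info --pool default scratchvol
  virsh vol-dumpxml --pool default scratchvol > scratchvol.xml

  # Scrub the data and remove the volume when done.
  virsh vol-wipe --pool default scratchvol
  virsh vol-delete --pool default scratchvol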
[libvirt] Question about migration requirement
by Bruce Rogers
I've noticed that there seem to be some additional requirements placed on migration, compared to just remote management via libvirt.
For example, when I try to do the following:
virsh migrate running_vm_name qemu+ssh://1.2.3.4/system
there seems to be a requirement that both the source and target hosts resolve 1.2.3.4 the same way as far as naming services are concerned. If the address does not resolve to the same name on both the source and the target, the migration will fail.
Note that I can ssh to the other machine just fine using the ip address, or even do the following without problem:
virsh -c qemu+ssh://1.2.3.4/system <some-command>
Is this additional requirement for migration intentional (some security issue, etc.), or is it just an artifact of the implementation that could be fixed so it is not needlessly restrictive?
Bruce
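To restate the scenario concretely, here is a hypothetical sketch (the address is the one from the report, the resolution check is generic); it only illustrates the situation being described, not a confirmed workaround.

  # Run on both the source and the target host: the migration URI's address
  # should resolve through the naming services the same way on each side.
  getent hosts 1.2.3.4

  # The invocation from the report that fails when the resolution differs:
  virsh migrate running_vm_name qemu+ssh://1.2.3.4/system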
[libvirt] [PATCH] autobuild.sh: avoid bashism
by Eric Blake
* autobuild.sh: Replace 'set -o pipefail' with POSIX alternative.
Reported by Matthias Bolte.
---
On IRC, Matthias pointed out that his testing of my recent
autobuild.sh patch triggered a dash failure. Unfortunately,
the checkbashisms script is not smart enough to realize that
'set -o pipefail' is a bashism, yet it also does not provide
a bug reporting address in checkbashisms --help :(
autobuild.sh | 12 +++++++++---
1 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/autobuild.sh b/autobuild.sh
index 4de5af8..66ba132 100755
--- a/autobuild.sh
+++ b/autobuild.sh
@@ -35,9 +35,15 @@ esac
make
make install
-set -o pipefail
-make check 2>&1 | tee "$RESULTS"
-make syntax-check 2>&1 | tee -a "$RESULTS"
+# set -o pipefail is a bashism; this use of exec is the POSIX alternative
+exec 3>&1
+st=$(
+ exec 4>&1 >&3
+ { make check syntax-check 2>&1; echo $? >&4; } \
+ | tee "$RESULTS"
+)
+exec 3>&-
+test $st = 0
test -x /usr/bin/lcov && make cov
rm -f *.tar.gz
--
1.7.0.1
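As a standalone sketch of the file-descriptor trick used in the patch (some_command and results.log are placeholders), this is how plain POSIX sh can capture the exit status of the left-hand command in a pipeline:

  #!/bin/sh
  # Save the real stdout on fd 3, then run the pipeline inside a command
  # substitution: fd 4 smuggles the exit status out, while >&3 sends the
  # pipeline's normal output back to the terminal instead of being captured.
  exec 3>&1
  st=$(
    exec 4>&1 >&3
    { some_command 2>&1; echo $? >&4; } | tee results.log
  )
  exec 3>&-
  test "$st" = 0 || echo "some_command exited with status $st" >&2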
[libvirt] Break up virsh man page into smaller sections?
by Justin Clift
Hi all,
Looking at the virsh man page (tools/virsh.pod), it's getting pretty
unwieldy and hard to grok for end users.
Wondering if it's worth breaking it up into sections?
Maybe something similar to how git structures their man pages?
ie.
man virsh
(Giving an overview, the categories DEVICE, DOMAIN, NETWORKING,
STORAGE, etc., and listing the more detailed man pages.)
man virsh-console
(initially a copy of the existing virsh console man page contents.)
virsh console domain-id
Connect the virtual serial console for the guest.
man virsh-create
(initially a copy of the existing virsh create man page contents.)
virsh create FILE
Create a domain from an XML <file>. An easy way to create the XML
<file> is to use the dumpxml command to obtain the definition of
a pre-existing guest.
Example
# virsh dumpxml <domain-id> > domain.xml
edit domain.xml
virsh create < domain.xml
<etc>
???
Regards and best wishes,
Justin Clift
--
Salasaga - Open Source eLearning IDE
http://www.salasaga.org
[libvirt] [PATCHv3 0/2] build: automate VPATH testing
by Eric Blake
Picking up on a patch series that I had started a while ago:
https://www.redhat.com/archives/libvir-list/2010-April/msg01259.html
I finally have all the pieces in place for a successful run of
./autobuild.sh with a VPATH build. Now, we just have to remember
to run ./autobuild.sh more frequently.
Eric Blake (2):
build: fix VPATH build of docs
autobuild.sh: use VPATH build
.gitignore | 1 +
autobuild.sh | 10 +++++-----
docs/Makefile.am | 46 +++++++++++++++++++++++++++-------------------
docs/apibuild.py | 18 +++++++++++++-----
4 files changed, 46 insertions(+), 29 deletions(-)
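For anyone unfamiliar with the term, a VPATH build is simply an out-of-tree build: configure is run from a separate directory so generated files never land in the source tree. A generic autotools sketch follows (the build directory name is arbitrary, and libvirt's own bootstrap step is ./autogen.sh, which may itself run configure in-tree):

  ./autogen.sh              # in the source tree, to generate 'configure'
  mkdir -p build && cd build
  ../configure              # objects and generated docs stay under build/
  make && make check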
[libvirt] [PATCH 0/1] Fix nodeDeviceDestroy return value
by David Allan
Set nodeDeviceDestroy's return value correctly in failure cases and clarify the error message generated when the parent device is not vport capable.
David Allan (1):
Fix device destroy return value
src/conf/node_device_conf.c | 4 ++--
src/node_device/node_device_driver.c | 3 ++-
2 files changed, 4 insertions(+), 3 deletions(-)
[libvirt] [PATCH 0/1] Update nodedev scsi_host data
by David Allan
While investigating https://bugzilla.redhat.com/show_bug.cgi?id=597998 I noticed that my nodedev-create operations were timing out more often than not. This patch works around a race between udev generating a new device event and the corresponding sysfs entries being populated. This behavior has appeared before, but previously it was limited to incorrect WWNs; now no FC capability appears at all until the nodedev entry is refreshed.
David Allan (1):
Update nodedev scsi_host data before use
src/node_device/node_device_driver.c | 19 +++----------------
1 files changed, 3 insertions(+), 16 deletions(-)
[libvirt] [PATCH] Ensure UNIX domain sockets are removed on daemon shutdown
by Daniel P. Berrange
When libvirtd exits it leaves UNIX domain sockets on
the filesystem. These need to be removed.
The qemudInitPaths() method has significant code churn to
switch from using a pre-allocated buffer on the stack to
dynamically allocating on the heap.
* daemon/libvirtd.c, daemon/libvirtd.h: Store a reference
to the UNIX domain socket path and unlink it on shutdown
---
daemon/libvirtd.c | 127 ++++++++++++++++++++++++++++------------------------
daemon/libvirtd.h | 1 +
2 files changed, 69 insertions(+), 59 deletions(-)
diff --git a/daemon/libvirtd.c b/daemon/libvirtd.c
index e86f78d..381b2df 100644
--- a/daemon/libvirtd.c
+++ b/daemon/libvirtd.c
@@ -533,7 +533,7 @@ static int qemudWritePidFile(const char *pidFile) {
}
static int qemudListenUnix(struct qemud_server *server,
- const char *path, int readonly, int auth) {
+ char *path, int readonly, int auth) {
struct qemud_socket *sock;
struct sockaddr_un addr;
mode_t oldmask;
@@ -549,6 +549,7 @@ static int qemudListenUnix(struct qemud_server *server,
sock->port = -1;
sock->type = QEMUD_SOCK_TYPE_UNIX;
sock->auth = auth;
+ sock->path = path;
if ((sock->fd = socket(PF_UNIX, SOCK_STREAM, 0)) < 0) {
VIR_ERROR(_("Failed to create socket: %s"),
@@ -743,17 +744,30 @@ cleanup:
}
static int qemudInitPaths(struct qemud_server *server,
- char *sockname,
- char *roSockname,
- int maxlen)
+ char **sockname,
+ char **roSockname)
{
- char *sock_dir;
- char *dir_prefix = NULL;
- int ret = -1;
+ char *base_dir_prefix = NULL;
char *sock_dir_prefix = NULL;
+ int ret = -1;
+
+ /* The base_dir_prefix is the base under which all libvirtd
+ * files live */
+ if (server->privileged) {
+ if (!(base_dir_prefix = strdup (LOCAL_STATE_DIR)))
+ goto no_memory;
+ } else {
+ uid_t uid = geteuid();
+ if (!(base_dir_prefix = virGetUserDirectory(uid)))
+ goto cleanup;
+ }
+ /* The unix_sock_dir is the location under which all
+ * unix domain sockets live */
if (unix_sock_dir) {
- sock_dir = unix_sock_dir;
+ if (!(sock_dir_prefix = strdup(unix_sock_dir)))
+ goto no_memory;
+
/* Change the group ownership of /var/run/libvirt to unix_sock_gid */
if (server->privileged) {
if (chown(unix_sock_dir, -1, unix_sock_gid) < 0)
@@ -761,68 +775,53 @@ static int qemudInitPaths(struct qemud_server *server,
unix_sock_dir);
}
} else {
- sock_dir = sockname;
if (server->privileged) {
- dir_prefix = strdup (LOCAL_STATE_DIR);
- if (dir_prefix == NULL) {
- virReportOOMError();
- goto cleanup;
- }
- if (snprintf (sock_dir, maxlen, "%s/run/libvirt",
- dir_prefix) >= maxlen)
- goto snprintf_error;
+ if (virAsprintf(&sock_dir_prefix, "%s/run/libvirt",
+ base_dir_prefix) < 0)
+ goto no_memory;
} else {
- uid_t uid = geteuid();
- dir_prefix = virGetUserDirectory(uid);
- if (dir_prefix == NULL) {
- /* Do not diagnose here; virGetUserDirectory does that. */
- goto snprintf_error;
- }
-
- if (snprintf(sock_dir, maxlen, "%s/.libvirt", dir_prefix) >= maxlen)
- goto snprintf_error;
+ if (virAsprintf(&sock_dir_prefix, "%s/.libvirt",
+ base_dir_prefix) < 0)
+ goto no_memory;
}
}
- sock_dir_prefix = strdup (sock_dir);
- if (!sock_dir_prefix) {
- virReportOOMError();
- goto cleanup;
- }
-
if (server->privileged) {
- if (snprintf (sockname, maxlen, "%s/libvirt-sock",
- sock_dir_prefix) >= maxlen
- || (snprintf (roSockname, maxlen, "%s/libvirt-sock-ro",
- sock_dir_prefix) >= maxlen))
- goto snprintf_error;
- unlink(sockname);
- unlink(roSockname);
+ if (virAsprintf(sockname, "%s/libvirt-sock",
+ sock_dir_prefix) < 0)
+ goto no_memory;
+ if (virAsprintf(roSockname, "%s/libvirt-sock-ro",
+ sock_dir_prefix) < 0)
+ goto no_memory;
+ unlink(*sockname);
+ unlink(*roSockname);
} else {
- if (snprintf(sockname, maxlen, "@%s/libvirt-sock",
- sock_dir_prefix) >= maxlen)
- goto snprintf_error;
+ if (virAsprintf(sockname, "@%s/libvirt-sock",
+ sock_dir_prefix) < 0)
+ goto no_memory;
+ /* There is no RO socket in unprivileged mode,
+ * since the user always has full RW access
+ * to their private instance */
}
if (server->privileged) {
- if (!(server->logDir = strdup (LOCAL_STATE_DIR "/log/libvirt")))
- virReportOOMError();
+ if (virAsprintf(&server->logDir, "%s/log/libvirt",
+ base_dir_prefix) < 0)
+ goto no_memory;
} else {
- if (virAsprintf(&server->logDir, "%s/.libvirt/log", dir_prefix) < 0)
- virReportOOMError();
+ if (virAsprintf(&server->logDir, "%s/.libvirt/log",
+ base_dir_prefix) < 0)
+ goto no_memory;
}
- if (server->logDir == NULL)
- goto cleanup;
-
ret = 0;
- snprintf_error:
- if (ret)
- VIR_ERROR0(_("Resulting path too long for buffer in qemudInitPaths()"));
+no_memory:
+ if (ret != 0)
+ virReportOOMError();
cleanup:
- VIR_FREE(dir_prefix);
+ VIR_FREE(base_dir_prefix);
VIR_FREE(sock_dir_prefix);
return ret;
}
@@ -931,22 +930,22 @@ static struct qemud_server *qemudInitialize(void) {
}
static int qemudNetworkInit(struct qemud_server *server) {
- char sockname[PATH_MAX];
- char roSockname[PATH_MAX];
+ char *sockname = NULL;
+ char *roSockname = NULL;
#if HAVE_SASL
int err;
#endif /* HAVE_SASL */
- roSockname[0] = '\0';
-
- if (qemudInitPaths(server, sockname, roSockname, PATH_MAX) < 0)
+ if (qemudInitPaths(server, &sockname, &roSockname) < 0)
goto cleanup;
if (qemudListenUnix(server, sockname, 0, auth_unix_rw) < 0)
goto cleanup;
+ sockname = NULL;
- if (roSockname[0] != '\0' && qemudListenUnix(server, roSockname, 1, auth_unix_ro) < 0)
+ if (roSockname != NULL && qemudListenUnix(server, roSockname, 1, auth_unix_ro) < 0)
goto cleanup;
+ roSockname = NULL;
#if HAVE_SASL
if (auth_unix_rw == REMOTE_AUTH_SASL ||
@@ -1057,6 +1056,8 @@ static int qemudNetworkInit(struct qemud_server *server) {
return 0;
cleanup:
+ VIR_FREE(sockname);
+ VIR_FREE(roSockname);
return -1;
}
@@ -1080,6 +1081,7 @@ static int qemudNetworkEnable(struct qemud_server *server) {
return 0;
}
+
static gnutls_session_t
remoteInitializeTLSSession (void)
{
@@ -2422,6 +2424,13 @@ static void qemudCleanup(struct qemud_server *server) {
if (sock->watch)
virEventRemoveHandleImpl(sock->watch);
close(sock->fd);
+
+ /* Unlink unix domain sockets which are not in
+ * the abstract namespace */
+ if (sock->path &&
+ sock->path[0] != '@')
+ unlink(sock->path);
+
VIR_FREE(sock);
sock = next;
}
diff --git a/daemon/libvirtd.h b/daemon/libvirtd.h
index d292681..4d8e7e2 100644
--- a/daemon/libvirtd.h
+++ b/daemon/libvirtd.h
@@ -233,6 +233,7 @@ struct qemud_client {
struct qemud_socket {
+ char *path;
int fd;
int watch;
int readonly;
--
1.6.6.1
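To see the symptom this patch addresses, a quick manual check works; the socket paths below assume the default privileged install under /var/run/libvirt, and 'service' is just one way to stop the daemon.

  # Stop the daemon, then look for sockets left behind on the filesystem.
  service libvirtd stop
  ls -l /var/run/libvirt/libvirt-sock /var/run/libvirt/libvirt-sock-ro

  # Without the patch the socket files linger; with it, qemudCleanup() unlinks
  # them, skipping only abstract-namespace sockets (paths starting with '@'),
  # which never appear on the filesystem in the first place.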