[libvirt] [PATCH v3] qemu: rename migration APIs to include Src or Dst in their name
by Daniel P. Berrangé
While reading the migration code it is very difficult to understand
whether a particular function runs on the src side, the dst side, or
either. Putting "Src" or "Dst" in the method names makes this much
more obvious. "Any" is used in a few helpers which can be called from
both sides.
Signed-off-by: Daniel P. Berrangé <berrange(a)redhat.com>
---
Changed in v3:
- Made most naming suggestions from John.
src/qemu/qemu_driver.c | 194 +++----
src/qemu/qemu_migration.c | 1378 +++++++++++++++++++++++----------------------
src/qemu/qemu_migration.h | 264 +++++----
src/qemu/qemu_process.c | 20 +-
tests/qemuxml2argvtest.c | 4 +-
5 files changed, 940 insertions(+), 920 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index ce25e7a088..69a591d633 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -754,7 +754,7 @@ qemuStateInitialize(bool privileged,
if (!(qemu_driver->sharedDevices = virHashCreate(30, qemuSharedDeviceEntryFree)))
goto error;
- if (qemuMigrationErrorInit(qemu_driver) < 0)
+ if (qemuMigrationDstErrorInit(qemu_driver) < 0)
goto error;
if (privileged) {
@@ -3282,7 +3282,7 @@ qemuDomainSaveMemory(virQEMUDriverPtr driver,
goto cleanup;
/* Perform the migration */
- if (qemuMigrationToFile(driver, vm, fd, compressedpath, asyncJob) < 0)
+ if (qemuMigrationSrcToFile(driver, vm, fd, compressedpath, asyncJob) < 0)
goto cleanup;
/* Touch up file header to mark image complete. */
@@ -3338,7 +3338,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver,
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto cleanup;
- if (!qemuMigrationIsAllowed(driver, vm, false, 0))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_SAVE,
@@ -3893,11 +3893,11 @@ doCoreDump(virQEMUDriverPtr driver,
goto cleanup;
}
- if (!qemuMigrationIsAllowed(driver, vm, false, 0))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
- ret = qemuMigrationToFile(driver, vm, fd, compressedpath,
- QEMU_ASYNC_JOB_DUMP);
+ ret = qemuMigrationSrcToFile(driver, vm, fd, compressedpath,
+ QEMU_ASYNC_JOB_DUMP);
}
if (ret < 0)
@@ -4815,8 +4815,8 @@ processMonitorEOFEvent(virQEMUDriverPtr driver,
if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
stopFlags |= VIR_QEMU_PROCESS_STOP_MIGRATED;
- qemuMigrationErrorSave(driver, vm->def->name,
- qemuMonitorLastError(priv->mon));
+ qemuMigrationDstErrorSave(driver, vm->def->name,
+ qemuMonitorLastError(priv->mon));
}
event = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_STOPPED,
@@ -12109,15 +12109,15 @@ qemuDomainMigratePrepareTunnel(virConnectPtr dconn,
goto cleanup;
}
- if (!(def = qemuMigrationPrepareDef(driver, dom_xml, dname, &origname)))
+ if (!(def = qemuMigrationAnyPrepareDef(driver, dom_xml, dname, &origname)))
goto cleanup;
if (virDomainMigratePrepareTunnelEnsureACL(dconn, def) < 0)
goto cleanup;
- ret = qemuMigrationPrepareTunnel(driver,
- NULL, 0, NULL, NULL, /* No cookies in v2 */
- st, &def, origname, flags);
+ ret = qemuMigrationDstPrepareTunnel(driver,
+ NULL, 0, NULL, NULL, /* No cookies in v2 */
+ st, &def, origname, flags);
cleanup:
VIR_FREE(origname);
@@ -12158,7 +12158,7 @@ qemuDomainMigratePrepare2(virConnectPtr dconn,
goto cleanup;
}
- if (!(compression = qemuMigrationCompressionParse(NULL, 0, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(NULL, 0, flags)))
goto cleanup;
if (virLockManagerPluginUsesState(driver->lockManager)) {
@@ -12168,7 +12168,7 @@ qemuDomainMigratePrepare2(virConnectPtr dconn,
goto cleanup;
}
- if (!(def = qemuMigrationPrepareDef(driver, dom_xml, dname, &origname)))
+ if (!(def = qemuMigrationAnyPrepareDef(driver, dom_xml, dname, &origname)))
goto cleanup;
if (virDomainMigratePrepare2EnsureACL(dconn, def) < 0)
@@ -12178,11 +12178,11 @@ qemuDomainMigratePrepare2(virConnectPtr dconn,
* length was not sufficiently large, causing failures
* migrating between old & new libvirtd
*/
- ret = qemuMigrationPrepareDirect(driver,
- NULL, 0, NULL, NULL, /* No cookies */
- uri_in, uri_out,
- &def, origname, NULL, 0, NULL, 0,
- compression, flags);
+ ret = qemuMigrationDstPrepareDirect(driver,
+ NULL, 0, NULL, NULL, /* No cookies */
+ uri_in, uri_out,
+ &def, origname, NULL, 0, NULL, 0,
+ compression, flags);
cleanup:
VIR_FREE(compression);
@@ -12218,7 +12218,7 @@ qemuDomainMigratePerform(virDomainPtr dom,
goto cleanup;
}
- if (!(compression = qemuMigrationCompressionParse(NULL, 0, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(NULL, 0, flags)))
goto cleanup;
if (!(vm = qemuDomObjFromDomain(dom)))
@@ -12240,11 +12240,11 @@ qemuDomainMigratePerform(virDomainPtr dom,
*
* Consume any cookie we were able to decode though
*/
- ret = qemuMigrationPerform(driver, dom->conn, vm, NULL,
- NULL, dconnuri, uri, NULL, NULL, 0, NULL, 0,
- compression, &migParams, cookie, cookielen,
- NULL, NULL, /* No output cookies in v2 */
- flags, dname, resource, false);
+ ret = qemuMigrationSrcPerform(driver, dom->conn, vm, NULL,
+ NULL, dconnuri, uri, NULL, NULL, 0, NULL, 0,
+ compression, &migParams, cookie, cookielen,
+ NULL, NULL, /* No output cookies in v2 */
+ flags, dname, resource, false);
cleanup:
qemuMigrationParamsClear(&migParams);
@@ -12273,7 +12273,7 @@ qemuDomainMigrateFinish2(virConnectPtr dconn,
if (!vm) {
virReportError(VIR_ERR_NO_DOMAIN,
_("no domain with matching name '%s'"), dname);
- qemuMigrationErrorReport(driver, dname);
+ qemuMigrationDstErrorReport(driver, dname);
goto cleanup;
}
@@ -12286,9 +12286,9 @@ qemuDomainMigrateFinish2(virConnectPtr dconn,
* length was not sufficiently large, causing failures
* migrating between old & new libvirtd
*/
- dom = qemuMigrationFinish(driver, dconn, vm,
- NULL, 0, NULL, NULL, /* No cookies */
- flags, retcode, false);
+ dom = qemuMigrationDstFinish(driver, dconn, vm,
+ NULL, 0, NULL, NULL, /* No cookies */
+ flags, retcode, false);
cleanup:
return dom;
@@ -12320,8 +12320,8 @@ qemuDomainMigrateBegin3(virDomainPtr domain,
return NULL;
}
- return qemuMigrationBegin(domain->conn, vm, xmlin, dname,
- cookieout, cookieoutlen, 0, NULL, flags);
+ return qemuMigrationSrcBegin(domain->conn, vm, xmlin, dname,
+ cookieout, cookieoutlen, 0, NULL, flags);
}
static char *
@@ -12366,9 +12366,9 @@ qemuDomainMigrateBegin3Params(virDomainPtr domain,
goto cleanup;
}
- ret = qemuMigrationBegin(domain->conn, vm, xmlin, dname,
- cookieout, cookieoutlen,
- nmigrate_disks, migrate_disks, flags);
+ ret = qemuMigrationSrcBegin(domain->conn, vm, xmlin, dname,
+ cookieout, cookieoutlen,
+ nmigrate_disks, migrate_disks, flags);
cleanup:
VIR_FREE(migrate_disks);
@@ -12407,21 +12407,21 @@ qemuDomainMigratePrepare3(virConnectPtr dconn,
goto cleanup;
}
- if (!(compression = qemuMigrationCompressionParse(NULL, 0, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(NULL, 0, flags)))
goto cleanup;
- if (!(def = qemuMigrationPrepareDef(driver, dom_xml, dname, &origname)))
+ if (!(def = qemuMigrationAnyPrepareDef(driver, dom_xml, dname, &origname)))
goto cleanup;
if (virDomainMigratePrepare3EnsureACL(dconn, def) < 0)
goto cleanup;
- ret = qemuMigrationPrepareDirect(driver,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- uri_in, uri_out,
- &def, origname, NULL, 0, NULL, 0,
- compression, flags);
+ ret = qemuMigrationDstPrepareDirect(driver,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ uri_in, uri_out,
+ &def, origname, NULL, 0, NULL, 0,
+ compression, flags);
cleanup:
VIR_FREE(compression);
@@ -12483,7 +12483,7 @@ qemuDomainMigratePrepare3Params(virConnectPtr dconn,
if (nmigrate_disks < 0)
goto cleanup;
- if (!(compression = qemuMigrationCompressionParse(params, nparams, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(params, nparams, flags)))
goto cleanup;
if (flags & VIR_MIGRATE_TUNNELLED) {
@@ -12496,19 +12496,19 @@ qemuDomainMigratePrepare3Params(virConnectPtr dconn,
goto cleanup;
}
- if (!(def = qemuMigrationPrepareDef(driver, dom_xml, dname, &origname)))
+ if (!(def = qemuMigrationAnyPrepareDef(driver, dom_xml, dname, &origname)))
goto cleanup;
if (virDomainMigratePrepare3ParamsEnsureACL(dconn, def) < 0)
goto cleanup;
- ret = qemuMigrationPrepareDirect(driver,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- uri_in, uri_out,
- &def, origname, listenAddress,
- nmigrate_disks, migrate_disks, nbdPort,
- compression, flags);
+ ret = qemuMigrationDstPrepareDirect(driver,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ uri_in, uri_out,
+ &def, origname, listenAddress,
+ nmigrate_disks, migrate_disks, nbdPort,
+ compression, flags);
cleanup:
VIR_FREE(compression);
@@ -12545,16 +12545,16 @@ qemuDomainMigratePrepareTunnel3(virConnectPtr dconn,
goto cleanup;
}
- if (!(def = qemuMigrationPrepareDef(driver, dom_xml, dname, &origname)))
+ if (!(def = qemuMigrationAnyPrepareDef(driver, dom_xml, dname, &origname)))
goto cleanup;
if (virDomainMigratePrepareTunnel3EnsureACL(dconn, def) < 0)
goto cleanup;
- ret = qemuMigrationPrepareTunnel(driver,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- st, &def, origname, flags);
+ ret = qemuMigrationDstPrepareTunnel(driver,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ st, &def, origname, flags);
cleanup:
VIR_FREE(origname);
@@ -12598,16 +12598,16 @@ qemuDomainMigratePrepareTunnel3Params(virConnectPtr dconn,
goto cleanup;
}
- if (!(def = qemuMigrationPrepareDef(driver, dom_xml, dname, &origname)))
+ if (!(def = qemuMigrationAnyPrepareDef(driver, dom_xml, dname, &origname)))
goto cleanup;
if (virDomainMigratePrepareTunnel3ParamsEnsureACL(dconn, def) < 0)
goto cleanup;
- ret = qemuMigrationPrepareTunnel(driver,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- st, &def, origname, flags);
+ ret = qemuMigrationDstPrepareTunnel(driver,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ st, &def, origname, flags);
cleanup:
VIR_FREE(origname);
@@ -12637,7 +12637,7 @@ qemuDomainMigratePerform3(virDomainPtr dom,
virCheckFlags(QEMU_MIGRATION_FLAGS, -1);
- if (!(compression = qemuMigrationCompressionParse(NULL, 0, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(NULL, 0, flags)))
return -1;
if (!(vm = qemuDomObjFromDomain(dom)))
@@ -12648,12 +12648,12 @@ qemuDomainMigratePerform3(virDomainPtr dom,
goto cleanup;
}
- ret = qemuMigrationPerform(driver, dom->conn, vm, xmlin, NULL,
- dconnuri, uri, NULL, NULL, 0, NULL, 0,
- compression, &migParams,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, dname, resource, true);
+ ret = qemuMigrationSrcPerform(driver, dom->conn, vm, xmlin, NULL,
+ dconnuri, uri, NULL, NULL, 0, NULL, 0,
+ compression, &migParams,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, dname, resource, true);
cleanup:
qemuMigrationParamsClear(&migParams);
@@ -12728,7 +12728,7 @@ qemuDomainMigratePerform3Params(virDomainPtr dom,
if (!(migParams = qemuMigrationParams(params, nparams, flags)))
goto cleanup;
- if (!(compression = qemuMigrationCompressionParse(params, nparams, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(params, nparams, flags)))
goto cleanup;
if (!(vm = qemuDomObjFromDomain(dom)))
@@ -12739,12 +12739,12 @@ qemuDomainMigratePerform3Params(virDomainPtr dom,
goto cleanup;
}
- ret = qemuMigrationPerform(driver, dom->conn, vm, dom_xml, persist_xml,
- dconnuri, uri, graphicsuri, listenAddress,
- nmigrate_disks, migrate_disks, nbdPort,
- compression, migParams,
- cookiein, cookieinlen, cookieout, cookieoutlen,
- flags, dname, bandwidth, true);
+ ret = qemuMigrationSrcPerform(driver, dom->conn, vm, dom_xml, persist_xml,
+ dconnuri, uri, graphicsuri, listenAddress,
+ nmigrate_disks, migrate_disks, nbdPort,
+ compression, migParams,
+ cookiein, cookieinlen, cookieout, cookieoutlen,
+ flags, dname, bandwidth, true);
cleanup:
VIR_FREE(compression);
qemuMigrationParamsFree(&migParams);
@@ -12779,7 +12779,7 @@ qemuDomainMigrateFinish3(virConnectPtr dconn,
if (!vm) {
virReportError(VIR_ERR_NO_DOMAIN,
_("no domain with matching name '%s'"), dname);
- qemuMigrationErrorReport(driver, dname);
+ qemuMigrationDstErrorReport(driver, dname);
return NULL;
}
@@ -12788,10 +12788,10 @@ qemuDomainMigrateFinish3(virConnectPtr dconn,
return NULL;
}
- return qemuMigrationFinish(driver, dconn, vm,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, cancelled, true);
+ return qemuMigrationDstFinish(driver, dconn, vm,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, cancelled, true);
}
static virDomainPtr
@@ -12827,7 +12827,7 @@ qemuDomainMigrateFinish3Params(virConnectPtr dconn,
if (!vm) {
virReportError(VIR_ERR_NO_DOMAIN,
_("no domain with matching name '%s'"), dname);
- qemuMigrationErrorReport(driver, dname);
+ qemuMigrationDstErrorReport(driver, dname);
return NULL;
}
@@ -12836,10 +12836,10 @@ qemuDomainMigrateFinish3Params(virConnectPtr dconn,
return NULL;
}
- return qemuMigrationFinish(driver, dconn, vm,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, cancelled, true);
+ return qemuMigrationDstFinish(driver, dconn, vm,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, cancelled, true);
}
@@ -12862,8 +12862,8 @@ qemuDomainMigrateConfirm3(virDomainPtr domain,
return -1;
}
- return qemuMigrationConfirm(domain->conn->privateData, vm, cookiein, cookieinlen,
- flags, cancelled);
+ return qemuMigrationSrcConfirm(domain->conn->privateData, vm, cookiein, cookieinlen,
+ flags, cancelled);
}
static int
@@ -12890,8 +12890,8 @@ qemuDomainMigrateConfirm3Params(virDomainPtr domain,
return -1;
}
- return qemuMigrationConfirm(domain->conn->privateData, vm, cookiein, cookieinlen,
- flags, cancelled);
+ return qemuMigrationSrcConfirm(domain->conn->privateData, vm, cookiein, cookieinlen,
+ flags, cancelled);
}
@@ -13180,14 +13180,14 @@ qemuDomainGetJobInfoMigrationStats(virQEMUDriverPtr driver,
jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) {
if (events &&
jobInfo->status != QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
- qemuMigrationFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE,
- jobInfo, NULL) < 0)
+ qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_NONE,
+ jobInfo, NULL) < 0)
return -1;
if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE &&
jobInfo->statsType == QEMU_DOMAIN_JOB_STATS_TYPE_MIGRATION &&
- qemuMigrationFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_NONE,
- jobInfo) < 0)
+ qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_NONE,
+ jobInfo) < 0)
return -1;
if (qemuDomainJobInfoUpdateTime(jobInfo) < 0)
@@ -13582,7 +13582,7 @@ qemuDomainMigrateGetCompressionCache(virDomainPtr dom,
priv = vm->privateData;
- if (!qemuMigrationCapsGet(vm, QEMU_MONITOR_MIGRATION_CAPS_XBZRLE)) {
+ if (!qemuMigrationAnyCapsGet(vm, QEMU_MONITOR_MIGRATION_CAPS_XBZRLE)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
_("Compressed migration is not supported by "
"QEMU binary"));
@@ -13633,7 +13633,7 @@ qemuDomainMigrateSetCompressionCache(virDomainPtr dom,
priv = vm->privateData;
- if (!qemuMigrationCapsGet(vm, QEMU_MONITOR_MIGRATION_CAPS_XBZRLE)) {
+ if (!qemuMigrationAnyCapsGet(vm, QEMU_MONITOR_MIGRATION_CAPS_XBZRLE)) {
virReportError(VIR_ERR_OPERATION_UNSUPPORTED, "%s",
_("Compressed migration is not supported by "
"QEMU binary"));
@@ -13985,7 +13985,7 @@ qemuDomainSnapshotCreateActiveInternal(virQEMUDriverPtr driver,
bool resume = false;
int ret = -1;
- if (!qemuMigrationIsAllowed(driver, vm, false, 0))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
@@ -14918,7 +14918,7 @@ qemuDomainSnapshotCreateActiveExternal(virQEMUDriverPtr driver,
/* do the memory snapshot if necessary */
if (memory) {
/* check if migration is possible */
- if (!qemuMigrationIsAllowed(driver, vm, false, 0))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, false, 0))
goto cleanup;
/* allow the migration job to be cancelled or the domain to be paused */
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index 29247d6a39..99d58325cb 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -115,7 +115,7 @@ qemuMigrationJobFinish(virQEMUDriverPtr driver,
virDomainObjPtr obj)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
-/* qemuMigrationCheckTLSCreds
+/* qemuMigrationParamsCheckTLSCreds
* @driver: pointer to qemu driver
* @vm: domain object
* @asyncJob: migration job to join
@@ -129,9 +129,9 @@ qemuMigrationJobFinish(virQEMUDriverPtr driver,
* private domain structure. Returns -1 on failure.
*/
static int
-qemuMigrationCheckTLSCreds(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationParamsCheckTLSCreds(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob)
{
int ret = -1;
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -158,7 +158,7 @@ qemuMigrationCheckTLSCreds(virQEMUDriverPtr driver,
}
-/* qemuMigrationCheckSetupTLS
+/* qemuMigrationParamsCheckSetupTLS
* @driver: pointer to qemu driver
* @vm: domain object
* @cfg: configuration pointer
@@ -174,10 +174,10 @@ qemuMigrationCheckTLSCreds(virQEMUDriverPtr driver,
* Returns 0 on success, -1 on error/failure
*/
static int
-qemuMigrationCheckSetupTLS(virQEMUDriverPtr driver,
- virQEMUDriverConfigPtr cfg,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationParamsCheckSetupTLS(virQEMUDriverPtr driver,
+ virQEMUDriverConfigPtr cfg,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -187,7 +187,7 @@ qemuMigrationCheckSetupTLS(virQEMUDriverPtr driver,
return -1;
}
- if (qemuMigrationCheckTLSCreds(driver, vm, asyncJob) < 0)
+ if (qemuMigrationParamsCheckTLSCreds(driver, vm, asyncJob) < 0)
return -1;
if (!priv->migTLSAlias) {
@@ -208,7 +208,7 @@ qemuMigrationCheckSetupTLS(virQEMUDriverPtr driver,
}
-/* qemuMigrationAddTLSObjects
+/* qemuMigrationParamsAddTLSObjects
* @driver: pointer to qemu driver
* @vm: domain object
* @cfg: configuration pointer
@@ -223,14 +223,14 @@ qemuMigrationCheckSetupTLS(virQEMUDriverPtr driver,
* Returns 0 on success, -1 on failure
*/
static int
-qemuMigrationAddTLSObjects(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- virQEMUDriverConfigPtr cfg,
- bool tlsListen,
- qemuDomainAsyncJob asyncJob,
- char **tlsAlias,
- char **secAlias,
- qemuMonitorMigrationParamsPtr migParams)
+qemuMigrationParamsAddTLSObjects(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ virQEMUDriverConfigPtr cfg,
+ bool tlsListen,
+ qemuDomainAsyncJob asyncJob,
+ char **tlsAlias,
+ char **secAlias,
+ qemuMonitorMigrationParamsPtr migParams)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virJSONValuePtr tlsProps = NULL;
@@ -266,7 +266,7 @@ qemuMigrationAddTLSObjects(virQEMUDriverPtr driver,
static void
-qemuMigrationStoreDomainState(virDomainObjPtr vm)
+qemuMigrationSrcStoreDomainState(virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
priv->preMigrationState = virDomainObjGetState(vm, NULL);
@@ -277,7 +277,7 @@ qemuMigrationStoreDomainState(virDomainObjPtr vm)
/* Returns true if the domain was resumed, false otherwise */
static bool
-qemuMigrationRestoreDomainState(virQEMUDriverPtr driver, virDomainObjPtr vm)
+qemuMigrationSrcRestoreDomainState(virQEMUDriverPtr driver, virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int reason;
@@ -320,9 +320,9 @@ qemuMigrationRestoreDomainState(virQEMUDriverPtr driver, virDomainObjPtr vm)
static int
-qemuMigrationPrecreateDisk(virConnectPtr conn,
- virDomainDiskDefPtr disk,
- unsigned long long capacity)
+qemuMigrationDstPrecreateDisk(virConnectPtr conn,
+ virDomainDiskDefPtr disk,
+ unsigned long long capacity)
{
int ret = -1;
virStoragePoolPtr pool = NULL;
@@ -425,8 +425,8 @@ qemuMigrationPrecreateDisk(virConnectPtr conn,
}
static bool
-qemuMigrateDisk(virDomainDiskDef const *disk,
- size_t nmigrate_disks, const char **migrate_disks)
+qemuMigrationAnyCopyDisk(virDomainDiskDef const *disk,
+ size_t nmigrate_disks, const char **migrate_disks)
{
size_t i;
@@ -447,11 +447,11 @@ qemuMigrateDisk(virDomainDiskDef const *disk,
static int
-qemuMigrationPrecreateStorage(virDomainObjPtr vm,
- qemuMigrationCookieNBDPtr nbd,
- size_t nmigrate_disks,
- const char **migrate_disks,
- bool incremental)
+qemuMigrationDstPrecreateStorage(virDomainObjPtr vm,
+ qemuMigrationCookieNBDPtr nbd,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ bool incremental)
{
int ret = -1;
size_t i = 0;
@@ -481,7 +481,7 @@ qemuMigrationPrecreateStorage(virDomainObjPtr vm,
diskSrcPath = virDomainDiskGetSource(disk);
/* Skip disks we don't want to migrate and already existing disks. */
- if (!qemuMigrateDisk(disk, nmigrate_disks, migrate_disks) ||
+ if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks) ||
(diskSrcPath && virFileExists(diskSrcPath))) {
continue;
}
@@ -495,7 +495,7 @@ qemuMigrationPrecreateStorage(virDomainObjPtr vm,
VIR_DEBUG("Proceeding with disk source %s", NULLSTR(diskSrcPath));
- if (qemuMigrationPrecreateDisk(conn, disk, nbd->disks[i].capacity) < 0)
+ if (qemuMigrationDstPrecreateDisk(conn, disk, nbd->disks[i].capacity) < 0)
goto cleanup;
}
@@ -507,7 +507,7 @@ qemuMigrationPrecreateStorage(virDomainObjPtr vm,
/**
- * qemuMigrationStartNBDServer:
+ * qemuMigrationDstStartNBDServer:
* @driver: qemu driver
* @vm: domain
*
@@ -519,12 +519,12 @@ qemuMigrationPrecreateStorage(virDomainObjPtr vm,
* Returns 0 on success, -1 otherwise.
*/
static int
-qemuMigrationStartNBDServer(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *listenAddr,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort)
+qemuMigrationDstStartNBDServer(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *listenAddr,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort)
{
int ret = -1;
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -542,7 +542,7 @@ qemuMigrationStartNBDServer(virQEMUDriverPtr driver,
virDomainDiskDefPtr disk = vm->def->disks[i];
/* check whether disk should be migrated */
- if (!qemuMigrateDisk(disk, nmigrate_disks, migrate_disks))
+ if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
continue;
if (disk->src->readonly || virStorageSourceIsEmpty(disk->src)) {
@@ -592,9 +592,9 @@ qemuMigrationStartNBDServer(virQEMUDriverPtr driver,
static int
-qemuMigrationStopNBDServer(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuMigrationCookiePtr mig)
+qemuMigrationDstStopNBDServer(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuMigrationCookiePtr mig)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -617,12 +617,12 @@ qemuMigrationStopNBDServer(virQEMUDriverPtr driver,
/**
- * qemuMigrationDriveMirrorReady:
+ * qemuMigrationSrcDriveMirrorReady:
* @driver: qemu driver
* @vm: domain
*
* Check the status of all drive-mirrors started by
- * qemuMigrationDriveMirror. Any pending block job events
+ * qemuMigrationSrcDriveMirror. Any pending block job events
* for the mirrored disks will be processed.
*
* Returns 1 if all mirrors are "ready",
@@ -630,9 +630,9 @@ qemuMigrationStopNBDServer(virQEMUDriverPtr driver,
* -1 on error.
*/
static int
-qemuMigrationDriveMirrorReady(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationSrcDriveMirrorReady(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob)
{
size_t i;
size_t notReady = 0;
@@ -774,11 +774,11 @@ qemuMigrationDriveMirrorCancelled(virQEMUDriverPtr driver,
* -1 on error or when job failed and failNoJob is true.
*/
static int
-qemuMigrationCancelOneDriveMirror(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- virDomainDiskDefPtr disk,
- bool failNoJob,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationSrcCancelOneDriveMirror(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ virDomainDiskDefPtr disk,
+ bool failNoJob,
+ qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
char *diskAlias = NULL;
@@ -829,7 +829,7 @@ qemuMigrationCancelOneDriveMirror(virQEMUDriverPtr driver,
/**
- * qemuMigrationCancelDriveMirror:
+ * qemuMigrationSrcCancelDriveMirror:
* @driver: qemu driver
* @vm: domain
* @check: if true report an error when some of the mirrors fails
@@ -841,11 +841,11 @@ qemuMigrationCancelOneDriveMirror(virQEMUDriverPtr driver,
* Returns 0 on success, -1 otherwise.
*/
static int
-qemuMigrationCancelDriveMirror(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- bool check,
- qemuDomainAsyncJob asyncJob,
- virConnectPtr dconn)
+qemuMigrationSrcCancelDriveMirror(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ bool check,
+ qemuDomainAsyncJob asyncJob,
+ virConnectPtr dconn)
{
virErrorPtr err = NULL;
int ret = -1;
@@ -862,8 +862,8 @@ qemuMigrationCancelDriveMirror(virQEMUDriverPtr driver,
if (!diskPriv->migrating)
continue;
- rv = qemuMigrationCancelOneDriveMirror(driver, vm, disk,
- check, asyncJob);
+ rv = qemuMigrationSrcCancelOneDriveMirror(driver, vm, disk,
+ check, asyncJob);
if (rv != 0) {
if (rv < 0) {
if (!err)
@@ -922,22 +922,22 @@ qemuMigrationCancelDriveMirror(virQEMUDriverPtr driver,
* simultaneously to both source and destination. On success,
* update @migrate_flags so we don't tell 'migrate' command
* to do the very same operation. On failure, the caller is
- * expected to call qemuMigrationCancelDriveMirror to stop all
+ * expected to call qemuMigrationSrcCancelDriveMirror to stop all
* running mirrors.
*
* Returns 0 on success (@migrate_flags updated),
* -1 otherwise.
*/
static int
-qemuMigrationDriveMirror(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuMigrationCookiePtr mig,
- const char *host,
- unsigned long speed,
- unsigned int *migrate_flags,
- size_t nmigrate_disks,
- const char **migrate_disks,
- virConnectPtr dconn)
+qemuMigrationSrcDriveMirror(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuMigrationCookiePtr mig,
+ const char *host,
+ unsigned long speed,
+ unsigned int *migrate_flags,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ virConnectPtr dconn)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
@@ -982,7 +982,7 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
int mon_ret;
/* check whether disk should be migrated */
- if (!qemuMigrateDisk(disk, nmigrate_disks, migrate_disks))
+ if (!qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
continue;
if (!(diskAlias = qemuAliasFromDisk(disk)) ||
@@ -1013,8 +1013,8 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
}
}
- while ((rv = qemuMigrationDriveMirrorReady(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) {
+ while ((rv = qemuMigrationSrcDriveMirrorReady(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) {
if (rv < 0)
goto cleanup;
@@ -1036,8 +1036,8 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
goto cleanup;
}
- qemuMigrationFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
- priv->job.current);
+ qemuMigrationSrcFetchMirrorStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ priv->job.current);
/* Okay, all disks are ready. Modify migrate_flags */
*migrate_flags &= ~(QEMU_MONITOR_MIGRATE_NON_SHARED_DISK |
@@ -1054,14 +1054,14 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver,
/**
- * qemuMigrationIsAllowedHostdev:
+ * qemuMigrationSrcIsAllowedHostdev:
* @def: domain definition
*
* Checks that @def does not contain any host devices unsupported accross
* migrations. Returns true if the vm is allowed to migrate.
*/
static bool
-qemuMigrationIsAllowedHostdev(const virDomainDef *def)
+qemuMigrationSrcIsAllowedHostdev(const virDomainDef *def)
{
size_t i;
@@ -1082,7 +1082,7 @@ qemuMigrationIsAllowedHostdev(const virDomainDef *def)
/**
- * qemuMigrationIsAllowed:
+ * qemuMigrationSrcIsAllowed:
* @driver: qemu driver struct
* @vm: domain object
* @remote: migration is remote
@@ -1096,10 +1096,10 @@ qemuMigrationIsAllowedHostdev(const virDomainDef *def)
* false otherwise.
*/
bool
-qemuMigrationIsAllowed(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- bool remote,
- unsigned int flags)
+qemuMigrationSrcIsAllowed(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ bool remote,
+ unsigned int flags)
{
int nsnapshots;
int pauseReason;
@@ -1145,7 +1145,7 @@ qemuMigrationIsAllowed(virQEMUDriverPtr driver,
return false;
}
- if (!qemuMigrationIsAllowedHostdev(vm->def))
+ if (!qemuMigrationSrcIsAllowedHostdev(vm->def))
return false;
if (vm->def->cpu) {
@@ -1200,10 +1200,10 @@ qemuMigrationIsAllowed(virQEMUDriverPtr driver,
}
static bool
-qemuMigrationIsSafe(virDomainDefPtr def,
- size_t nmigrate_disks,
- const char **migrate_disks,
- unsigned int flags)
+qemuMigrationSrcIsSafe(virDomainDefPtr def,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ unsigned int flags)
{
bool storagemigration = flags & (VIR_MIGRATE_NON_SHARED_DISK |
@@ -1226,7 +1226,7 @@ qemuMigrationIsSafe(virDomainDefPtr def,
/* disks which are migrated by qemu are safe too */
if (storagemigration &&
- qemuMigrateDisk(disk, nmigrate_disks, migrate_disks))
+ qemuMigrationAnyCopyDisk(disk, nmigrate_disks, migrate_disks))
continue;
if (virDomainDiskGetType(disk) == VIR_STORAGE_TYPE_FILE) {
@@ -1252,12 +1252,12 @@ qemuMigrationIsSafe(virDomainDefPtr def,
return true;
}
-/** qemuMigrationSetOffline
+/** qemuMigrationSrcSetOffline
* Pause domain for non-live migration.
*/
int
-qemuMigrationSetOffline(virQEMUDriverPtr driver,
- virDomainObjPtr vm)
+qemuMigrationSrcSetOffline(virQEMUDriverPtr driver,
+ virDomainObjPtr vm)
{
int ret;
VIR_DEBUG("driver=%p vm=%p", driver, vm);
@@ -1277,8 +1277,8 @@ qemuMigrationSetOffline(virQEMUDriverPtr driver,
void
-qemuMigrationPostcopyFailed(virQEMUDriverPtr driver,
- virDomainObjPtr vm)
+qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver,
+ virDomainObjPtr vm)
{
virDomainState state;
int reason;
@@ -1318,7 +1318,7 @@ qemuMigrationPostcopyFailed(virQEMUDriverPtr driver,
static int
-qemuMigrationSetOption(virQEMUDriverPtr driver,
+qemuMigrationOptionSet(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuMonitorMigrationCaps capability,
bool state,
@@ -1327,7 +1327,7 @@ qemuMigrationSetOption(virQEMUDriverPtr driver,
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret;
- if (!qemuMigrationCapsGet(vm, capability)) {
+ if (!qemuMigrationAnyCapsGet(vm, capability)) {
if (!state) {
/* Unsupported but we want it off anyway */
return 0;
@@ -1360,14 +1360,14 @@ qemuMigrationSetOption(virQEMUDriverPtr driver,
static int
-qemuMigrationSetPostCopy(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- bool state,
- qemuDomainAsyncJob job)
+qemuMigrationOptionSetPostCopy(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ bool state,
+ qemuDomainAsyncJob job)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
- if (qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_POSTCOPY,
state, job) < 0)
return -1;
@@ -1378,7 +1378,7 @@ qemuMigrationSetPostCopy(virQEMUDriverPtr driver,
static int
-qemuMigrationWaitForSpice(virDomainObjPtr vm)
+qemuMigrationSrcWaitForSpice(virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -1437,11 +1437,11 @@ qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo)
int
-qemuMigrationFetchStats(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
- qemuDomainJobInfoPtr jobInfo,
- char **error)
+qemuMigrationAnyFetchStats(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob,
+ qemuDomainJobInfoPtr jobInfo,
+ char **error)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
qemuMonitorMigrationStats stats;
@@ -1480,7 +1480,7 @@ qemuMigrationJobName(virDomainObjPtr vm)
static int
-qemuMigrationCheckJobStatus(virQEMUDriverPtr driver,
+qemuMigrationJobCheckStatus(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob asyncJob)
{
@@ -1492,7 +1492,7 @@ qemuMigrationCheckJobStatus(virQEMUDriverPtr driver,
if (!events ||
jobInfo->stats.mig.status == QEMU_MONITOR_MIGRATION_STATUS_ERROR) {
- if (qemuMigrationFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0)
+ if (qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, &error) < 0)
return -1;
}
@@ -1534,6 +1534,7 @@ qemuMigrationCheckJobStatus(virQEMUDriverPtr driver,
enum qemuMigrationCompletedFlags {
QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR = (1 << 0),
+ /* This flag should only be set when run on src host */
QEMU_MIGRATION_COMPLETED_CHECK_STORAGE = (1 << 1),
QEMU_MIGRATION_COMPLETED_POSTCOPY = (1 << 2),
QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER = (1 << 3),
@@ -1547,21 +1548,22 @@ enum qemuMigrationCompletedFlags {
* -2 something else failed, we need to cancel migration.
*/
static int
-qemuMigrationCompleted(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
- virConnectPtr dconn,
- unsigned int flags)
+qemuMigrationAnyCompleted(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob,
+ virConnectPtr dconn,
+ unsigned int flags)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
qemuDomainJobInfoPtr jobInfo = priv->job.current;
int pauseReason;
- if (qemuMigrationCheckJobStatus(driver, vm, asyncJob) < 0)
+ if (qemuMigrationJobCheckStatus(driver, vm, asyncJob) < 0)
goto error;
+ /* This flag should only be set when run on src host */
if (flags & QEMU_MIGRATION_COMPLETED_CHECK_STORAGE &&
- qemuMigrationDriveMirrorReady(driver, vm, asyncJob) < 0)
+ qemuMigrationSrcDriveMirrorReady(driver, vm, asyncJob) < 0)
goto error;
if (flags & QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR &&
@@ -1637,11 +1639,11 @@ qemuMigrationCompleted(virQEMUDriverPtr driver,
* QEMU reports failed migration.
*/
static int
-qemuMigrationWaitForCompletion(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
- virConnectPtr dconn,
- unsigned int flags)
+qemuMigrationSrcWaitForCompletion(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob,
+ virConnectPtr dconn,
+ unsigned int flags)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
qemuDomainJobInfoPtr jobInfo = priv->job.current;
@@ -1650,8 +1652,8 @@ qemuMigrationWaitForCompletion(virQEMUDriverPtr driver,
jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING;
- while ((rv = qemuMigrationCompleted(driver, vm, asyncJob,
- dconn, flags)) != 1) {
+ while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
+ dconn, flags)) != 1) {
if (rv < 0)
return rv;
@@ -1671,7 +1673,7 @@ qemuMigrationWaitForCompletion(virQEMUDriverPtr driver,
}
if (events)
- ignore_value(qemuMigrationFetchStats(driver, vm, asyncJob, jobInfo, NULL));
+ ignore_value(qemuMigrationAnyFetchStats(driver, vm, asyncJob, jobInfo, NULL));
qemuDomainJobInfoUpdateTime(jobInfo);
qemuDomainJobInfoUpdateDowntime(jobInfo);
@@ -1690,10 +1692,10 @@ qemuMigrationWaitForCompletion(virQEMUDriverPtr driver,
static int
-qemuMigrationWaitForDestCompletion(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
- bool postcopy)
+qemuMigrationDstWaitForCompletion(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob,
+ bool postcopy)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
unsigned int flags = 0;
@@ -1707,8 +1709,8 @@ qemuMigrationWaitForDestCompletion(virQEMUDriverPtr driver,
if (postcopy)
flags = QEMU_MIGRATION_COMPLETED_POSTCOPY;
- while ((rv = qemuMigrationCompleted(driver, vm, asyncJob,
- NULL, flags)) != 1) {
+ while ((rv = qemuMigrationAnyCompleted(driver, vm, asyncJob,
+ NULL, flags)) != 1) {
if (rv < 0 || virDomainObjWait(vm) < 0)
return -1;
}
@@ -1718,10 +1720,10 @@ qemuMigrationWaitForDestCompletion(virQEMUDriverPtr driver,
static int
-qemuDomainMigrateGraphicsRelocate(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuMigrationCookiePtr cookie,
- const char *graphicsuri)
+qemuMigrationSrcGraphicsRelocate(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuMigrationCookiePtr cookie,
+ const char *graphicsuri)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
@@ -1817,9 +1819,9 @@ qemuDomainMigrateGraphicsRelocate(virQEMUDriverPtr driver,
static int
-qemuDomainMigrateOPDRelocate(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
- virDomainObjPtr vm,
- qemuMigrationCookiePtr cookie)
+qemuMigrationDstOPDRelocate(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
+ virDomainObjPtr vm,
+ qemuMigrationCookiePtr cookie)
{
virDomainNetDefPtr netptr;
int ret = -1;
@@ -1854,8 +1856,8 @@ qemuDomainMigrateOPDRelocate(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
int
-qemuMigrationCheckIncoming(virQEMUCapsPtr qemuCaps,
- const char *migrateFrom)
+qemuMigrationDstCheckProtocol(virQEMUCapsPtr qemuCaps,
+ const char *migrateFrom)
{
if (STRPREFIX(migrateFrom, "rdma")) {
if (!virQEMUCapsGet(qemuCaps, QEMU_CAPS_MIGRATE_RDMA)) {
@@ -1879,8 +1881,8 @@ qemuMigrationCheckIncoming(virQEMUCapsPtr qemuCaps,
char *
-qemuMigrationIncomingURI(const char *migrateFrom,
- int migrateFd)
+qemuMigrationDstGetURI(const char *migrateFrom,
+ int migrateFd)
{
char *uri = NULL;
@@ -1894,10 +1896,10 @@ qemuMigrationIncomingURI(const char *migrateFrom,
int
-qemuMigrationRunIncoming(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *uri,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationDstRun(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *uri,
+ qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret = -1;
@@ -1914,12 +1916,12 @@ qemuMigrationRunIncoming(virQEMUDriverPtr driver,
goto cleanup;
if (asyncJob == QEMU_ASYNC_JOB_MIGRATION_IN) {
- /* qemuMigrationWaitForDestCompletion is called from the Finish phase */
+ /* qemuMigrationDstWaitForCompletion is called from the Finish phase */
ret = 0;
goto cleanup;
}
- if (qemuMigrationWaitForDestCompletion(driver, vm, asyncJob, false) < 0)
+ if (qemuMigrationDstWaitForCompletion(driver, vm, asyncJob, false) < 0)
goto cleanup;
ret = 0;
@@ -1936,9 +1938,9 @@ qemuMigrationRunIncoming(virQEMUDriverPtr driver,
* qemuDomainMigratePerform3 and qemuDomainMigrateConfirm3.
*/
static virDomainObjPtr
-qemuMigrationCleanup(virDomainObjPtr vm,
- virConnectPtr conn,
- void *opaque)
+qemuMigrationSrcCleanup(virDomainObjPtr vm,
+ virConnectPtr conn,
+ void *opaque)
{
virQEMUDriverPtr driver = opaque;
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -1994,15 +1996,15 @@ qemuMigrationCleanup(virDomainObjPtr vm,
/* The caller is supposed to lock the vm and start a migration job. */
static char *
-qemuMigrationBeginPhase(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *dname,
- char **cookieout,
- int *cookieoutlen,
- size_t nmigrate_disks,
- const char **migrate_disks,
- unsigned long flags)
+qemuMigrationSrcBeginPhase(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *dname,
+ char **cookieout,
+ int *cookieoutlen,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ unsigned long flags)
{
char *rv = NULL;
qemuMigrationCookiePtr mig = NULL;
@@ -2028,11 +2030,11 @@ qemuMigrationBeginPhase(virQEMUDriverPtr driver,
if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT)
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_BEGIN3);
- if (!qemuMigrationIsAllowed(driver, vm, true, flags))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
goto cleanup;
if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
- !qemuMigrationIsSafe(vm->def, nmigrate_disks, migrate_disks, flags))
+ !qemuMigrationSrcIsSafe(vm->def, nmigrate_disks, migrate_disks, flags))
goto cleanup;
if (flags & VIR_MIGRATE_POSTCOPY &&
@@ -2164,15 +2166,15 @@ qemuMigrationBeginPhase(virQEMUDriverPtr driver,
}
char *
-qemuMigrationBegin(virConnectPtr conn,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *dname,
- char **cookieout,
- int *cookieoutlen,
- size_t nmigrate_disks,
- const char **migrate_disks,
- unsigned long flags)
+qemuMigrationSrcBegin(virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *dname,
+ char **cookieout,
+ int *cookieoutlen,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ unsigned long flags)
{
virQEMUDriverPtr driver = conn->privateData;
virQEMUDriverConfigPtr cfg = NULL;
@@ -2189,7 +2191,7 @@ qemuMigrationBegin(virConnectPtr conn,
asyncJob = QEMU_ASYNC_JOB_NONE;
}
- qemuMigrationStoreDomainState(vm);
+ qemuMigrationSrcStoreDomainState(vm);
if (!virDomainObjIsActive(vm) && !(flags & VIR_MIGRATE_OFFLINE)) {
virReportError(VIR_ERR_OPERATION_INVALID,
@@ -2204,14 +2206,14 @@ qemuMigrationBegin(virConnectPtr conn,
qemuProcessRefreshDisks(driver, vm, asyncJob) < 0)
goto endjob;
- if (!(xml = qemuMigrationBeginPhase(driver, vm, xmlin, dname,
- cookieout, cookieoutlen,
- nmigrate_disks, migrate_disks, flags)))
+ if (!(xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname,
+ cookieout, cookieoutlen,
+ nmigrate_disks, migrate_disks, flags)))
goto endjob;
if (flags & VIR_MIGRATE_TLS) {
cfg = virQEMUDriverGetConfig(driver);
- if (qemuMigrationCheckSetupTLS(driver, cfg, vm, asyncJob) < 0)
+ if (qemuMigrationParamsCheckSetupTLS(driver, cfg, vm, asyncJob) < 0)
goto endjob;
}
@@ -2221,7 +2223,7 @@ qemuMigrationBegin(virConnectPtr conn,
* place.
*/
if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
- qemuMigrationCleanup) < 0) {
+ qemuMigrationSrcCleanup) < 0) {
VIR_FREE(xml);
goto endjob;
}
@@ -2248,8 +2250,8 @@ qemuMigrationBegin(virConnectPtr conn,
*/
static void
-qemuMigrationPrepareCleanup(virQEMUDriverPtr driver,
- virDomainObjPtr vm)
+qemuMigrationDstPrepareCleanup(virQEMUDriverPtr driver,
+ virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -2268,12 +2270,12 @@ qemuMigrationPrepareCleanup(virQEMUDriverPtr driver,
}
static qemuProcessIncomingDefPtr
-qemuMigrationPrepareIncoming(virDomainObjPtr vm,
- bool tunnel,
- const char *protocol,
- const char *listenAddress,
- unsigned short port,
- int fd)
+qemuMigrationDstPrepare(virDomainObjPtr vm,
+ bool tunnel,
+ const char *protocol,
+ const char *listenAddress,
+ unsigned short port,
+ int fd)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
qemuProcessIncomingDefPtr inc = NULL;
@@ -2348,26 +2350,26 @@ qemuMigrationPrepareIncoming(virDomainObjPtr vm,
}
static int
-qemuMigrationSetCompression(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob job,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams)
+qemuMigrationParamsSetCompression(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob job,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams)
{
int ret = -1;
qemuDomainObjPrivatePtr priv = vm->privateData;
- if (qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_XBZRLE,
compression->methods &
- (1ULL << QEMU_MIGRATION_COMPRESS_XBZRLE),
+ (1ULL << QEMU_MIGRATION_COMPRESS_XBZRLE),
job) < 0)
return -1;
- if (qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_COMPRESS,
compression->methods &
- (1ULL << QEMU_MIGRATION_COMPRESS_MT),
+ (1ULL << QEMU_MIGRATION_COMPRESS_MT),
job) < 0)
return -1;
@@ -2420,7 +2422,7 @@ qemuMigrationParamsFree(qemuMonitorMigrationParamsPtr *migParams)
}
-/* qemuMigrationSetEmptyTLSParams
+/* qemuMigrationParamsSetEmptyTLS
* @driver: pointer to qemu driver
* @vm: domain object
* @asyncJob: migration job to join
@@ -2433,14 +2435,14 @@ qemuMigrationParamsFree(qemuMonitorMigrationParamsPtr *migParams)
* Returns 0 on success, -1 on failure
*/
static int
-qemuMigrationSetEmptyTLSParams(virQEMUDriverPtr driver,
+qemuMigrationParamsSetEmptyTLS(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob asyncJob,
qemuMonitorMigrationParamsPtr migParams)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
- if (qemuMigrationCheckTLSCreds(driver, vm, asyncJob) < 0)
+ if (qemuMigrationParamsCheckTLSCreds(driver, vm, asyncJob) < 0)
return -1;
if (!priv->migTLSAlias)
@@ -2501,7 +2503,7 @@ qemuMigrationParams(virTypedParameterPtr params,
static int
-qemuMigrationSetParams(virQEMUDriverPtr driver,
+qemuMigrationParamsSet(virQEMUDriverPtr driver,
virDomainObjPtr vm,
qemuDomainAsyncJob job,
qemuMonitorMigrationParamsPtr migParams)
@@ -2525,7 +2527,7 @@ qemuMigrationSetParams(virQEMUDriverPtr driver,
}
-/* qemuMigrationResetTLS
+/* qemuMigrationParamsResetTLS
* @driver: pointer to qemu driver
* @vm: domain object
* @asyncJob: migration job to join
@@ -2536,9 +2538,9 @@ qemuMigrationSetParams(virQEMUDriverPtr driver,
* Returns 0 on success, -1 on failure
*/
static int
-qemuMigrationResetTLS(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationParamsResetTLS(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
char *tlsAlias = NULL;
@@ -2546,7 +2548,7 @@ qemuMigrationResetTLS(virQEMUDriverPtr driver,
qemuMonitorMigrationParams migParams = { 0 };
int ret = -1;
- if (qemuMigrationCheckTLSCreds(driver, vm, asyncJob) < 0)
+ if (qemuMigrationParamsCheckTLSCreds(driver, vm, asyncJob) < 0)
return -1;
/* If the tls-creds doesn't exist or if they're set to "" then there's
@@ -2565,7 +2567,7 @@ qemuMigrationResetTLS(virQEMUDriverPtr driver,
if (VIR_STRDUP(migParams.tlsCreds, "") < 0 ||
VIR_STRDUP(migParams.tlsHostname, "") < 0 ||
- qemuMigrationSetParams(driver, vm, asyncJob, &migParams) < 0)
+ qemuMigrationParamsSet(driver, vm, asyncJob, &migParams) < 0)
goto cleanup;
ret = 0;
@@ -2580,23 +2582,23 @@ qemuMigrationResetTLS(virQEMUDriverPtr driver,
static int
-qemuMigrationPrepareAny(virQEMUDriverPtr driver,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- virDomainDefPtr *def,
- const char *origname,
- virStreamPtr st,
- const char *protocol,
- unsigned short port,
- bool autoPort,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- unsigned long flags)
+qemuMigrationDstPrepareAny(virQEMUDriverPtr driver,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ virDomainDefPtr *def,
+ const char *origname,
+ virStreamPtr st,
+ const char *protocol,
+ unsigned short port,
+ bool autoPort,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ unsigned long flags)
{
virDomainObjPtr vm = NULL;
virObjectEventPtr event = NULL;
@@ -2664,7 +2666,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
if (!(caps = virQEMUDriverGetCapabilities(driver, false)))
goto cleanup;
- if (!qemuMigrationIsAllowedHostdev(*def))
+ if (!qemuMigrationSrcIsAllowedHostdev(*def))
goto cleanup;
/* Let migration hook filter domain XML */
@@ -2748,9 +2750,9 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
goto cleanup;
}
- if (qemuMigrationPrecreateStorage(vm, mig->nbd,
- nmigrate_disks, migrate_disks,
- !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
+ if (qemuMigrationDstPrecreateStorage(vm, mig->nbd,
+ nmigrate_disks, migrate_disks,
+ !!(flags & VIR_MIGRATE_NON_SHARED_INC)) < 0)
goto cleanup;
if (qemuMigrationJobStart(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
@@ -2779,9 +2781,9 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
priv->allowReboot = mig->allowReboot;
- if (!(incoming = qemuMigrationPrepareIncoming(vm, tunnel, protocol,
- listenAddress, port,
- dataFD[0])))
+ if (!(incoming = qemuMigrationDstPrepare(vm, tunnel, protocol,
+ listenAddress, port,
+ dataFD[0])))
goto stopjob;
if (qemuProcessPrepareDomain(driver, vm, startFlags) < 0)
@@ -2810,21 +2812,21 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
dataFD[1] = -1; /* 'st' owns the FD now & will close it */
}
- if (qemuMigrationSetCompression(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
- compression, &migParams) < 0)
+ if (qemuMigrationParamsSetCompression(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ compression, &migParams) < 0)
goto stopjob;
/* Migrations using TLS need to add the "tls-creds-x509" object and
* set the migration TLS parameters */
if (flags & VIR_MIGRATE_TLS) {
cfg = virQEMUDriverGetConfig(driver);
- if (qemuMigrationCheckSetupTLS(driver, cfg, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ if (qemuMigrationParamsCheckSetupTLS(driver, cfg, vm,
+ QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
goto stopjob;
- if (qemuMigrationAddTLSObjects(driver, vm, cfg, true,
- QEMU_ASYNC_JOB_MIGRATION_IN,
- &tlsAlias, &secAlias, &migParams) < 0)
+ if (qemuMigrationParamsAddTLSObjects(driver, vm, cfg, true,
+ QEMU_ASYNC_JOB_MIGRATION_IN,
+ &tlsAlias, &secAlias, &migParams) < 0)
goto stopjob;
/* Force reset of 'tls-hostname', it's a source only parameter */
@@ -2832,7 +2834,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
goto stopjob;
} else {
- if (qemuMigrationSetEmptyTLSParams(driver, vm,
+ if (qemuMigrationParamsSetEmptyTLS(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_IN,
&migParams) < 0)
goto stopjob;
@@ -2843,27 +2845,27 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
goto stopjob;
}
- if (qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL,
flags & VIR_MIGRATE_RDMA_PIN_ALL,
QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
goto stopjob;
- if (qemuMigrationSetPostCopy(driver, vm,
- flags & VIR_MIGRATE_POSTCOPY,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ if (qemuMigrationOptionSetPostCopy(driver, vm,
+ flags & VIR_MIGRATE_POSTCOPY,
+ QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
goto stopjob;
- if (qemuMigrationSetParams(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
+ if (qemuMigrationParamsSet(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
&migParams) < 0)
goto stopjob;
if (mig->nbd &&
flags & (VIR_MIGRATE_NON_SHARED_DISK | VIR_MIGRATE_NON_SHARED_INC) &&
virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_NBD_SERVER)) {
- if (qemuMigrationStartNBDServer(driver, vm, incoming->address,
- nmigrate_disks, migrate_disks,
- nbdPort) < 0) {
+ if (qemuMigrationDstStartNBDServer(driver, vm, incoming->address,
+ nmigrate_disks, migrate_disks,
+ nbdPort) < 0) {
goto stopjob;
}
cookieFlags |= QEMU_MIGRATION_COOKIE_NBD;
@@ -2879,8 +2881,8 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
}
if (incoming->deferredURI &&
- qemuMigrationRunIncoming(driver, vm, incoming->deferredURI,
- QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
+ qemuMigrationDstRun(driver, vm, incoming->deferredURI,
+ QEMU_ASYNC_JOB_MIGRATION_IN) < 0)
goto stopjob;
if (qemuProcessFinishStartup(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN,
@@ -2897,7 +2899,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
VIR_WARN("Unable to encode migration cookie");
}
- if (qemuDomainCleanupAdd(vm, qemuMigrationPrepareCleanup) < 0)
+ if (qemuDomainCleanupAdd(vm, qemuMigrationDstPrepareCleanup) < 0)
goto stopjob;
if (!(flags & VIR_MIGRATE_OFFLINE)) {
@@ -2952,7 +2954,7 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
return ret;
stopjob:
- qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
+ qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
if (stopProcess) {
unsigned int stopFlags = VIR_QEMU_PROCESS_STOP_MIGRATED;
@@ -2973,15 +2975,15 @@ qemuMigrationPrepareAny(virQEMUDriverPtr driver,
* sets up the corresponding virStream to handle the incoming data.
*/
int
-qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- virStreamPtr st,
- virDomainDefPtr *def,
- const char *origname,
- unsigned long flags)
+qemuMigrationDstPrepareTunnel(virQEMUDriverPtr driver,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ virStreamPtr st,
+ virDomainDefPtr *def,
+ const char *origname,
+ unsigned long flags)
{
qemuMigrationCompressionPtr compression = NULL;
int ret;
@@ -2998,20 +3000,20 @@ qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
return -1;
}
- if (!(compression = qemuMigrationCompressionParse(NULL, 0, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(NULL, 0, flags)))
return -1;
- ret = qemuMigrationPrepareAny(driver, cookiein, cookieinlen,
- cookieout, cookieoutlen, def, origname,
- st, NULL, 0, false, NULL, 0, NULL, 0,
- compression, flags);
+ ret = qemuMigrationDstPrepareAny(driver, cookiein, cookieinlen,
+ cookieout, cookieoutlen, def, origname,
+ st, NULL, 0, false, NULL, 0, NULL, 0,
+ compression, flags);
VIR_FREE(compression);
return ret;
}
static virURIPtr
-qemuMigrationParseURI(const char *uri, bool *wellFormed)
+qemuMigrationAnyParseURI(const char *uri, bool *wellFormed)
{
char *tmp = NULL;
virURIPtr parsed;
@@ -3034,21 +3036,21 @@ qemuMigrationParseURI(const char *uri, bool *wellFormed)
int
-qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- const char *uri_in,
- char **uri_out,
- virDomainDefPtr *def,
- const char *origname,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- unsigned long flags)
+qemuMigrationDstPrepareDirect(virQEMUDriverPtr driver,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ const char *uri_in,
+ char **uri_out,
+ virDomainDefPtr *def,
+ const char *origname,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ unsigned long flags)
{
unsigned short port = 0;
bool autoPort = true;
@@ -3120,7 +3122,7 @@ qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
} else {
bool well_formed_uri;
- if (!(uri = qemuMigrationParseURI(uri_in, &well_formed_uri)))
+ if (!(uri = qemuMigrationAnyParseURI(uri_in, &well_formed_uri)))
goto cleanup;
if (uri->scheme == NULL) {
@@ -3166,12 +3168,12 @@ qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
if (*uri_out)
VIR_DEBUG("Generated uri_out=%s", *uri_out);
- ret = qemuMigrationPrepareAny(driver, cookiein, cookieinlen,
- cookieout, cookieoutlen, def, origname,
- NULL, uri ? uri->scheme : "tcp",
- port, autoPort, listenAddress,
- nmigrate_disks, migrate_disks, nbdPort,
- compression, flags);
+ ret = qemuMigrationDstPrepareAny(driver, cookiein, cookieinlen,
+ cookieout, cookieoutlen, def, origname,
+ NULL, uri ? uri->scheme : "tcp",
+ port, autoPort, listenAddress,
+ nmigrate_disks, migrate_disks, nbdPort,
+ compression, flags);
cleanup:
virURIFree(uri);
VIR_FREE(hostname);
@@ -3186,10 +3188,10 @@ qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
virDomainDefPtr
-qemuMigrationPrepareDef(virQEMUDriverPtr driver,
- const char *dom_xml,
- const char *dname,
- char **origname)
+qemuMigrationAnyPrepareDef(virQEMUDriverPtr driver,
+ const char *dom_xml,
+ const char *dname,
+ char **origname)
{
virCapsPtr caps = NULL;
virDomainDefPtr def;
@@ -3228,12 +3230,12 @@ qemuMigrationPrepareDef(virQEMUDriverPtr driver,
static int
-qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *cookiein,
- int cookieinlen,
- unsigned int flags,
- int retcode)
+qemuMigrationSrcConfirmPhase(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *cookiein,
+ int cookieinlen,
+ unsigned int flags,
+ int retcode)
{
qemuMigrationCookiePtr mig;
virObjectEventPtr event;
@@ -3273,8 +3275,8 @@ qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
*/
if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY &&
- qemuMigrationFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
- jobInfo, NULL) < 0)
+ qemuMigrationAnyFetchStats(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ jobInfo, NULL) < 0)
VIR_WARN("Could not refresh migration statistics");
qemuDomainJobInfoUpdateTime(jobInfo);
@@ -3293,7 +3295,7 @@ qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
if (retcode == 0) {
/* If guest uses SPICE and supports seamless migration we have to hold
* up domain shutdown until SPICE server transfers its data */
- qemuMigrationWaitForSpice(vm);
+ qemuMigrationSrcWaitForSpice(vm);
qemuProcessStop(driver, vm, VIR_DOMAIN_SHUTOFF_MIGRATED,
QEMU_ASYNC_JOB_MIGRATION_OUT,
@@ -3310,23 +3312,23 @@ qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
int reason;
/* cancel any outstanding NBD jobs */
- qemuMigrationCancelDriveMirror(driver, vm, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT, NULL);
+ qemuMigrationSrcCancelDriveMirror(driver, vm, false,
+ QEMU_ASYNC_JOB_MIGRATION_OUT, NULL);
virSetError(orig_err);
virFreeError(orig_err);
if (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED &&
reason == VIR_DOMAIN_PAUSED_POSTCOPY) {
- qemuMigrationPostcopyFailed(driver, vm);
- } else if (qemuMigrationRestoreDomainState(driver, vm)) {
+ qemuMigrationAnyPostcopyFailed(driver, vm);
+ } else if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_RESUMED,
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
qemuDomainEventQueue(driver, event);
}
- qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+ qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
if (virDomainSaveStatus(driver->xmlopt, cfg->stateDir, vm, driver->caps) < 0)
VIR_WARN("Failed to save status on vm %s", vm->def->name);
@@ -3342,12 +3344,12 @@ qemuMigrationConfirmPhase(virQEMUDriverPtr driver,
}
int
-qemuMigrationConfirm(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *cookiein,
- int cookieinlen,
- unsigned int flags,
- int cancelled)
+qemuMigrationSrcConfirm(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *cookiein,
+ int cookieinlen,
+ unsigned int flags,
+ int cancelled)
{
qemuMigrationJobPhase phase;
virQEMUDriverConfigPtr cfg = NULL;
@@ -3365,11 +3367,11 @@ qemuMigrationConfirm(virQEMUDriverPtr driver,
qemuMigrationJobStartPhase(driver, vm, phase);
virCloseCallbacksUnset(driver->closeCallbacks, vm,
- qemuMigrationCleanup);
+ qemuMigrationSrcCleanup);
- ret = qemuMigrationConfirmPhase(driver, vm,
- cookiein, cookieinlen,
- flags, cancelled);
+ ret = qemuMigrationSrcConfirmPhase(driver, vm,
+ cookiein, cookieinlen,
+ flags, cancelled);
qemuMigrationJobFinish(driver, vm);
if (!virDomainObjIsActive(vm)) {
@@ -3434,7 +3436,7 @@ struct _qemuMigrationIOThread {
int wakeupSendFD;
};
-static void qemuMigrationIOFunc(void *arg)
+static void qemuMigrationSrcIOFunc(void *arg)
{
qemuMigrationIOThreadPtr data = arg;
char *buffer = NULL;
@@ -3544,8 +3546,8 @@ static void qemuMigrationIOFunc(void *arg)
static qemuMigrationIOThreadPtr
-qemuMigrationStartTunnel(virStreamPtr st,
- int sock)
+qemuMigrationSrcStartTunnel(virStreamPtr st,
+ int sock)
{
qemuMigrationIOThreadPtr io = NULL;
int wakeupFD[2] = { -1, -1 };
@@ -3565,7 +3567,7 @@ qemuMigrationStartTunnel(virStreamPtr st,
io->wakeupSendFD = wakeupFD[1];
if (virThreadCreate(&io->thread, true,
- qemuMigrationIOFunc,
+ qemuMigrationSrcIOFunc,
io) < 0) {
virReportSystemError(errno, "%s",
_("Unable to create migration thread"));
@@ -3582,7 +3584,7 @@ qemuMigrationStartTunnel(virStreamPtr st,
}
static int
-qemuMigrationStopTunnel(qemuMigrationIOThreadPtr io, bool error)
+qemuMigrationSrcStopTunnel(qemuMigrationIOThreadPtr io, bool error)
{
int rv = -1;
char stop = error ? 1 : 0;
@@ -3616,9 +3618,9 @@ qemuMigrationStopTunnel(qemuMigrationIOThreadPtr io, bool error)
}
static int
-qemuMigrationConnect(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuMigrationSpecPtr spec)
+qemuMigrationSrcConnect(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuMigrationSpecPtr spec)
{
virNetSocketPtr sock;
const char *host;
@@ -3662,10 +3664,10 @@ qemuMigrationConnect(virQEMUDriverPtr driver,
static int
-qemuMigrationContinue(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuMonitorMigrationStatus status,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationSrcContinue(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuMonitorMigrationStatus status,
+ qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int ret;
@@ -3683,22 +3685,22 @@ qemuMigrationContinue(virQEMUDriverPtr driver,
static int
-qemuMigrationRun(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *persist_xml,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- unsigned long resource,
- qemuMigrationSpecPtr spec,
- virConnectPtr dconn,
- const char *graphicsuri,
- size_t nmigrate_disks,
- const char **migrate_disks,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams)
+qemuMigrationSrcRun(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *persist_xml,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ unsigned long resource,
+ qemuMigrationSpecPtr spec,
+ virConnectPtr dconn,
+ const char *graphicsuri,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams)
{
int ret = -1;
unsigned int migrate_flags = QEMU_MONITOR_MIGRATE_BACKGROUND;
@@ -3753,8 +3755,8 @@ qemuMigrationRun(virQEMUDriverPtr driver,
if (flags & VIR_MIGRATE_PERSIST_DEST) {
if (persist_xml) {
- if (!(persistDef = qemuMigrationPrepareDef(driver, persist_xml,
- NULL, NULL)))
+ if (!(persistDef = qemuMigrationAnyPrepareDef(driver, persist_xml,
+ NULL, NULL)))
goto error;
} else {
virDomainDefPtr def = vm->newDef ? vm->newDef : vm->def;
@@ -3770,7 +3772,7 @@ qemuMigrationRun(virQEMUDriverPtr driver,
if (!mig)
goto error;
- if (qemuDomainMigrateGraphicsRelocate(driver, vm, mig, graphicsuri) < 0)
+ if (qemuMigrationSrcGraphicsRelocate(driver, vm, mig, graphicsuri) < 0)
VIR_WARN("unable to provide data for graphics client relocation");
if (flags & VIR_MIGRATE_TLS) {
@@ -3778,9 +3780,9 @@ qemuMigrationRun(virQEMUDriverPtr driver,
/* Begin/CheckSetupTLS already set up migTLSAlias, the following
* assumes that and adds the TLS objects to the domain. */
- if (qemuMigrationAddTLSObjects(driver, vm, cfg, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- &tlsAlias, &secAlias, migParams) < 0)
+ if (qemuMigrationParamsAddTLSObjects(driver, vm, cfg, false,
+ QEMU_ASYNC_JOB_MIGRATION_OUT,
+ &tlsAlias, &secAlias, migParams) < 0)
goto error;
/* We need to add tls-hostname whenever QEMU itself does not
@@ -3795,7 +3797,7 @@ qemuMigrationRun(virQEMUDriverPtr driver,
goto error;
}
} else {
- if (qemuMigrationSetEmptyTLSParams(driver, vm,
+ if (qemuMigrationParamsSetEmptyTLS(driver, vm,
QEMU_ASYNC_JOB_MIGRATION_OUT,
migParams) < 0)
goto error;
@@ -3805,13 +3807,13 @@ qemuMigrationRun(virQEMUDriverPtr driver,
QEMU_MONITOR_MIGRATE_NON_SHARED_INC)) {
if (mig->nbd) {
/* This will update migrate_flags on success */
- if (qemuMigrationDriveMirror(driver, vm, mig,
- spec->dest.host.name,
- migrate_speed,
- &migrate_flags,
- nmigrate_disks,
- migrate_disks,
- dconn) < 0) {
+ if (qemuMigrationSrcDriveMirror(driver, vm, mig,
+ spec->dest.host.name,
+ migrate_speed,
+ &migrate_flags,
+ nmigrate_disks,
+ migrate_disks,
+ dconn) < 0) {
goto error;
}
} else {
@@ -3825,38 +3827,38 @@ qemuMigrationRun(virQEMUDriverPtr driver,
/* Before EnterMonitor, since qemuMigrationSetOffline already does that */
if (!(flags & VIR_MIGRATE_LIVE) &&
virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
- if (qemuMigrationSetOffline(driver, vm) < 0)
+ if (qemuMigrationSrcSetOffline(driver, vm) < 0)
goto error;
}
- if (qemuMigrationSetCompression(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
- compression, migParams) < 0)
+ if (qemuMigrationParamsSetCompression(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ compression, migParams) < 0)
goto error;
- if (qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_AUTO_CONVERGE,
flags & VIR_MIGRATE_AUTO_CONVERGE,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
- if (qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_RDMA_PIN_ALL,
flags & VIR_MIGRATE_RDMA_PIN_ALL,
QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
- if (qemuMigrationSetPostCopy(driver, vm,
- flags & VIR_MIGRATE_POSTCOPY,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ if (qemuMigrationOptionSetPostCopy(driver, vm,
+ flags & VIR_MIGRATE_POSTCOPY,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
- if (qemuMigrationCapsGet(vm, QEMU_MONITOR_MIGRATION_CAPS_PAUSE_BEFORE_SWITCHOVER) &&
- qemuMigrationSetOption(driver, vm,
+ if (qemuMigrationAnyCapsGet(vm, QEMU_MONITOR_MIGRATION_CAPS_PAUSE_BEFORE_SWITCHOVER) &&
+ qemuMigrationOptionSet(driver, vm,
QEMU_MONITOR_MIGRATION_CAPS_PAUSE_BEFORE_SWITCHOVER,
true, QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
- if (qemuMigrationSetParams(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
+ if (qemuMigrationParamsSet(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT,
migParams) < 0)
goto error;
@@ -3880,7 +3882,7 @@ qemuMigrationRun(virQEMUDriverPtr driver,
/* connect to the destination qemu if needed */
if (spec->destType == MIGRATION_DEST_CONNECT_HOST &&
- qemuMigrationConnect(driver, vm, spec) < 0) {
+ qemuMigrationSrcConnect(driver, vm, spec) < 0) {
goto exit_monitor;
}
@@ -3926,7 +3928,7 @@ qemuMigrationRun(virQEMUDriverPtr driver,
cancel = true;
if (spec->fwdType != MIGRATION_FWD_DIRECT) {
- if (!(iothread = qemuMigrationStartTunnel(spec->fwd.stream, fd)))
+ if (!(iothread = qemuMigrationSrcStartTunnel(spec->fwd.stream, fd)))
goto error;
/* If we've created a tunnel, then the 'fd' will be closed in the
* qemuMigrationIOFunc as data->sock.
@@ -3942,9 +3944,9 @@ qemuMigrationRun(virQEMUDriverPtr driver,
if (flags & VIR_MIGRATE_POSTCOPY)
waitFlags |= QEMU_MIGRATION_COMPLETED_POSTCOPY;
- rc = qemuMigrationWaitForCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- dconn, waitFlags);
+ rc = qemuMigrationSrcWaitForCompletion(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT,
+ dconn, waitFlags);
if (rc == -2) {
goto error;
} else if (rc == -1) {
@@ -3966,14 +3968,14 @@ qemuMigrationRun(virQEMUDriverPtr driver,
goto error;
}
} else if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING &&
- qemuMigrationSetOffline(driver, vm) < 0) {
+ qemuMigrationSrcSetOffline(driver, vm) < 0) {
goto error;
}
if (mig && mig->nbd &&
- qemuMigrationCancelDriveMirror(driver, vm, true,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- dconn) < 0)
+ qemuMigrationSrcCancelDriveMirror(driver, vm, true,
+ QEMU_ASYNC_JOB_MIGRATION_OUT,
+ dconn) < 0)
goto error;
/* When migration was paused before serializing device state we need to
@@ -3981,16 +3983,16 @@ qemuMigrationRun(virQEMUDriverPtr driver,
* end of the migration.
*/
if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_PAUSED) {
- if (qemuMigrationContinue(driver, vm,
- QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
- QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
+ if (qemuMigrationSrcContinue(driver, vm,
+ QEMU_MONITOR_MIGRATION_STATUS_PRE_SWITCHOVER,
+ QEMU_ASYNC_JOB_MIGRATION_OUT) < 0)
goto error;
waitFlags ^= QEMU_MIGRATION_COMPLETED_PRE_SWITCHOVER;
- rc = qemuMigrationWaitForCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- dconn, waitFlags);
+ rc = qemuMigrationSrcWaitForCompletion(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_OUT,
+ dconn, waitFlags);
if (rc == -2) {
goto error;
} else if (rc == -1) {
@@ -4004,7 +4006,7 @@ qemuMigrationRun(virQEMUDriverPtr driver,
qemuMigrationIOThreadPtr io;
VIR_STEAL_PTR(io, iothread);
- if (qemuMigrationStopTunnel(io, false) < 0)
+ if (qemuMigrationSrcStopTunnel(io, false) < 0)
goto error;
}
@@ -4058,12 +4060,12 @@ qemuMigrationRun(virQEMUDriverPtr driver,
/* cancel any outstanding NBD jobs */
if (mig && mig->nbd)
- qemuMigrationCancelDriveMirror(driver, vm, false,
- QEMU_ASYNC_JOB_MIGRATION_OUT,
- dconn);
+ qemuMigrationSrcCancelDriveMirror(driver, vm, false,
+ QEMU_ASYNC_JOB_MIGRATION_OUT,
+ dconn);
if (iothread)
- qemuMigrationStopTunnel(iothread, true);
+ qemuMigrationSrcStopTunnel(iothread, true);
if (priv->job.current->status != QEMU_DOMAIN_JOB_STATUS_CANCELED)
priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED;
@@ -4078,22 +4080,23 @@ qemuMigrationRun(virQEMUDriverPtr driver,
/* Perform migration using QEMU's native migrate support,
* not encrypted obviously
*/
-static int doNativeMigrate(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *persist_xml,
- const char *uri,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- unsigned long resource,
- virConnectPtr dconn,
- const char *graphicsuri,
- size_t nmigrate_disks,
- const char **migrate_disks,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams)
+static int
+qemuMigrationSrcPerformNative(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *persist_xml,
+ const char *uri,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ unsigned long resource,
+ virConnectPtr dconn,
+ const char *graphicsuri,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virURIPtr uribits = NULL;
@@ -4107,7 +4110,7 @@ static int doNativeMigrate(virQEMUDriverPtr driver,
cookieout, cookieoutlen, flags, resource,
NULLSTR(graphicsuri), nmigrate_disks, migrate_disks);
- if (!(uribits = qemuMigrationParseURI(uri, NULL)))
+ if (!(uribits = qemuMigrationAnyParseURI(uri, NULL)))
return -1;
if (uribits->scheme == NULL) {
@@ -4141,10 +4144,10 @@ static int doNativeMigrate(virQEMUDriverPtr driver,
spec.dest.host.port = uribits->port;
spec.fwdType = MIGRATION_FWD_DIRECT;
- ret = qemuMigrationRun(driver, vm, persist_xml, cookiein, cookieinlen, cookieout,
- cookieoutlen, flags, resource, &spec, dconn,
- graphicsuri, nmigrate_disks, migrate_disks,
- compression, migParams);
+ ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen, cookieout,
+ cookieoutlen, flags, resource, &spec, dconn,
+ graphicsuri, nmigrate_disks, migrate_disks,
+ compression, migParams);
if (spec.destType == MIGRATION_DEST_FD)
VIR_FORCE_CLOSE(spec.dest.fd.qemu);
@@ -4156,22 +4159,23 @@ static int doNativeMigrate(virQEMUDriverPtr driver,
}
-static int doTunnelMigrate(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- virStreamPtr st,
- const char *persist_xml,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- unsigned long resource,
- virConnectPtr dconn,
- const char *graphicsuri,
- size_t nmigrate_disks,
- const char **migrate_disks,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams)
+static int
+qemuMigrationSrcPerformTunnel(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ virStreamPtr st,
+ const char *persist_xml,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ unsigned long resource,
+ virConnectPtr dconn,
+ const char *graphicsuri,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams)
{
int ret = -1;
qemuMigrationSpec spec;
@@ -4205,10 +4209,10 @@ static int doTunnelMigrate(virQEMUDriverPtr driver,
goto cleanup;
}
- ret = qemuMigrationRun(driver, vm, persist_xml, cookiein, cookieinlen,
- cookieout, cookieoutlen, flags, resource, &spec,
- dconn, graphicsuri, nmigrate_disks, migrate_disks,
- compression, migParams);
+ ret = qemuMigrationSrcRun(driver, vm, persist_xml, cookiein, cookieinlen,
+ cookieout, cookieoutlen, flags, resource, &spec,
+ dconn, graphicsuri, nmigrate_disks, migrate_disks,
+ compression, migParams);
cleanup:
VIR_FORCE_CLOSE(spec.dest.fd.qemu);
@@ -4223,14 +4227,15 @@ static int doTunnelMigrate(virQEMUDriverPtr driver,
* from libvirt.c, but running in source libvirtd context,
* instead of client app context & also adding in tunnel
* handling */
-static int doPeer2PeerMigrate2(virQEMUDriverPtr driver,
- virConnectPtr sconn,
- virConnectPtr dconn,
- virDomainObjPtr vm,
- const char *dconnuri,
- unsigned long flags,
- const char *dname,
- unsigned long resource)
+static int
+qemuMigrationSrcPerformPeer2Peer2(virQEMUDriverPtr driver,
+ virConnectPtr sconn,
+ virConnectPtr dconn,
+ virDomainObjPtr vm,
+ const char *dconnuri,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource)
{
virDomainPtr ddomain = NULL;
char *uri_out = NULL;
@@ -4264,7 +4269,7 @@ static int doPeer2PeerMigrate2(virQEMUDriverPtr driver,
destflags = flags & ~(VIR_MIGRATE_ABORT_ON_ERROR |
VIR_MIGRATE_AUTO_CONVERGE);
- if (!(compression = qemuMigrationCompressionParse(NULL, 0, flags)))
+ if (!(compression = qemuMigrationAnyCompressionParse(NULL, 0, flags)))
goto cleanup;
VIR_DEBUG("Prepare2 %p", dconn);
@@ -4316,16 +4321,16 @@ static int doPeer2PeerMigrate2(virQEMUDriverPtr driver,
VIR_DEBUG("Perform %p", sconn);
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
if (flags & VIR_MIGRATE_TUNNELLED)
- ret = doTunnelMigrate(driver, vm, st, NULL,
- NULL, 0, NULL, NULL,
- flags, resource, dconn,
- NULL, 0, NULL, compression, &migParams);
+ ret = qemuMigrationSrcPerformTunnel(driver, vm, st, NULL,
+ NULL, 0, NULL, NULL,
+ flags, resource, dconn,
+ NULL, 0, NULL, compression, &migParams);
else
- ret = doNativeMigrate(driver, vm, NULL, uri_out,
- cookie, cookielen,
- NULL, NULL, /* No out cookie with v2 migration */
- flags, resource, dconn, NULL, 0, NULL,
- compression, &migParams);
+ ret = qemuMigrationSrcPerformNative(driver, vm, NULL, uri_out,
+ cookie, cookielen,
+ NULL, NULL, /* No out cookie with v2 migration */
+ flags, resource, dconn, NULL, 0, NULL,
+ compression, &migParams);
/* Perform failed. Make sure Finish doesn't overwrite the error */
if (ret < 0)
@@ -4379,25 +4384,25 @@ static int doPeer2PeerMigrate2(virQEMUDriverPtr driver,
* instead of client app context & also adding in tunnel
* handling */
static int
-doPeer2PeerMigrate3(virQEMUDriverPtr driver,
- virConnectPtr sconn,
- virConnectPtr dconn,
- const char *dconnuri,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *persist_xml,
- const char *dname,
- const char *uri,
- const char *graphicsuri,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams,
- unsigned long long bandwidth,
- bool useParams,
- unsigned long flags)
+qemuMigrationSrcPerformPeer2Peer3(virQEMUDriverPtr driver,
+ virConnectPtr sconn,
+ virConnectPtr dconn,
+ const char *dconnuri,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *persist_xml,
+ const char *dname,
+ const char *uri,
+ const char *graphicsuri,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams,
+ unsigned long long bandwidth,
+ bool useParams,
+ unsigned long flags)
{
virDomainPtr ddomain = NULL;
char *uri_out = NULL;
@@ -4430,9 +4435,9 @@ doPeer2PeerMigrate3(virQEMUDriverPtr driver,
* bit here, because we are already running inside the context of
* a single job. */
- dom_xml = qemuMigrationBeginPhase(driver, vm, xmlin, dname,
- &cookieout, &cookieoutlen,
- nmigrate_disks, migrate_disks, flags);
+ dom_xml = qemuMigrationSrcBeginPhase(driver, vm, xmlin, dname,
+ &cookieout, &cookieoutlen,
+ nmigrate_disks, migrate_disks, flags);
if (!dom_xml)
goto cleanup;
@@ -4478,8 +4483,8 @@ doPeer2PeerMigrate3(virQEMUDriverPtr driver,
nbdPort) < 0)
goto cleanup;
- if (qemuMigrationCompressionDump(compression, ¶ms, &nparams,
- &maxparams, &flags) < 0)
+ if (qemuMigrationAnyCompressionDump(compression, ¶ms, &nparams,
+ &maxparams, &flags) < 0)
goto cleanup;
}
@@ -4562,19 +4567,19 @@ doPeer2PeerMigrate3(virQEMUDriverPtr driver,
cookieout = NULL;
cookieoutlen = 0;
if (flags & VIR_MIGRATE_TUNNELLED) {
- ret = doTunnelMigrate(driver, vm, st, persist_xml,
- cookiein, cookieinlen,
- &cookieout, &cookieoutlen,
- flags, bandwidth, dconn, graphicsuri,
- nmigrate_disks, migrate_disks, compression,
- migParams);
+ ret = qemuMigrationSrcPerformTunnel(driver, vm, st, persist_xml,
+ cookiein, cookieinlen,
+ &cookieout, &cookieoutlen,
+ flags, bandwidth, dconn, graphicsuri,
+ nmigrate_disks, migrate_disks, compression,
+ migParams);
} else {
- ret = doNativeMigrate(driver, vm, persist_xml, uri,
- cookiein, cookieinlen,
- &cookieout, &cookieoutlen,
- flags, bandwidth, dconn, graphicsuri,
- nmigrate_disks, migrate_disks, compression,
- migParams);
+ ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri,
+ cookiein, cookieinlen,
+ &cookieout, &cookieoutlen,
+ flags, bandwidth, dconn, graphicsuri,
+ nmigrate_disks, migrate_disks, compression,
+ migParams);
}
/* Perform failed. Make sure Finish doesn't overwrite the error */
@@ -4682,9 +4687,9 @@ doPeer2PeerMigrate3(virQEMUDriverPtr driver,
cookieinlen = cookieoutlen;
cookieout = NULL;
cookieoutlen = 0;
- ret = qemuMigrationConfirmPhase(driver, vm,
- cookiein, cookieinlen,
- flags, cancelled);
+ ret = qemuMigrationSrcConfirmPhase(driver, vm,
+ cookiein, cookieinlen,
+ flags, cancelled);
/* If Confirm3 returns -1, there's nothing more we can
* do, but fortunately worst case is that there is a
* domain left in 'paused' state on source.
@@ -4716,9 +4721,9 @@ doPeer2PeerMigrate3(virQEMUDriverPtr driver,
static void
-qemuMigrationConnectionClosed(virConnectPtr conn,
- int reason,
- void *opaque)
+qemuMigrationSrcConnectionClosed(virConnectPtr conn,
+ int reason,
+ void *opaque)
{
virDomainObjPtr vm = opaque;
@@ -4739,24 +4744,25 @@ static virConnectAuth virConnectAuthConfig = {
};
-static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
- virConnectPtr sconn,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *persist_xml,
- const char *dconnuri,
- const char *uri,
- const char *graphicsuri,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams,
- unsigned long flags,
- const char *dname,
- unsigned long resource,
- bool *v3proto)
+static int
+qemuMigrationSrcPerformPeer2Peer(virQEMUDriverPtr driver,
+ virConnectPtr sconn,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *persist_xml,
+ const char *dconnuri,
+ const char *uri,
+ const char *graphicsuri,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource,
+ bool *v3proto)
{
int ret = -1;
virConnectPtr dconn = NULL;
@@ -4815,7 +4821,7 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
cfg->keepAliveCount) < 0)
goto cleanup;
- if (virConnectRegisterCloseCallback(dconn, qemuMigrationConnectionClosed,
+ if (virConnectRegisterCloseCallback(dconn, qemuMigrationSrcConnectionClosed,
vm, NULL) < 0) {
goto cleanup;
}
@@ -4873,20 +4879,20 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
flags &= ~VIR_MIGRATE_CHANGE_PROTECTION;
if (*v3proto) {
- ret = doPeer2PeerMigrate3(driver, sconn, dconn, dconnuri, vm, xmlin,
- persist_xml, dname, uri, graphicsuri,
- listenAddress, nmigrate_disks, migrate_disks,
- nbdPort, compression, migParams, resource,
- useParams, flags);
+ ret = qemuMigrationSrcPerformPeer2Peer3(driver, sconn, dconn, dconnuri, vm, xmlin,
+ persist_xml, dname, uri, graphicsuri,
+ listenAddress, nmigrate_disks, migrate_disks,
+ nbdPort, compression, migParams, resource,
+ useParams, flags);
} else {
- ret = doPeer2PeerMigrate2(driver, sconn, dconn, vm,
- dconnuri, flags, dname, resource);
+ ret = qemuMigrationSrcPerformPeer2Peer2(driver, sconn, dconn, vm,
+ dconnuri, flags, dname, resource);
}
cleanup:
orig_err = virSaveLastError();
qemuDomainObjEnterRemote(vm);
- virConnectUnregisterCloseCallback(dconn, qemuMigrationConnectionClosed);
+ virConnectUnregisterCloseCallback(dconn, qemuMigrationSrcConnectionClosed);
virObjectUnref(dconn);
qemuDomainObjExitRemote(vm);
if (orig_err) {
@@ -4904,28 +4910,28 @@ static int doPeer2PeerMigrate(virQEMUDriverPtr driver,
* perform phase of v2 non-peer2peer migration.
*/
static int
-qemuMigrationPerformJob(virQEMUDriverPtr driver,
- virConnectPtr conn,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *persist_xml,
- const char *dconnuri,
- const char *uri,
- const char *graphicsuri,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- const char *dname,
- unsigned long resource,
- bool v3proto)
+qemuMigrationSrcPerformJob(virQEMUDriverPtr driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *persist_xml,
+ const char *dconnuri,
+ const char *uri,
+ const char *graphicsuri,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource,
+ bool v3proto)
{
virObjectEventPtr event = NULL;
int ret = -1;
@@ -4941,27 +4947,27 @@ qemuMigrationPerformJob(virQEMUDriverPtr driver,
goto endjob;
}
- if (!qemuMigrationIsAllowed(driver, vm, true, flags))
+ if (!qemuMigrationSrcIsAllowed(driver, vm, true, flags))
goto endjob;
if (!(flags & (VIR_MIGRATE_UNSAFE | VIR_MIGRATE_OFFLINE)) &&
- !qemuMigrationIsSafe(vm->def, nmigrate_disks, migrate_disks, flags))
+ !qemuMigrationSrcIsSafe(vm->def, nmigrate_disks, migrate_disks, flags))
goto endjob;
- qemuMigrationStoreDomainState(vm);
+ qemuMigrationSrcStoreDomainState(vm);
if ((flags & (VIR_MIGRATE_TUNNELLED | VIR_MIGRATE_PEER2PEER))) {
- ret = doPeer2PeerMigrate(driver, conn, vm, xmlin, persist_xml,
- dconnuri, uri, graphicsuri, listenAddress,
- nmigrate_disks, migrate_disks, nbdPort,
- compression, migParams, flags, dname, resource,
- &v3proto);
+ ret = qemuMigrationSrcPerformPeer2Peer(driver, conn, vm, xmlin, persist_xml,
+ dconnuri, uri, graphicsuri, listenAddress,
+ nmigrate_disks, migrate_disks, nbdPort,
+ compression, migParams, flags, dname, resource,
+ &v3proto);
} else {
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM2);
- ret = doNativeMigrate(driver, vm, persist_xml, uri, cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, resource, NULL, NULL, 0, NULL,
- compression, migParams);
+ ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, resource, NULL, NULL, 0, NULL,
+ compression, migParams);
}
if (ret < 0)
goto endjob;
@@ -4988,9 +4994,9 @@ qemuMigrationPerformJob(virQEMUDriverPtr driver,
* here
*/
if (!v3proto && ret < 0)
- qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+ qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
- if (qemuMigrationRestoreDomainState(driver, vm)) {
+ if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_RESUMED,
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
@@ -5021,22 +5027,22 @@ qemuMigrationPerformJob(virQEMUDriverPtr driver,
* This implements perform phase of v3 migration protocol.
*/
static int
-qemuMigrationPerformPhase(virQEMUDriverPtr driver,
- virConnectPtr conn,
- virDomainObjPtr vm,
- const char *persist_xml,
- const char *uri,
- const char *graphicsuri,
- size_t nmigrate_disks,
- const char **migrate_disks,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- unsigned long resource)
+qemuMigrationSrcPerformPhase(virQEMUDriverPtr driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *persist_xml,
+ const char *uri,
+ const char *graphicsuri,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ unsigned long resource)
{
virObjectEventPtr event = NULL;
int ret = -1;
@@ -5051,15 +5057,15 @@ qemuMigrationPerformPhase(virQEMUDriverPtr driver,
qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3);
virCloseCallbacksUnset(driver->closeCallbacks, vm,
- qemuMigrationCleanup);
+ qemuMigrationSrcCleanup);
- ret = doNativeMigrate(driver, vm, persist_xml, uri, cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, resource, NULL, graphicsuri,
- nmigrate_disks, migrate_disks, compression, migParams);
+ ret = qemuMigrationSrcPerformNative(driver, vm, persist_xml, uri, cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, resource, NULL, graphicsuri,
+ nmigrate_disks, migrate_disks, compression, migParams);
if (ret < 0) {
- if (qemuMigrationRestoreDomainState(driver, vm)) {
+ if (qemuMigrationSrcRestoreDomainState(driver, vm)) {
event = virDomainEventLifecycleNewFromObj(vm,
VIR_DOMAIN_EVENT_RESUMED,
VIR_DOMAIN_EVENT_RESUMED_MIGRATED);
@@ -5070,12 +5076,12 @@ qemuMigrationPerformPhase(virQEMUDriverPtr driver,
qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_PERFORM3_DONE);
if (virCloseCallbacksSet(driver->closeCallbacks, vm, conn,
- qemuMigrationCleanup) < 0)
+ qemuMigrationSrcCleanup) < 0)
goto endjob;
endjob:
if (ret < 0) {
- qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
+ qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT);
qemuMigrationJobFinish(driver, vm);
} else {
qemuMigrationJobContinue(vm);
@@ -5091,28 +5097,28 @@ qemuMigrationPerformPhase(virQEMUDriverPtr driver,
}
int
-qemuMigrationPerform(virQEMUDriverPtr driver,
- virConnectPtr conn,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *persist_xml,
- const char *dconnuri,
- const char *uri,
- const char *graphicsuri,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- const char *dname,
- unsigned long resource,
- bool v3proto)
+qemuMigrationSrcPerform(virQEMUDriverPtr driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *persist_xml,
+ const char *dconnuri,
+ const char *uri,
+ const char *graphicsuri,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource,
+ bool v3proto)
{
VIR_DEBUG("driver=%p, conn=%p, vm=%p, xmlin=%s, dconnuri=%s, "
"uri=%s, graphicsuri=%s, listenAddress=%s, "
@@ -5132,13 +5138,13 @@ qemuMigrationPerform(virQEMUDriverPtr driver,
return -1;
}
- return qemuMigrationPerformJob(driver, conn, vm, xmlin, persist_xml, dconnuri, uri,
- graphicsuri, listenAddress,
- nmigrate_disks, migrate_disks, nbdPort,
- compression, migParams,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, dname, resource, v3proto);
+ return qemuMigrationSrcPerformJob(driver, conn, vm, xmlin, persist_xml, dconnuri, uri,
+ graphicsuri, listenAddress,
+ nmigrate_disks, migrate_disks, nbdPort,
+ compression, migParams,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, dname, resource, v3proto);
} else {
if (dconnuri) {
virReportError(VIR_ERR_INTERNAL_ERROR,
@@ -5147,27 +5153,27 @@ qemuMigrationPerform(virQEMUDriverPtr driver,
}
if (v3proto) {
- return qemuMigrationPerformPhase(driver, conn, vm, persist_xml, uri,
- graphicsuri,
- nmigrate_disks, migrate_disks,
- compression, migParams,
- cookiein, cookieinlen,
- cookieout, cookieoutlen,
- flags, resource);
+ return qemuMigrationSrcPerformPhase(driver, conn, vm, persist_xml, uri,
+ graphicsuri,
+ nmigrate_disks, migrate_disks,
+ compression, migParams,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen,
+ flags, resource);
} else {
- return qemuMigrationPerformJob(driver, conn, vm, xmlin, persist_xml, NULL,
- uri, graphicsuri, listenAddress,
- nmigrate_disks, migrate_disks, nbdPort,
- compression, migParams,
- cookiein, cookieinlen,
- cookieout, cookieoutlen, flags,
- dname, resource, v3proto);
+ return qemuMigrationSrcPerformJob(driver, conn, vm, xmlin, persist_xml, NULL,
+ uri, graphicsuri, listenAddress,
+ nmigrate_disks, migrate_disks, nbdPort,
+ compression, migParams,
+ cookiein, cookieinlen,
+ cookieout, cookieoutlen, flags,
+ dname, resource, v3proto);
}
}
}
static int
-qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def)
+qemuMigrationDstVPAssociatePortProfiles(virDomainDefPtr def)
{
size_t i;
int last_good_net = -1;
@@ -5219,10 +5225,10 @@ qemuMigrationVPAssociatePortProfiles(virDomainDefPtr def)
static int
-qemuMigrationPersist(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuMigrationCookiePtr mig,
- bool ignoreSaveError)
+qemuMigrationDstPersist(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuMigrationCookiePtr mig,
+ bool ignoreSaveError)
{
virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
virCapsPtr caps = NULL;
@@ -5271,16 +5277,16 @@ qemuMigrationPersist(virQEMUDriverPtr driver,
virDomainPtr
-qemuMigrationFinish(virQEMUDriverPtr driver,
- virConnectPtr dconn,
- virDomainObjPtr vm,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- int retcode,
- bool v3proto)
+qemuMigrationDstFinish(virQEMUDriverPtr driver,
+ virConnectPtr dconn,
+ virDomainObjPtr vm,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ int retcode,
+ bool v3proto)
{
virDomainPtr dom = NULL;
qemuMigrationCookiePtr mig = NULL;
@@ -5304,7 +5310,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
priv->migrationPort = 0;
if (!qemuMigrationJobIsActive(vm, QEMU_ASYNC_JOB_MIGRATION_IN)) {
- qemuMigrationErrorReport(driver, vm->def->name);
+ qemuMigrationDstErrorReport(driver, vm->def->name);
goto cleanup;
}
@@ -5314,7 +5320,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
v3proto ? QEMU_MIGRATION_PHASE_FINISH3
: QEMU_MIGRATION_PHASE_FINISH2);
- qemuDomainCleanupRemove(vm, qemuMigrationPrepareCleanup);
+ qemuDomainCleanupRemove(vm, qemuMigrationDstPrepareCleanup);
VIR_FREE(priv->job.completed);
cookie_flags = QEMU_MIGRATION_COOKIE_NETWORK |
@@ -5330,7 +5336,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
if (flags & VIR_MIGRATE_OFFLINE) {
if (retcode == 0 &&
- qemuMigrationPersist(driver, vm, mig, false) == 0)
+ qemuMigrationDstPersist(driver, vm, mig, false) == 0)
dom = virGetDomain(dconn, vm->def->name, vm->def->uuid, -1);
goto endjob;
}
@@ -5346,17 +5352,17 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("guest unexpectedly quit"));
- qemuMigrationErrorReport(driver, vm->def->name);
+ qemuMigrationDstErrorReport(driver, vm->def->name);
goto endjob;
}
- if (qemuMigrationVPAssociatePortProfiles(vm->def) < 0)
+ if (qemuMigrationDstVPAssociatePortProfiles(vm->def) < 0)
goto endjob;
- if (mig->network && qemuDomainMigrateOPDRelocate(driver, vm, mig) < 0)
+ if (mig->network && qemuMigrationDstOPDRelocate(driver, vm, mig) < 0)
VIR_WARN("unable to provide network data for relocation");
- if (qemuMigrationStopNBDServer(driver, vm, mig) < 0)
+ if (qemuMigrationDstStopNBDServer(driver, vm, mig) < 0)
goto endjob;
if (qemuRefreshVirtioChannelState(driver, vm,
@@ -5367,7 +5373,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
goto endjob;
if (flags & VIR_MIGRATE_PERSIST_DEST) {
- if (qemuMigrationPersist(driver, vm, mig, !v3proto) < 0) {
+ if (qemuMigrationDstPersist(driver, vm, mig, !v3proto) < 0) {
/* Hmpf. Migration was successful, but making it persistent
* was not. If we report successful, then when this domain
* shuts down, management tools are in for a surprise. On the
@@ -5388,9 +5394,9 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
/* We need to wait for QEMU to process all data sent by the source
* before starting guest CPUs.
*/
- if (qemuMigrationWaitForDestCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN,
- !!(flags & VIR_MIGRATE_POSTCOPY)) < 0) {
+ if (qemuMigrationDstWaitForCompletion(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_IN,
+ !!(flags & VIR_MIGRATE_POSTCOPY)) < 0) {
/* There's not much we can do for v2 protocol since the
* original domain on the source host is already gone.
*/
@@ -5462,9 +5468,9 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
}
if (inPostCopy) {
- if (qemuMigrationWaitForDestCompletion(driver, vm,
- QEMU_ASYNC_JOB_MIGRATION_IN,
- false) < 0) {
+ if (qemuMigrationDstWaitForCompletion(driver, vm,
+ QEMU_ASYNC_JOB_MIGRATION_IN,
+ false) < 0) {
goto endjob;
}
if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) {
@@ -5510,7 +5516,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
VIR_DOMAIN_EVENT_STOPPED_FAILED);
qemuDomainEventQueue(driver, event);
} else {
- qemuMigrationPostcopyFailed(driver, vm);
+ qemuMigrationAnyPostcopyFailed(driver, vm);
}
}
@@ -5531,7 +5537,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
VIR_FREE(priv->job.completed);
}
- qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
+ qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_MIGRATION_IN);
qemuMigrationJobFinish(driver, vm);
if (!virDomainObjIsActive(vm))
@@ -5562,10 +5568,10 @@ qemuMigrationFinish(virQEMUDriverPtr driver,
/* Helper function called while vm is active. */
int
-qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
- int fd,
- const char *compressor,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationSrcToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
+ int fd,
+ const char *compressor,
+ qemuDomainAsyncJob asyncJob)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
int rc;
@@ -5649,7 +5655,7 @@ qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
if (rc < 0)
goto cleanup;
- rc = qemuMigrationWaitForCompletion(driver, vm, asyncJob, NULL, 0);
+ rc = qemuMigrationSrcWaitForCompletion(driver, vm, asyncJob, NULL, 0);
if (rc < 0) {
if (rc == -2) {
@@ -5700,8 +5706,8 @@ qemuMigrationToFile(virQEMUDriverPtr driver, virDomainObjPtr vm,
int
-qemuMigrationCancel(virQEMUDriverPtr driver,
- virDomainObjPtr vm)
+qemuMigrationSrcCancel(virQEMUDriverPtr driver,
+ virDomainObjPtr vm)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
virHashTablePtr blockJobs = NULL;
@@ -5750,8 +5756,8 @@ qemuMigrationCancel(virQEMUDriverPtr driver,
}
}
- if (qemuMigrationCancelDriveMirror(driver, vm, false,
- QEMU_ASYNC_JOB_NONE, NULL) < 0)
+ if (qemuMigrationSrcCancelDriveMirror(driver, vm, false,
+ QEMU_ASYNC_JOB_NONE, NULL) < 0)
goto endsyncjob;
ret = 0;
@@ -5863,7 +5869,7 @@ qemuMigrationJobFinish(virQEMUDriverPtr driver, virDomainObjPtr vm)
static void
-qemuMigrationErrorFree(void *data,
+qemuMigrationDstErrorFree(void *data,
const void *name ATTRIBUTE_UNUSED)
{
virErrorPtr err = data;
@@ -5871,9 +5877,9 @@ qemuMigrationErrorFree(void *data,
}
int
-qemuMigrationErrorInit(virQEMUDriverPtr driver)
+qemuMigrationDstErrorInit(virQEMUDriverPtr driver)
{
- driver->migrationErrors = virHashAtomicNew(64, qemuMigrationErrorFree);
+ driver->migrationErrors = virHashAtomicNew(64, qemuMigrationDstErrorFree);
if (driver->migrationErrors)
return 0;
else
@@ -5885,9 +5891,9 @@ qemuMigrationErrorInit(virQEMUDriverPtr driver)
* invalid after calling this function.
*/
void
-qemuMigrationErrorSave(virQEMUDriverPtr driver,
- const char *name,
- virErrorPtr err)
+qemuMigrationDstErrorSave(virQEMUDriverPtr driver,
+ const char *name,
+ virErrorPtr err)
{
if (!err)
return;
@@ -5901,8 +5907,8 @@ qemuMigrationErrorSave(virQEMUDriverPtr driver,
}
void
-qemuMigrationErrorReport(virQEMUDriverPtr driver,
- const char *name)
+qemuMigrationDstErrorReport(virQEMUDriverPtr driver,
+ const char *name)
{
virErrorPtr err;
@@ -5918,9 +5924,9 @@ qemuMigrationErrorReport(virQEMUDriverPtr driver,
/* don't ever pass NULL params with non zero nparams */
qemuMigrationCompressionPtr
-qemuMigrationCompressionParse(virTypedParameterPtr params,
- int nparams,
- unsigned long flags)
+qemuMigrationAnyCompressionParse(virTypedParameterPtr params,
+ int nparams,
+ unsigned long flags)
{
size_t i;
qemuMigrationCompressionPtr compression = NULL;
@@ -6001,11 +6007,11 @@ qemuMigrationCompressionParse(virTypedParameterPtr params,
}
int
-qemuMigrationCompressionDump(qemuMigrationCompressionPtr compression,
- virTypedParameterPtr *params,
- int *nparams,
- int *maxparams,
- unsigned long *flags)
+qemuMigrationAnyCompressionDump(qemuMigrationCompressionPtr compression,
+ virTypedParameterPtr *params,
+ int *nparams,
+ int *maxparams,
+ unsigned long *flags)
{
size_t i;
@@ -6052,15 +6058,15 @@ qemuMigrationCompressionDump(qemuMigrationCompressionPtr compression,
/*
- * qemuMigrationReset:
+ * qemuMigrationParamsReset:
*
* Reset all migration parameters so that the next job which internally uses
* migration (save, managedsave, snapshots, dump) will not try to use them.
*/
void
-qemuMigrationReset(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob job)
+qemuMigrationParamsReset(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob job)
{
qemuMonitorMigrationCaps cap;
virErrorPtr err = virSaveLastError();
@@ -6068,12 +6074,12 @@ qemuMigrationReset(virQEMUDriverPtr driver,
if (!virDomainObjIsActive(vm))
goto cleanup;
- if (qemuMigrationResetTLS(driver, vm, job) < 0)
+ if (qemuMigrationParamsResetTLS(driver, vm, job) < 0)
goto cleanup;
for (cap = 0; cap < QEMU_MONITOR_MIGRATION_CAPS_LAST; cap++) {
- if (qemuMigrationCapsGet(vm, cap) &&
- qemuMigrationSetOption(driver, vm, cap, false, job) < 0)
+ if (qemuMigrationAnyCapsGet(vm, cap) &&
+ qemuMigrationOptionSet(driver, vm, cap, false, job) < 0)
goto cleanup;
}
@@ -6086,10 +6092,10 @@ qemuMigrationReset(virQEMUDriverPtr driver,
int
-qemuMigrationFetchMirrorStats(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
- qemuDomainJobInfoPtr jobInfo)
+qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob,
+ qemuDomainJobInfoPtr jobInfo)
{
size_t i;
qemuDomainObjPrivatePtr priv = vm->privateData;
@@ -6137,8 +6143,8 @@ qemuMigrationFetchMirrorStats(virQEMUDriverPtr driver,
bool
-qemuMigrationCapsGet(virDomainObjPtr vm,
- qemuMonitorMigrationCaps cap)
+qemuMigrationAnyCapsGet(virDomainObjPtr vm,
+ qemuMonitorMigrationCaps cap)
{
qemuDomainObjPrivatePtr priv = vm->privateData;
bool enabled = false;
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index 234f1eb858..ecb176e06a 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -25,6 +25,20 @@
# include "qemu_conf.h"
# include "qemu_domain.h"
+/*
+ * General function naming conventions:
+ *
+ * - qemuMigrationSrcXXX - only runs on source host
+ * - qemuMigrationDstXXX - only runs on dest host
+ * - qemuMigrationAnyXXX - runs on source or dest host
+ *
+ * Exceptions:
+ *
+ * - qemuMigrationParamsXXX - runs on source or dest host
+ * - qemuMigrationOptionXXX - runs on source or dest host
+ * - qemuMigrationJobXXX - runs on source or dest host
+ */
+
typedef struct _qemuMigrationCompression qemuMigrationCompression;
typedef qemuMigrationCompression *qemuMigrationCompressionPtr;
@@ -112,15 +126,15 @@ struct _qemuMigrationCompression {
};
qemuMigrationCompressionPtr
-qemuMigrationCompressionParse(virTypedParameterPtr params,
- int nparams,
- unsigned long flags);
+qemuMigrationAnyCompressionParse(virTypedParameterPtr params,
+ int nparams,
+ unsigned long flags);
int
-qemuMigrationCompressionDump(qemuMigrationCompressionPtr compression,
- virTypedParameterPtr *params,
- int *nparams,
- int *maxparams,
- unsigned long *flags);
+qemuMigrationAnyCompressionDump(qemuMigrationCompressionPtr compression,
+ virTypedParameterPtr *params,
+ int *nparams,
+ int *maxparams,
+ unsigned long *flags);
void
qemuMigrationParamsClear(qemuMonitorMigrationParamsPtr migParams);
@@ -134,166 +148,166 @@ qemuMigrationParams(virTypedParameterPtr params,
unsigned long flags);
int
-qemuMigrationSetOffline(virQEMUDriverPtr driver,
- virDomainObjPtr vm);
+qemuMigrationSrcSetOffline(virQEMUDriverPtr driver,
+ virDomainObjPtr vm);
char *
-qemuMigrationBegin(virConnectPtr conn,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *dname,
- char **cookieout,
- int *cookieoutlen,
- size_t nmigrate_disks,
- const char **migrate_disks,
- unsigned long flags);
+qemuMigrationSrcBegin(virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *dname,
+ char **cookieout,
+ int *cookieoutlen,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ unsigned long flags);
virDomainDefPtr
-qemuMigrationPrepareDef(virQEMUDriverPtr driver,
- const char *dom_xml,
- const char *dname,
- char **origname);
+qemuMigrationAnyPrepareDef(virQEMUDriverPtr driver,
+ const char *dom_xml,
+ const char *dname,
+ char **origname);
int
-qemuMigrationPrepareTunnel(virQEMUDriverPtr driver,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- virStreamPtr st,
- virDomainDefPtr *def,
- const char *origname,
- unsigned long flags);
+qemuMigrationDstPrepareTunnel(virQEMUDriverPtr driver,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ virStreamPtr st,
+ virDomainDefPtr *def,
+ const char *origname,
+ unsigned long flags);
int
-qemuMigrationPrepareDirect(virQEMUDriverPtr driver,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- const char *uri_in,
- char **uri_out,
- virDomainDefPtr *def,
- const char *origname,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- unsigned long flags);
+qemuMigrationDstPrepareDirect(virQEMUDriverPtr driver,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ const char *uri_in,
+ char **uri_out,
+ virDomainDefPtr *def,
+ const char *origname,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ unsigned long flags);
int
-qemuMigrationPerform(virQEMUDriverPtr driver,
- virConnectPtr conn,
- virDomainObjPtr vm,
- const char *xmlin,
- const char *persist_xml,
- const char *dconnuri,
- const char *uri,
- const char *graphicsuri,
- const char *listenAddress,
- size_t nmigrate_disks,
- const char **migrate_disks,
- int nbdPort,
- qemuMigrationCompressionPtr compression,
- qemuMonitorMigrationParamsPtr migParams,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- const char *dname,
- unsigned long resource,
- bool v3proto);
+qemuMigrationSrcPerform(virQEMUDriverPtr driver,
+ virConnectPtr conn,
+ virDomainObjPtr vm,
+ const char *xmlin,
+ const char *persist_xml,
+ const char *dconnuri,
+ const char *uri,
+ const char *graphicsuri,
+ const char *listenAddress,
+ size_t nmigrate_disks,
+ const char **migrate_disks,
+ int nbdPort,
+ qemuMigrationCompressionPtr compression,
+ qemuMonitorMigrationParamsPtr migParams,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ const char *dname,
+ unsigned long resource,
+ bool v3proto);
virDomainPtr
-qemuMigrationFinish(virQEMUDriverPtr driver,
- virConnectPtr dconn,
- virDomainObjPtr vm,
- const char *cookiein,
- int cookieinlen,
- char **cookieout,
- int *cookieoutlen,
- unsigned long flags,
- int retcode,
- bool v3proto);
+qemuMigrationDstFinish(virQEMUDriverPtr driver,
+ virConnectPtr dconn,
+ virDomainObjPtr vm,
+ const char *cookiein,
+ int cookieinlen,
+ char **cookieout,
+ int *cookieoutlen,
+ unsigned long flags,
+ int retcode,
+ bool v3proto);
int
-qemuMigrationConfirm(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *cookiein,
- int cookieinlen,
- unsigned int flags,
- int cancelled);
+qemuMigrationSrcConfirm(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *cookiein,
+ int cookieinlen,
+ unsigned int flags,
+ int cancelled);
bool
-qemuMigrationIsAllowed(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- bool remote,
- unsigned int flags);
+qemuMigrationSrcIsAllowed(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ bool remote,
+ unsigned int flags);
int
-qemuMigrationToFile(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- int fd,
- const char *compressor,
- qemuDomainAsyncJob asyncJob)
+qemuMigrationSrcToFile(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ int fd,
+ const char *compressor,
+ qemuDomainAsyncJob asyncJob)
ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;
int
-qemuMigrationCancel(virQEMUDriverPtr driver,
- virDomainObjPtr vm);
+qemuMigrationSrcCancel(virQEMUDriverPtr driver,
+ virDomainObjPtr vm);
int
-qemuMigrationFetchStats(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
- qemuDomainJobInfoPtr jobInfo,
- char **error);
+qemuMigrationAnyFetchStats(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob,
+ qemuDomainJobInfoPtr jobInfo,
+ char **error);
int
-qemuMigrationErrorInit(virQEMUDriverPtr driver);
+qemuMigrationDstErrorInit(virQEMUDriverPtr driver);
void
-qemuMigrationErrorSave(virQEMUDriverPtr driver,
- const char *name,
- virErrorPtr err);
+qemuMigrationDstErrorSave(virQEMUDriverPtr driver,
+ const char *name,
+ virErrorPtr err);
void
-qemuMigrationErrorReport(virQEMUDriverPtr driver,
- const char *name);
+qemuMigrationDstErrorReport(virQEMUDriverPtr driver,
+ const char *name);
int
-qemuMigrationCheckIncoming(virQEMUCapsPtr qemuCaps,
- const char *migrateFrom);
+qemuMigrationDstCheckProtocol(virQEMUCapsPtr qemuCaps,
+ const char *migrateFrom);
char *
-qemuMigrationIncomingURI(const char *migrateFrom,
- int migrateFd);
+qemuMigrationDstGetURI(const char *migrateFrom,
+ int migrateFd);
int
-qemuMigrationRunIncoming(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- const char *uri,
- qemuDomainAsyncJob asyncJob);
+qemuMigrationDstRun(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ const char *uri,
+ qemuDomainAsyncJob asyncJob);
void
-qemuMigrationPostcopyFailed(virQEMUDriverPtr driver,
+qemuMigrationAnyPostcopyFailed(virQEMUDriverPtr driver,
virDomainObjPtr vm);
void
-qemuMigrationReset(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob job);
+qemuMigrationParamsReset(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob job);
int
-qemuMigrationFetchMirrorStats(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- qemuDomainAsyncJob asyncJob,
- qemuDomainJobInfoPtr jobInfo);
+qemuMigrationSrcFetchMirrorStats(virQEMUDriverPtr driver,
+ virDomainObjPtr vm,
+ qemuDomainAsyncJob asyncJob,
+ qemuDomainJobInfoPtr jobInfo);
bool
-qemuMigrationCapsGet(virDomainObjPtr vm,
- qemuMonitorMigrationCaps cap);
+qemuMigrationAnyCapsGet(virDomainObjPtr vm,
+ qemuMonitorMigrationCaps cap);
#endif /* __QEMU_MIGRATION_H__ */
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index d0bab5d84e..13b05d236a 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -3069,7 +3069,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
* confirm success or failure yet; killing it seems safest unless
* we already started guest CPUs or we were in post-copy mode */
if (postcopy) {
- qemuMigrationPostcopyFailed(driver, vm);
+ qemuMigrationAnyPostcopyFailed(driver, vm);
} else if (state != VIR_DOMAIN_RUNNING) {
VIR_DEBUG("Killing migrated domain %s", vm->def->name);
return -1;
@@ -3077,7 +3077,7 @@ qemuProcessRecoverMigrationIn(virQEMUDriverPtr driver,
break;
}
- qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_NONE);
+ qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE);
return 0;
}
@@ -3115,11 +3115,11 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
* post-copy mode
*/
if (postcopy) {
- qemuMigrationPostcopyFailed(driver, vm);
+ qemuMigrationAnyPostcopyFailed(driver, vm);
} else {
VIR_DEBUG("Cancelling unfinished migration of domain %s",
vm->def->name);
- if (qemuMigrationCancel(driver, vm) < 0) {
+ if (qemuMigrationSrcCancel(driver, vm) < 0) {
VIR_WARN("Could not cancel ongoing migration of domain %s",
vm->def->name);
}
@@ -3133,7 +3133,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
* post-copy mode we can use PAUSED_POSTCOPY_FAILED state for this
*/
if (postcopy)
- qemuMigrationPostcopyFailed(driver, vm);
+ qemuMigrationAnyPostcopyFailed(driver, vm);
break;
case QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED:
@@ -3142,7 +3142,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
* as broken in that case
*/
if (postcopy) {
- qemuMigrationPostcopyFailed(driver, vm);
+ qemuMigrationAnyPostcopyFailed(driver, vm);
} else {
VIR_DEBUG("Resuming domain %s after failed migration",
vm->def->name);
@@ -3171,7 +3171,7 @@ qemuProcessRecoverMigrationOut(virQEMUDriverPtr driver,
}
}
- qemuMigrationReset(driver, vm, QEMU_ASYNC_JOB_NONE);
+ qemuMigrationParamsReset(driver, vm, QEMU_ASYNC_JOB_NONE);
return 0;
}
@@ -4142,7 +4142,7 @@ qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps,
{
qemuProcessIncomingDefPtr inc = NULL;
- if (qemuMigrationCheckIncoming(qemuCaps, migrateFrom) < 0)
+ if (qemuMigrationDstCheckProtocol(qemuCaps, migrateFrom) < 0)
return NULL;
if (VIR_ALLOC(inc) < 0)
@@ -4151,7 +4151,7 @@ qemuProcessIncomingDefNew(virQEMUCapsPtr qemuCaps,
if (VIR_STRDUP(inc->address, listenAddress) < 0)
goto error;
- inc->launchURI = qemuMigrationIncomingURI(migrateFrom, fd);
+ inc->launchURI = qemuMigrationDstGetURI(migrateFrom, fd);
if (!inc->launchURI)
goto error;
@@ -6348,7 +6348,7 @@ qemuProcessStart(virConnectPtr conn,
if (incoming &&
incoming->deferredURI &&
- qemuMigrationRunIncoming(driver, vm, incoming->deferredURI, asyncJob) < 0)
+ qemuMigrationDstRun(driver, vm, incoming->deferredURI, asyncJob) < 0)
goto stop;
if (qemuProcessFinishStartup(driver, vm, asyncJob,
diff --git a/tests/qemuxml2argvtest.c b/tests/qemuxml2argvtest.c
index 5791cccbd0..12983bb158 100644
--- a/tests/qemuxml2argvtest.c
+++ b/tests/qemuxml2argvtest.c
@@ -448,8 +448,8 @@ testCompareXMLToArgv(const void *data)
goto cleanup;
if (info->migrateFrom &&
- !(migrateURI = qemuMigrationIncomingURI(info->migrateFrom,
- info->migrateFd)))
+ !(migrateURI = qemuMigrationDstGetURI(info->migrateFrom,
+ info->migrateFd)))
goto cleanup;
if (!(vm = virDomainObjNew(driver.xmlopt)))
--
2.14.3
7 years, 1 month
[libvirt] [python PATCH 0/4] followup fixes for virPyDictToTypedParams
by Pavel Hrdina
Pavel Hrdina (4):
Use static variables to store virPyDictToTypedParams hints
Fix order of virPyDictToTypedParams hints
Add missing virPyDictToTypedParams hint for migration params
Fix virPyDictToTypedParams type hint for block copy params
libvirt-override.c | 150 ++++++++++++-----------------------------------------
libvirt-utils.h | 2 +
2 files changed, 35 insertions(+), 117 deletions(-)
--
2.14.3
7 years, 1 month
[libvirt] [PATCH python 1/1] Set hints for virPyDictToTypedParams
by Edgar Kaziakhmedov
Predefine hints for all parameters possible to avoid wrong type
convert.
Signed-off-by: Edgar Kaziakhmedov <edgar.kaziakhmedov(a)virtuozzo.com>
---
libvirt-override.c | 128 +++++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 124 insertions(+), 4 deletions(-)
diff --git a/libvirt-override.c b/libvirt-override.c
index 78a7f08..dba42d4 100644
--- a/libvirt-override.c
+++ b/libvirt-override.c
@@ -7750,7 +7750,9 @@ libvirt_virDomainMigrate3(PyObject *self ATTRIBUTE_UNUSED,
PyObject *dict;
unsigned int flags;
virTypedParameterPtr params;
- int nparams;
+ virPyTypedParamsHintPtr hparams;
+ int nparams = 0;
+ int nhparams = 15;
virDomainPtr ddom = NULL;
if (!PyArg_ParseTuple(args, (char *) "OOOI:virDomainMigrate3",
@@ -7760,14 +7762,64 @@ libvirt_virDomainMigrate3(PyObject *self ATTRIBUTE_UNUSED,
domain = (virDomainPtr) PyvirDomain_Get(pyobj_domain);
dconn = (virConnectPtr) PyvirConnect_Get(pyobj_dconn);
- if (virPyDictToTypedParams(dict, &params, &nparams, NULL, 0) < 0)
+ hparams = malloc(sizeof(virPyTypedParamsHint) * nhparams);
+ hparams[0].name = VIR_MIGRATE_PARAM_URI;
+ hparams[0].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[1].name = VIR_MIGRATE_PARAM_DEST_NAME;
+ hparams[1].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[2].name = VIR_MIGRATE_PARAM_DEST_XML;
+ hparams[2].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[3].name = VIR_MIGRATE_PARAM_GRAPHICS_URI;
+ hparams[3].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[4].name = VIR_MIGRATE_PARAM_BANDWIDTH;
+ hparams[4].type = VIR_TYPED_PARAM_ULLONG;
+
+ hparams[5].name = VIR_MIGRATE_PARAM_LISTEN_ADDRESS;
+ hparams[5].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[6].name = VIR_MIGRATE_PARAM_DISKS_PORT;
+ hparams[6].type = VIR_TYPED_PARAM_INT;
+
+ hparams[7].name = VIR_MIGRATE_PARAM_COMPRESSION;
+ hparams[7].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[8].name = VIR_MIGRATE_PARAM_COMPRESSION_MT_DTHREADS;
+ hparams[8].type = VIR_TYPED_PARAM_INT;
+
+ hparams[9].name = VIR_MIGRATE_PARAM_COMPRESSION_MT_LEVEL;
+ hparams[9].type = VIR_TYPED_PARAM_INT;
+
+ hparams[10].name = VIR_MIGRATE_PARAM_COMPRESSION_MT_THREADS;
+ hparams[10].type = VIR_TYPED_PARAM_INT;
+
+ hparams[11].name = VIR_MIGRATE_PARAM_COMPRESSION_XBZRLE_CACHE;
+ hparams[11].type = VIR_TYPED_PARAM_ULLONG;
+
+ hparams[12].name = VIR_MIGRATE_PARAM_PERSIST_XML;
+ hparams[12].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[13].name = VIR_MIGRATE_PARAM_AUTO_CONVERGE_INITIAL;
+ hparams[13].type = VIR_TYPED_PARAM_INT;
+
+ hparams[14].name = VIR_MIGRATE_PARAM_AUTO_CONVERGE_INCREMENT;
+ hparams[14].type = VIR_TYPED_PARAM_INT;
+
+ if (virPyDictToTypedParams(dict, &params, &nparams,
+ hparams, nhparams) < 0) {
+ free(hparams);
return NULL;
+ }
LIBVIRT_BEGIN_ALLOW_THREADS;
ddom = virDomainMigrate3(domain, dconn, params, nparams, flags);
LIBVIRT_END_ALLOW_THREADS;
virTypedParamsFree(params, nparams);
+ free(hparams);
return libvirt_virDomainPtrWrap(ddom);
}
@@ -7781,7 +7833,9 @@ libvirt_virDomainMigrateToURI3(PyObject *self ATTRIBUTE_UNUSED,
PyObject *dict;
unsigned int flags;
virTypedParameterPtr params;
+ virPyTypedParamsHintPtr hparams;
int nparams;
+ int nhparams = 15;
int ret = -1;
if (!PyArg_ParseTuple(args, (char *) "OzOI:virDomainMigrate3",
@@ -7790,14 +7844,64 @@ libvirt_virDomainMigrateToURI3(PyObject *self ATTRIBUTE_UNUSED,
domain = (virDomainPtr) PyvirDomain_Get(pyobj_domain);
- if (virPyDictToTypedParams(dict, &params, &nparams, NULL, 0) < 0)
+ hparams = malloc(sizeof(virPyTypedParamsHint) * nhparams);
+ hparams[0].name = VIR_MIGRATE_PARAM_URI;
+ hparams[0].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[1].name = VIR_MIGRATE_PARAM_DEST_NAME;
+ hparams[1].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[2].name = VIR_MIGRATE_PARAM_DEST_XML;
+ hparams[2].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[3].name = VIR_MIGRATE_PARAM_GRAPHICS_URI;
+ hparams[3].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[4].name = VIR_MIGRATE_PARAM_BANDWIDTH;
+ hparams[4].type = VIR_TYPED_PARAM_ULLONG;
+
+ hparams[5].name = VIR_MIGRATE_PARAM_LISTEN_ADDRESS;
+ hparams[5].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[6].name = VIR_MIGRATE_PARAM_DISKS_PORT;
+ hparams[6].type = VIR_TYPED_PARAM_INT;
+
+ hparams[7].name = VIR_MIGRATE_PARAM_COMPRESSION;
+ hparams[7].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[8].name = VIR_MIGRATE_PARAM_COMPRESSION_MT_DTHREADS;
+ hparams[8].type = VIR_TYPED_PARAM_INT;
+
+ hparams[9].name = VIR_MIGRATE_PARAM_COMPRESSION_MT_LEVEL;
+ hparams[9].type = VIR_TYPED_PARAM_INT;
+
+ hparams[10].name = VIR_MIGRATE_PARAM_COMPRESSION_MT_THREADS;
+ hparams[10].type = VIR_TYPED_PARAM_INT;
+
+ hparams[11].name = VIR_MIGRATE_PARAM_COMPRESSION_XBZRLE_CACHE;
+ hparams[11].type = VIR_TYPED_PARAM_ULLONG;
+
+ hparams[12].name = VIR_MIGRATE_PARAM_PERSIST_XML;
+ hparams[12].type = VIR_TYPED_PARAM_STRING;
+
+ hparams[13].name = VIR_MIGRATE_PARAM_AUTO_CONVERGE_INITIAL;
+ hparams[13].type = VIR_TYPED_PARAM_INT;
+
+ hparams[14].name = VIR_MIGRATE_PARAM_AUTO_CONVERGE_INCREMENT;
+ hparams[14].type = VIR_TYPED_PARAM_INT;
+
+ if (virPyDictToTypedParams(dict, &params, &nparams,
+ hparams, nhparams) < 0) {
+ free(hparams);
return NULL;
+ }
LIBVIRT_BEGIN_ALLOW_THREADS;
ret = virDomainMigrateToURI3(domain, dconnuri, params, nparams, flags);
LIBVIRT_END_ALLOW_THREADS;
virTypedParamsFree(params, nparams);
+ free(hparams);
return libvirt_intWrap(ret);
}
#endif /* LIBVIR_CHECK_VERSION(1, 1, 0) */
@@ -8650,7 +8754,9 @@ libvirt_virDomainBlockCopy(PyObject *self ATTRIBUTE_UNUSED,
char *disk = NULL;
char *destxml = NULL;
virTypedParameterPtr params = NULL;
+ virPyTypedParamsHintPtr hparams;
int nparams = 0;
+ int nhparams = 3;
unsigned int flags = 0;
int c_retval;
@@ -8659,8 +8765,22 @@ libvirt_virDomainBlockCopy(PyObject *self ATTRIBUTE_UNUSED,
return NULL;
if (PyDict_Check(pyobj_dict)) {
- if (virPyDictToTypedParams(pyobj_dict, &params, &nparams, NULL, 0) < 0)
+ hparams = malloc(sizeof(virPyTypedParamsHint) * nhparams);
+ hparams[0].name = VIR_DOMAIN_BLOCK_COPY_BANDWIDTH;
+ hparams[0].type = VIR_TYPED_PARAM_ULLONG;
+
+ hparams[1].name = VIR_DOMAIN_BLOCK_COPY_GRANULARITY;
+ hparams[1].type = VIR_TYPED_PARAM_UINT;
+
+ hparams[2].name = VIR_DOMAIN_BLOCK_COPY_BUF_SIZE;
+ hparams[2].type = VIR_TYPED_PARAM_UINT;
+
+ if (virPyDictToTypedParams(pyobj_dict, &params, &nparams,
+ hparams, nhparams) < 0) {
+ free(hparams);
return NULL;
+ }
+ free(hparams);
}
dom = (virDomainPtr) PyvirDomain_Get(pyobj_dom);
--
2.11.0
7 years, 1 month
[libvirt] [PATCH] bhyve: Fix build
by Andrea Bolognani
Commit 2d43f0a2dcfd dropped virDomainDiskTranslateSourcePool()'s
first argument but failed to update callers in the bhyve driver.
Signed-off-by: Andrea Bolognani <abologna(a)redhat.com>
---
Pushed under the build breaker rule.
src/bhyve/bhyve_command.c | 21 ++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/src/bhyve/bhyve_command.c b/src/bhyve/bhyve_command.c
index c1241b811..fd738b42c 100644
--- a/src/bhyve/bhyve_command.c
+++ b/src/bhyve/bhyve_command.c
@@ -198,7 +198,7 @@ bhyveBuildAHCIControllerArgStr(const virDomainDef *def,
goto error;
}
- if (virDomainDiskTranslateSourcePool(conn, disk) < 0)
+ if (virDomainDiskTranslateSourcePool(disk) < 0)
goto error;
disk_source = virDomainDiskGetSource(disk);
@@ -289,12 +289,11 @@ bhyveBuildUSBControllerArgStr(const virDomainDef *def,
static int
bhyveBuildVirtIODiskArgStr(const virDomainDef *def ATTRIBUTE_UNUSED,
virDomainDiskDefPtr disk,
- virConnectPtr conn,
virCommandPtr cmd)
{
const char *disk_source;
- if (virDomainDiskTranslateSourcePool(conn, disk) < 0)
+ if (virDomainDiskTranslateSourcePool(disk) < 0)
return -1;
if (disk->device != VIR_DOMAIN_DISK_DEVICE_DISK) {
@@ -562,7 +561,7 @@ virBhyveProcessBuildBhyveCmd(virConnectPtr conn,
/* Handled by bhyveBuildAHCIControllerArgStr() */
break;
case VIR_DOMAIN_DISK_BUS_VIRTIO:
- if (bhyveBuildVirtIODiskArgStr(def, disk, conn, cmd) < 0)
+ if (bhyveBuildVirtIODiskArgStr(def, disk, cmd) < 0)
goto error;
break;
default:
@@ -672,10 +671,10 @@ virBhyveProcessBuildCustomLoaderCmd(virDomainDefPtr def)
}
static bool
-virBhyveUsableDisk(virConnectPtr conn, virDomainDiskDefPtr disk)
+virBhyveUsableDisk(virDomainDiskDefPtr disk)
{
- if (virDomainDiskTranslateSourcePool(conn, disk) < 0)
+ if (virDomainDiskTranslateSourcePool(disk) < 0)
return false;
if ((disk->device != VIR_DOMAIN_DISK_DEVICE_DISK) &&
@@ -729,7 +728,7 @@ virBhyveProcessBuildGrubbhyveCmd(virDomainDefPtr def,
* across. */
cd = hdd = userdef = NULL;
for (i = 0; i < def->ndisks; i++) {
- if (!virBhyveUsableDisk(conn, def->disks[i]))
+ if (!virBhyveUsableDisk(def->disks[i]))
continue;
diskdef = def->disks[i];
@@ -815,7 +814,7 @@ virBhyveProcessBuildGrubbhyveCmd(virDomainDefPtr def,
}
static virDomainDiskDefPtr
-virBhyveGetBootDisk(virConnectPtr conn, virDomainDefPtr def)
+virBhyveGetBootDisk(virDomainDefPtr def)
{
size_t i;
virDomainDiskDefPtr match = NULL;
@@ -851,7 +850,7 @@ virBhyveGetBootDisk(virConnectPtr conn, virDomainDefPtr def)
/* If boot_dev is set, we return the first device of
* the request type */
for (i = 0; i < def->ndisks; i++) {
- if (!virBhyveUsableDisk(conn, def->disks[i]))
+ if (!virBhyveUsableDisk(def->disks[i]))
continue;
if (def->disks[i]->device == boot_dev) {
@@ -875,7 +874,7 @@ virBhyveGetBootDisk(virConnectPtr conn, virDomainDefPtr def)
int first_usable_disk_index = -1;
for (i = 0; i < def->ndisks; i++) {
- if (!virBhyveUsableDisk(conn, def->disks[i]))
+ if (!virBhyveUsableDisk(def->disks[i]))
continue;
else
first_usable_disk_index = i;
@@ -907,7 +906,7 @@ virBhyveProcessBuildLoadCmd(virConnectPtr conn, virDomainDefPtr def,
virDomainDiskDefPtr disk = NULL;
if (def->os.bootloader == NULL) {
- disk = virBhyveGetBootDisk(conn, def);
+ disk = virBhyveGetBootDisk(def);
if (disk == NULL)
return NULL;
--
2.14.3
7 years, 1 month
[libvirt] [PATCH] tests: drop linkage to libvirt_driver_network_impl.la
by Daniel P. Berrangé
The qemuxml2argvtest does not need to link to the network driver
after this commit:
commit 0c63c117a2d17f66b05dd83e50aa36ac0b0c9843
Author: Daniel P. Berrangé <berrange(a)redhat.com>
Date: Fri Feb 9 15:08:53 2018 +0000
conf: reimplement virDomainNetResolveActualType in terms of public API
Signed-off-by: Daniel P. Berrangé <berrange(a)redhat.com>
---
Pushed as a CI build fix for OS-X
tests/Makefile.am | 1 -
1 file changed, 1 deletion(-)
diff --git a/tests/Makefile.am b/tests/Makefile.am
index d9b3a99477..09647a959d 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -583,7 +583,6 @@ qemuxml2argvtest_SOURCES = \
qemuxml2argvtest.c testutilsqemu.c testutilsqemu.h \
testutils.c testutils.h
qemuxml2argvtest_LDADD = libqemutestdriver.la \
- ../src/libvirt_driver_network_impl.la \
$(LDADDS) $(LIBXML_LIBS)
qemuxml2argvmock_la_SOURCES = \
--
2.14.3
7 years, 1 month
[libvirt] [PATCH] conf: move 'generated' member from virMacAddr to virDomainNetDef
by Laine Stump
Commit 7e62c4cd26d (first appearing in libvirt-3.9.0 as a resolution
to rhbz #1343919) added a "generated" attribute to virMacAddr that was
set whenever a mac address was auto-generated by libvirt. This
knowledge was used in a single place - when trying to match a NetDef
from the domain to delete with user-provided XML. Since the XML parser
always auto-generates a MAC address for NetDefs when none is provided,
it was previously impossible to make a search where the MAC address
wasn't significant, but the addition of the "generated" attribute made
it possible for the search function to ignore auto-generated MACs.
This implementation had a problem though - it was adding a field to a
"low level" struct - virMacAddr - which is used in other places with
the assumption that it contains exactly a 6 byte MAC address and
nothing else. In particular, virNWFilterSnoopEthHdr uses virMacAddr as
part of the definition of an ethernet packet header, whose layout must
of course match an actual ethernet packet. Adding the extra bools into
virNWFilterSnoopEthHdr caused the nwfilter driver's "IP discovery via
DHCP packet snooping" functionality to mysteriously stop working.
In order to fix that behavior, and prevent potential future similar
odd behavior, this patch moves the "generated" member out of
virMacAddr (so that it is again really just a MAC address) and into
virDomainNetDef, and sets it only when virDomainNetGenerateMAC() is
called from virDomainNetDefParseXML() (which is the only time we care
about it).
Resolves: https://bugzilla.redhat.com/1529338
(It should also be applied to any maintenance branch that applies
commit 7e62c4cd26 and friends to resolve
https://bugzilla.redhat.com/1343919)
Signed-off-by: Laine Stump <laine(a)laine.org>
---
src/conf/domain_conf.c | 3 ++-
src/conf/domain_conf.h | 1 +
src/util/virmacaddr.c | 5 -----
src/util/virmacaddr.h | 2 --
tests/bhyveargv2xmlmock.c | 1 -
5 files changed, 3 insertions(+), 9 deletions(-)
diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 3cfd6de5e0..7783a3dbef 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -11064,6 +11064,7 @@ virDomainNetDefParseXML(virDomainXMLOptionPtr xmlopt,
}
} else {
virDomainNetGenerateMAC(xmlopt, &def->mac);
+ def->mac_generated = true;
}
if (devaddr) {
@@ -16283,7 +16284,7 @@ virDomainNetFindIdx(virDomainDefPtr def, virDomainNetDefPtr net)
size_t i;
int matchidx = -1;
char mac[VIR_MAC_STRING_BUFLEN];
- bool MACAddrSpecified = !net->mac.generated;
+ bool MACAddrSpecified = !net->mac_generated;
bool PCIAddrSpecified = virDomainDeviceAddressIsValid(&net->info,
VIR_DOMAIN_DEVICE_ADDRESS_TYPE_PCI);
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index e6212818aa..b0a175b4a4 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -966,6 +966,7 @@ struct _virDomainActualNetDef {
struct _virDomainNetDef {
virDomainNetType type;
virMacAddr mac;
+ bool mac_generated; /* true if mac was *just now* auto-generated by libvirt */
char *model;
union {
struct {
diff --git a/src/util/virmacaddr.c b/src/util/virmacaddr.c
index 409fdc34d5..7afe032b9c 100644
--- a/src/util/virmacaddr.c
+++ b/src/util/virmacaddr.c
@@ -107,7 +107,6 @@ void
virMacAddrSet(virMacAddrPtr dst, const virMacAddr *src)
{
memcpy(dst, src, sizeof(*src));
- dst->generated = false;
}
/**
@@ -121,7 +120,6 @@ void
virMacAddrSetRaw(virMacAddrPtr dst, const unsigned char src[VIR_MAC_BUFLEN])
{
memcpy(dst->addr, src, VIR_MAC_BUFLEN);
- dst->generated = false;
}
/**
@@ -151,7 +149,6 @@ virMacAddrParse(const char* str, virMacAddrPtr addr)
{
size_t i;
- addr->generated = false;
errno = 0;
for (i = 0; i < VIR_MAC_BUFLEN; i++) {
char *end_ptr;
@@ -220,7 +217,6 @@ virMacAddrParseHex(const char *str, virMacAddrPtr addr)
str[VIR_MAC_HEXLEN])
return -1;
- addr->generated = false;
for (i = 0; i < VIR_MAC_BUFLEN; i++)
addr->addr[i] = (virHexToBin(str[2 * i]) << 4 |
virHexToBin(str[2 * i + 1]));
@@ -236,7 +232,6 @@ void virMacAddrGenerate(const unsigned char prefix[VIR_MAC_PREFIX_BUFLEN],
addr->addr[3] = virRandomBits(8);
addr->addr[4] = virRandomBits(8);
addr->addr[5] = virRandomBits(8);
- addr->generated = true;
}
/* The low order bit of the first byte is the "multicast" bit. */
diff --git a/src/util/virmacaddr.h b/src/util/virmacaddr.h
index ef4285d639..f4f5e2ce11 100644
--- a/src/util/virmacaddr.h
+++ b/src/util/virmacaddr.h
@@ -36,8 +36,6 @@ typedef virMacAddr *virMacAddrPtr;
struct _virMacAddr {
unsigned char addr[VIR_MAC_BUFLEN];
- bool generated; /* True if MAC address was autogenerated,
- false otherwise. */
};
int virMacAddrCompare(const char *mac1, const char *mac2);
diff --git a/tests/bhyveargv2xmlmock.c b/tests/bhyveargv2xmlmock.c
index dd25f4e13a..1f08bebb7b 100644
--- a/tests/bhyveargv2xmlmock.c
+++ b/tests/bhyveargv2xmlmock.c
@@ -16,7 +16,6 @@ virMacAddrGenerate(const unsigned char prefix[VIR_MAC_PREFIX_BUFLEN],
addr->addr[3] = 0;
addr->addr[4] = 0;
addr->addr[5] = 0;
- addr->generated = true;
}
int
--
2.14.3
7 years, 1 month
[libvirt] [PATCH v3 00/10] Cleanups in QEMU driver wrt virConnectPtr
by Daniel P. Berrangé
There are many places we can stop passing around virConnectPtr now
and directly open secondary drivers where required instead.
v3:
- More migration function renames
- Style fixes
v2:
- Now with working tests !
Daniel P. Berrangé (10):
driver: allow override of connection for secondary drivers
conf: reimplement virDomainNetResolveActualType in terms of public API
qemu: stop passing virConnectPtr into qemuMonitorStartCPUs
conf: stop passing virConnectPtr into virDomainDiskTranslateSourcePool
qemu: don't pass virConnectPtr around for secrets
qemu: stop passing in virConnectPtr for looking up networks
qemu: remove virConnectPtr from some more startup code paths
qemu: remove virConnectPtr in some migration methods
qemu: don't export migration job APIs
qemu: rename migration APIs to include Src or Dst in their name
src/conf/domain_conf.c | 90 ++-
src/conf/domain_conf.h | 14 +-
src/driver.c | 184 ++++-
src/driver.h | 7 +
src/libvirt_private.syms | 6 +
src/network/bridge_driver.c | 76 +--
src/qemu/qemu_conf.c | 3 +-
src/qemu/qemu_conf.h | 3 +-
src/qemu/qemu_domain.c | 111 ++-
src/qemu/qemu_domain.h | 21 +-
src/qemu/qemu_driver.c | 324 ++++-----
src/qemu/qemu_hotplug.c | 66 +-
src/qemu/qemu_hotplug.h | 15 +-
src/qemu/qemu_migration.c | 1522 +++++++++++++++++++++---------------------
src/qemu/qemu_migration.h | 301 ++++-----
src/qemu/qemu_monitor.c | 10 +-
src/qemu/qemu_monitor.h | 11 +-
src/qemu/qemu_monitor_json.c | 3 +-
src/qemu/qemu_monitor_json.h | 3 +-
src/qemu/qemu_monitor_text.c | 9 +-
src/qemu/qemu_monitor_text.h | 3 +-
src/qemu/qemu_process.c | 154 ++---
src/qemu/qemu_process.h | 12 +-
tests/Makefile.am | 7 +-
tests/qemuhotplugtest.c | 4 +-
tests/qemumonitorjsontest.c | 2 +-
tests/qemuxml2argvtest.c | 11 +-
27 files changed, 1517 insertions(+), 1455 deletions(-)
--
2.14.3
7 years, 1 month
Re: [libvirt] Regarding libvirt patchset "Keep original security label"
by Michal Privoznik
[Adding libvirt-list as others might chime in or when somebody is
solving similar issue they can find answers in the archive.]
On 02/19/2018 10:09 AM, Toni Peltonen wrote:
> Hi,
>
> Sorry to bother you with age-old stuff like this (https://www.redhat.com/archives/libvir-list/2014-September/msg00551.html) patch set.
>
> I did some proof of concept work on my laptop during the weekend with a couple of CentOS 7 virtual machines running shared GFS2 mount (with working dlm locking) and libvirtd + QEMU guests on top of that. I am running the latest libvirtd packages from CentOS upstream.
>
> While I managed to get dlm and global POSIX locking working as expected when I added virtlockd to the picture to actually hold locks for the VM images I ended up getting some gray hair realizing that this ages old security label issue is still present. The situation is basically still the same:
>
> - On a shared filesystem (like GFS2), despite virtlockd locks working as expected, libvirtd still (as expected with current code) tries to change the ownership and security labels of the target file (QCOW2 image)
>
> - This sudden change causes the virtual machine running on the other host to drop as read-only since SELinux starts preventing all write operations for it
>
> - With SELinux in Permissive mode on the host that runs the virtual machine everything works as expected
So you have a disk that is shared between two domains? One way to avoid
the problem is to have <shareable/> in disk definition so that libvirt
doesn't restore disk labels. However, that still might not work for you
because it means that libvirt still changes labels on domain start.
So far the only way to prevent this from happening is using custom
labels https://libvirt.org/formatdomain.html#elementsDisks .
>
>
> I saw that the only code available (that I could easily find and understand) to really tackle this issue was your patch set from as early as 2014. It seems it never hit upstream though, at least I can't find any of the relevant parts in the upstream code.
>
> I am just reaching out to ask whether this patch set was left out intentionally (https://www.redhat.com/archives/libvir-list/2014-September/msg00551.html), just lost in time or if you are aware of any other work that might be in progress to tackle this issue in the upstream?
Unfortunately, the patches were abandoned and the issue is even worse. I
remember having a discussion on this topic lately (although can't recall
where). Turns out, we firstly relabel the disks and only after that we
try to obtain disk locks. So if the latter fails (e.g. because another
domain holds the locks) the labels are changed anyway. The suggested
solution was to have two locks: one for disk contents the other for disk
metadata (like security labels).
Anyway, I don't think there's anybody working on this actively. Sorry.
Michal
7 years, 1 month
[libvirt] [PATCH 0/2] Two blockjob fixes
by Peter Krempa
Peter Krempa (2):
virsh: Fix internal naming of some blockjob commands
qemu: blockcopy: Add check for bandwidth
src/qemu/qemu_driver.c | 8 +++++++
tools/virsh-domain.c | 60 +++++++++++++++++++++++++-------------------------
2 files changed, 38 insertions(+), 30 deletions(-)
--
2.15.0
7 years, 1 month