[PATCH 0 of 4] [TEST] Misc migration test fixes

These tests enable localhost migration to work with Xen guests

# HG changeset patch # User Kaitlin Rupert <karupert@us.ibm.com> # Date 1242946256 25200 # Node ID 42a5cacd8bb2b96c2a9075d331da0c567661976d # Parent 3c17b4d15e84469ed3d2307a7123c75d99415dee [TEST] #2 Move cleanup_guest_netpool() to vsmigrations.py Migration test 06 - 08 duplicate this same code. Also, if the migration is a localhost one, set the hostname to localhost. Otherwise, the providers will return an error saying the guest already exists on the target (because the providers haven't detected a localhost migration). If the target system name is localhost, the migration will always be a local migration. Be sure to set remote_migration accordingly. Update from 1 to 2: -Change destroy() to cim_destroy() Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com> diff -r 3c17b4d15e84 -r 42a5cacd8bb2 suites/libvirt-cim/lib/XenKvmLib/vsmigrations.py --- a/suites/libvirt-cim/lib/XenKvmLib/vsmigrations.py Fri May 22 01:41:08 2009 -0700 +++ b/suites/libvirt-cim/lib/XenKvmLib/vsmigrations.py Thu May 21 15:50:56 2009 -0700 @@ -30,10 +30,11 @@ from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.enumclass import EnumInstances from XenKvmLib.classes import get_typed_class, virt_types -from XenKvmLib.xm_virt_util import domain_list -from XenKvmLib.const import get_provider_version +from XenKvmLib.xm_virt_util import domain_list, net_list +from XenKvmLib.const import get_provider_version, default_network_name from CimTest.Globals import logger, CIM_USER, CIM_PASS, CIM_NS, \ CIM_ERROR_ENUMERATE +from XenKvmLib.common_util import destroy_netpool # Migration constants CIM_MIGRATE_OFFLINE=1 @@ -94,9 +95,14 @@ def check_mig_support(virt, options): s_sysname = gethostbyaddr(options.ip)[0] t_sysname = gethostbyaddr(options.t_url)[0] - if virt == 'KVM' and (t_sysname == s_sysname or t_sysname in s_sysname): - logger.info("Libvirt does not support local migration for KVM") - return SKIP, s_sysname, t_sysname + + if t_sysname == s_sysname or t_sysname in s_sysname: + if virt == 
'KVM': + logger.info("Libvirt does not support local migration for KVM") + return SKIP, s_sysname, t_sysname + + #localhost migration is supported by Xen + return PASS, s_sysname, "localhost" return PASS, s_sysname, t_sysname @@ -364,12 +370,15 @@ logger.error("Guest to be migrated not specified.") return FAIL + if t_sysname == "localhost": + remote_migrate = 0 + try: if remote_migrate == 1: - status, req_image, backup_image = remote_copy_guest_image(virt, - s_sysname, - t_sysname, - guest_name) + status, req_image, bkup_image = remote_copy_guest_image(virt, + s_sysname, + t_sysname, + guest_name) if status != PASS: raise Exception("Failure from remote_copy_guest_image()") @@ -397,7 +406,10 @@ logger.info("Migrating '%s'.. this will take some time.", guest_name) # Migrate the guest to t_sysname - status, ret = migrate_guest_to_host(vsmservice, guest_ref, t_sysname, msd) + status, ret = migrate_guest_to_host(vsmservice, + guest_ref, + t_sysname, + msd) if status == FAIL: raise Exception("Failed to Migrate guest '%s' from '%s' to '%s'" \ % (guest_name, s_sysname, t_sysname)) @@ -413,5 +425,50 @@ logger.error("Exception details %s", details) status = FAIL - cleanup_image(backup_image, req_image, t_sysname, remote_migrate=1) + if remote_migrate == 1: + cleanup_image(bkup_image, req_image, t_sysname, remote_migrate=1) + return status + +def cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname): + # Clean the domain on target machine. + # This is req when migration is successful, also when migration is not + # completely successful VM might be created on the target machine + # and hence need to clean. 
+ target_list = domain_list(t_sysname, virt) + if target_list != None and test_dom in target_list: + ret_value = cxml.cim_destroy(t_sysname) + if not ret_value: + logger.info("Failed to destroy the migrated domain '%s' on '%s'", + test_dom, t_sysname) + + ret_value = cxml.undefine(t_sysname) + if not ret_value: + logger.info("Failed to undefine the migrated domain '%s' on '%s'", + test_dom, t_sysname) + + # Done cleaning environment + if t_sysname == "localhost": + return + + # Remote Migration not Successful, clean the domain on src machine + src_list = domain_list(s_sysname, virt) + if src_list != None and test_dom in src_list: + ret_value = cxml.cim_destroy(s_sysname) + if not ret_value: + logger.info("Failed to destroy the domain '%s' on the source '%s'", + test_dom, s_sysname) + + ret_value = cxml.undefine(s_sysname) + if not ret_value: + logger.info("Failed to undefine the domain '%s' on source '%s'", + test_dom, s_sysname) + + # clean the networkpool created on the remote machine + target_net_list = net_list(t_sysname, virt) + if target_net_list != None and default_network_name in target_net_list: + ret_value = destroy_netpool(t_sysname, virt, default_network_name) + if ret_value != PASS: + logger.info("Unable to destroy networkpool '%s' on '%s'", + default_network_name, t_sysname) +

# HG changeset patch # User Kaitlin Rupert <karupert@us.ibm.com> # Date 1242946256 25200 # Node ID fdb53fac065cd041292fbc9b6dc0dcd712b99b0b # Parent 42a5cacd8bb2b96c2a9075d331da0c567661976d [TEST] 06 - Remove cleanup_guest_netpool() def and call it from vsmigration.py Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com> diff -r 42a5cacd8bb2 -r fdb53fac065c suites/libvirt-cim/cimtest/VirtualSystemMigrationService/06_remote_live_migration.py --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/06_remote_live_migration.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/06_remote_live_migration.py Thu May 21 15:50:56 2009 -0700 @@ -35,9 +35,9 @@ from XenKvmLib.const import do_main, default_network_name from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.classes import get_typed_class -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ - destroy_netpool +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ + cleanup_guest_netpool +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf sup_types = ['KVM', 'Xen'] @@ -67,45 +67,6 @@ return PASS, cxml -def cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname): - # Clean the domain on target machine. - # This is req when migration is successful, also when migration is not - # completely successful VM might be created on the target machine - # and hence need to clean. 
- target_list = domain_list(t_sysname, virt) - if target_list != None and test_dom in target_list: - ret_value = cxml.destroy(t_sysname) - if not ret_value: - logger.info("Failed to destroy the migrated domain '%s' on '%s'", - test_dom, t_sysname) - - ret_value = cxml.undefine(t_sysname) - if not ret_value: - logger.info("Failed to undefine the migrated domain '%s' on '%s'", - test_dom, t_sysname) - - # clean the networkpool created on the remote machine - target_net_list = net_list(t_sysname, virt) - if target_net_list != None and default_network_name in target_net_list: - ret_value = destroy_netpool(t_sysname, virt, default_network_name) - if ret_value != PASS: - logger.info("Unable to destroy networkpool '%s' on '%s'", - default_network_name, t_sysname) - - # Remote Migration not Successful, clean the domain on src machine - src_list = domain_list(s_sysname, virt) - if src_list != None and test_dom in src_list: - ret_value = cxml.cim_destroy(s_sysname) - if not ret_value: - logger.info("Failed to destroy the domain '%s' on the source '%s'", - test_dom, s_sysname) - - ret_value = cxml.undefine(s_sysname) - if not ret_value: - logger.info("Failed to undefine the domain '%s' on source '%s'", - test_dom, s_sysname) - - @do_main(sup_types) def main(): options = main.options

# HG changeset patch # User Kaitlin Rupert <karupert@us.ibm.com> # Date 1242946256 25200 # Node ID e21a60339e4be98d049db5ea146377e90a1f006b # Parent fdb53fac065cd041292fbc9b6dc0dcd712b99b0b [TEST] 07 - Remove cleanup_guest_netpool() def and call it from vsmigration.py Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com> diff -r fdb53fac065c -r e21a60339e4b suites/libvirt-cim/cimtest/VirtualSystemMigrationService/07_remote_offline_migration.py --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/07_remote_offline_migration.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/07_remote_offline_migration.py Thu May 21 15:50:56 2009 -0700 @@ -35,9 +35,9 @@ from XenKvmLib.const import do_main, default_network_name from CimTest.ReturnCodes import PASS, FAIL from XenKvmLib.classes import get_typed_class -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ - destroy_netpool +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ + cleanup_guest_netpool +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf sup_types = ['KVM', 'Xen'] @@ -60,35 +60,6 @@ return PASS, cxml -def cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname): - # Clean the domain on target machine. - # This is req when migration is successful, also when migration is not - # completely successful VM might be created on the target machine - # and hence need to clean. 
- target_list = domain_list(t_sysname, virt) - if target_list != None and test_dom in target_list: - ret_value = cxml.undefine(t_sysname) - if not ret_value: - logger.info("Failed to undefine the migrated domain '%s' on '%s'", - test_dom, t_sysname) - - # clean the networkpool created on the remote machine - target_net_list = net_list(t_sysname, virt) - if target_net_list != None and default_network_name in target_net_list: - ret_value = destroy_netpool(t_sysname, virt, default_network_name) - if ret_value != PASS: - logger.info("Unable to destroy networkpool '%s' on '%s'", - default_network_name, t_sysname) - - # Remote Migration not Successful, clean the domain on src machine - src_list = domain_list(s_sysname, virt) - if src_list != None and test_dom in src_list: - ret_value = cxml.undefine(s_sysname) - if not ret_value: - logger.info("Failed to undefine the domain '%s' on source '%s'", - test_dom, s_sysname) - - @do_main(sup_types) def main(): options = main.options

The changes for the patch look fine except when we run the 07_remote_offline_migration.py tc we get a message in the cimtest.log like below: Wed, 01 Jul 2009 07:31:07:TEST LOG:INFO - Failed to undefine the migrated domain 'VM_frm_elm3b25.beaverton.ibm.com' on 'localhost' This is because here in the tc we only define the VM but when we call cleanup_guest_netpool() we destroy and undefine() the VM. Kaitlin Rupert wrote:
# HG changeset patch # User Kaitlin Rupert <karupert@us.ibm.com> # Date 1242946256 25200 # Node ID e21a60339e4be98d049db5ea146377e90a1f006b # Parent fdb53fac065cd041292fbc9b6dc0dcd712b99b0b [TEST] 07 - Remove cleanup_guest_netpool() def and call it from vsmigration.py
Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>
diff -r fdb53fac065c -r e21a60339e4b suites/libvirt-cim/cimtest/VirtualSystemMigrationService/07_remote_offline_migration.py --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/07_remote_offline_migration.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/07_remote_offline_migration.py Thu May 21 15:50:56 2009 -0700 @@ -35,9 +35,9 @@ from XenKvmLib.const import do_main, default_network_name from CimTest.ReturnCodes import PASS, FAIL from XenKvmLib.classes import get_typed_class -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ - destroy_netpool +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ + cleanup_guest_netpool +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf
sup_types = ['KVM', 'Xen']
@@ -60,35 +60,6 @@
return PASS, cxml
-def cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname): - # Clean the domain on target machine. - # This is req when migration is successful, also when migration is not - # completely successful VM might be created on the target machine - # and hence need to clean. - target_list = domain_list(t_sysname, virt) - if target_list != None and test_dom in target_list: - ret_value = cxml.undefine(t_sysname) - if not ret_value: - logger.info("Failed to undefine the migrated domain '%s' on '%s'", - test_dom, t_sysname) - - # clean the networkpool created on the remote machine - target_net_list = net_list(t_sysname, virt) - if target_net_list != None and default_network_name in target_net_list: - ret_value = destroy_netpool(t_sysname, virt, default_network_name) - if ret_value != PASS: - logger.info("Unable to destroy networkpool '%s' on '%s'", - default_network_name, t_sysname) - - # Remote Migration not Successful, clean the domain on src machine - src_list = domain_list(s_sysname, virt) - if src_list != None and test_dom in src_list: - ret_value = cxml.undefine(s_sysname) - if not ret_value: - logger.info("Failed to undefine the domain '%s' on source '%s'", - test_dom, s_sysname) - - @do_main(sup_types) def main(): options = main.options
_______________________________________________ Libvirt-cim mailing list Libvirt-cim@redhat.com https://www.redhat.com/mailman/listinfo/libvirt-cim
-- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik@linux.vnet.ibm.com

# HG changeset patch # User Kaitlin Rupert <karupert@us.ibm.com> # Date 1242946256 25200 # Node ID 401fcf44005919231ce6ea590029aff42e910152 # Parent e21a60339e4be98d049db5ea146377e90a1f006b [TEST] #2 VSMS 08 Remove cleanup_guest_netpool() def and call it from vsmigration.py Updates: -Give guest time to fuly boot before rebooting -Indicate the pass / failure of both the restart and resume cases Note: This test might fail with older versions of Xen - restart migration can be unstable Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com> diff -r e21a60339e4b -r 401fcf440059 suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py Thu May 21 15:50:56 2009 -0700 @@ -28,6 +28,7 @@ import sys import os +from time import sleep from socket import gethostname from XenKvmLib import vxml from XenKvmLib.xm_virt_util import domain_list, net_list @@ -35,9 +36,9 @@ from XenKvmLib.const import do_main, default_network_name from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.classes import get_typed_class -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ - destroy_netpool +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ + cleanup_guest_netpool +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf sup_types = ['KVM', 'Xen'] @@ -125,6 +126,9 @@ status_resume = status_restart = None cxml = None + status_restart = -1 + status_resume = -1 + try: for mig_type in mig_types: @@ -135,6 +139,10 @@ logger.error("Error setting up the guest") return status + # Generally, having a test sleep is a bad choice, but we need to + # give the guest some time to fully 
boot before we reboot it + sleep(15) + # create the networkpool used in the domain to be migrated # on the target machine. t_net_list = net_list(t_sysname, virt) @@ -168,18 +176,14 @@ cleanup_guest(virt, cxml, test_dom, t_sysname, s_sysname) status = FAIL - # clean the networkpool created on the remote machine - target_net_list = net_list(t_sysname, virt) - if target_net_list != None and net_pool_name in target_net_list: - ret_value = destroy_netpool(t_sysname, virt, net_pool_name) - if ret_value != PASS: - logger.info("Unable to destroy networkpool '%s' on '%s'", - net_pool_name, t_sysname) + cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname) - if status_restart != PASS or status_resume != PASS: + if status_restart == PASS and status_resume == PASS: + status = PASS + else: + logger.error("Restart migration %d", status_restart) + logger.error("Resume migration %d", status_resume) status = FAIL - else: - status = PASS logger.info("Test case %s", str_status(status)) return status

Kaitlin Rupert wrote:
# HG changeset patch # User Kaitlin Rupert <karupert@us.ibm.com> # Date 1242946256 25200 # Node ID 401fcf44005919231ce6ea590029aff42e910152 # Parent e21a60339e4be98d049db5ea146377e90a1f006b [TEST] #2 VSMS 08 Remove cleanup_guest_netpool() def and call it from vsmigration.py
Updates: -Give guest time to fuly boot before rebooting -Indicate the pass / failure of both the restart and resume cases
Note: This test might fail with older versions of Xen - restart migration can be unstable
Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>
diff -r e21a60339e4b -r 401fcf440059 suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py Thu May 21 15:50:56 2009 -0700 @@ -28,6 +28,7 @@
import sys import os +from time import sleep from socket import gethostname from XenKvmLib import vxml from XenKvmLib.xm_virt_util import domain_list, net_list @@ -35,9 +36,9 @@ from XenKvmLib.const import do_main, default_network_name from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.classes import get_typed_class -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ - destroy_netpool +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ + cleanup_guest_netpool +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf
sup_types = ['KVM', 'Xen']
@@ -125,6 +126,9 @@ status_resume = status_restart = None cxml = None
+ status_restart = -1 + status_resume = -1 + try:
for mig_type in mig_types: @@ -135,6 +139,10 @@ logger.error("Error setting up the guest") return status
+ # Generally, having a test sleep is a bad choice, but we need to + # give the guest some time to fully boot before we reboot it + sleep(15)
You can increase the timeout period in poll_for_state_change(). Did increasing the timeout period for poll_for_state_change() not work for you?
+ # create the networkpool used in the domain to be migrated # on the target machine. t_net_list = net_list(t_sysname, virt) @@ -168,18 +176,14 @@ cleanup_guest(virt, cxml, test_dom, t_sysname, s_sysname) status = FAIL
- # clean the networkpool created on the remote machine - target_net_list = net_list(t_sysname, virt) - if target_net_list != None and net_pool_name in target_net_list: - ret_value = destroy_netpool(t_sysname, virt, net_pool_name) - if ret_value != PASS: - logger.info("Unable to destroy networkpool '%s' on '%s'", - net_pool_name, t_sysname) + cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname)
- if status_restart != PASS or status_resume != PASS: + if status_restart == PASS and status_resume == PASS: + status = PASS + else: + logger.error("Restart migration %d", status_restart) + logger.error("Resume migration %d", status_resume) status = FAIL - else: - status = PASS
logger.info("Test case %s", str_status(status)) return status
_______________________________________________ Libvirt-cim mailing list Libvirt-cim@redhat.com https://www.redhat.com/mailman/listinfo/libvirt-cim
-- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik@linux.vnet.ibm.com

@@ -135,6 +139,10 @@ logger.error("Error setting up the guest") return status
+ # Generally, having a test sleep is a bad choice, but we need to + # give the guest some time to fully boot before we reboot it + sleep(15)
You can increase the timeout period in poll_for_state_change(). Did increasing the timeout period for poll_for_state_change() not work for you?
The issue here is slightly different.. in poll_for_state_change(), we poll until libvirt reports that the guest is active. However, libvirt reports that the guest is active once the guest starts the boot process. So, poll_for_state_change() will stop polling, but the guest may not have finished the boot sequence. At that point, we attempt to launch a restart migration - this means that we attempt to initiate a guest shutdown before the guest is even booted. For a guest shutdown, libvirt sends an ACPI event to the guest, but if the guest isn't fully booted, it won't be able to detect that event. So we need to give the OS a chance to boot before asking for a shutdown. -- Kaitlin Rupert IBM Linux Technology Center kaitlin@linux.vnet.ibm.com

Kaitlin Rupert wrote:
@@ -135,6 +139,10 @@ logger.error("Error setting up the guest") return status
+ # Generally, having a test sleep is a bad choice, but we need to + # give the guest some time to fully boot before we reboot it + sleep(15)
You can increase the timeout period in poll_for_state_change(). Did increasing the timeout period for poll_for_state_change() not work for you?
The issue here is slightly different.. in poll_for_state_change(), we poll until libvirt reports that the guest is active. However, libvirt reports that the guest is active once the guest starts the boot process. So, poll_for_state_change() will stop polling, but the guest may not have finished the boot sequence.
At that point, we attempt to launch a restart migration - this means that we attempt to initiate a guest shutdown before the guest is even booted. For a guest shutdown, libvirt sends an ACPI event to the guest, but if the guest isn't fully booted, it won't be able to detect that event.
So we need to give the OS a chance to boot before asking for a shutdown. Oh! this makes sense then...
-- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik@linux.vnet.ibm.com

The test passed for the Restart migration option, but failed for resume migration: Wed, 01 Jul 2009 07:35:52:TEST LOG:INFO - 'restart' Migration for 'VM_frm_elm3b25.beaverton.ibm.com PASSED' Wed, 01 Jul 2009 07:35:53:TEST LOG:INFO - Executing 'resume' migration for 'VM_frm_elm3b25.beaverton.ibm.com' from 'elm3b25.beaverton.ibm.com' to 'localhost' Wed, 01 Jul 2009 07:36:12:TEST LOG:INFO - Migrating 'VM_frm_elm3b25.beaverton.ibm.com'.. this will take some time. Wed, 01 Jul 2009 07:36:17:TEST LOG:ERROR - JobStatus for dom 'VM_frm_elm3b25.beaverton.ibm.com' has 'Migration Failed: Error 0' instead of 'Completed' Wed, 01 Jul 2009 07:36:17:TEST LOG:INFO - 'resume' Migration for 'VM_frm_elm3b25.beaverton.ibm.com FAILED' Wed, 01 Jul 2009 07:36:18:TEST LOG:ERROR - Restart migration 0 Wed, 01 Jul 2009 07:36:18:TEST LOG:ERROR - Resume migration 1 Wed, 01 Jul 2009 07:36:18:TEST LOG:INFO - Test case FAILED Kaitlin Rupert wrote:
# HG changeset patch # User Kaitlin Rupert <karupert@us.ibm.com> # Date 1242946256 25200 # Node ID 401fcf44005919231ce6ea590029aff42e910152 # Parent e21a60339e4be98d049db5ea146377e90a1f006b [TEST] #2 VSMS 08 Remove cleanup_guest_netpool() def and call it from vsmigration.py
Updates: -Give guest time to fuly boot before rebooting -Indicate the pass / failure of both the restart and resume cases
Note: This test might fail with older versions of Xen - restart migration can be unstable
Signed-off-by: Kaitlin Rupert <karupert@us.ibm.com>
diff -r e21a60339e4b -r 401fcf440059 suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py --- a/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py Thu May 21 15:50:56 2009 -0700 +++ b/suites/libvirt-cim/cimtest/VirtualSystemMigrationService/08_remote_restart_resume_migration.py Thu May 21 15:50:56 2009 -0700 @@ -28,6 +28,7 @@
import sys import os +from time import sleep from socket import gethostname from XenKvmLib import vxml from XenKvmLib.xm_virt_util import domain_list, net_list @@ -35,9 +36,9 @@ from XenKvmLib.const import do_main, default_network_name from CimTest.ReturnCodes import PASS, FAIL, SKIP from XenKvmLib.classes import get_typed_class -from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate -from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf,\ - destroy_netpool +from XenKvmLib.vsmigrations import check_mig_support, local_remote_migrate, \ + cleanup_guest_netpool +from XenKvmLib.common_util import poll_for_state_change, create_netpool_conf
sup_types = ['KVM', 'Xen']
@@ -125,6 +126,9 @@ status_resume = status_restart = None cxml = None
+ status_restart = -1 + status_resume = -1 + try:
for mig_type in mig_types: @@ -135,6 +139,10 @@ logger.error("Error setting up the guest") return status
+ # Generally, having a test sleep is a bad choice, but we need to + # give the guest some time to fully boot before we reboot it + sleep(15) + # create the networkpool used in the domain to be migrated # on the target machine. t_net_list = net_list(t_sysname, virt) @@ -168,18 +176,14 @@ cleanup_guest(virt, cxml, test_dom, t_sysname, s_sysname) status = FAIL
- # clean the networkpool created on the remote machine - target_net_list = net_list(t_sysname, virt) - if target_net_list != None and net_pool_name in target_net_list: - ret_value = destroy_netpool(t_sysname, virt, net_pool_name) - if ret_value != PASS: - logger.info("Unable to destroy networkpool '%s' on '%s'", - net_pool_name, t_sysname) + cleanup_guest_netpool(virt, cxml, test_dom, t_sysname, s_sysname)
- if status_restart != PASS or status_resume != PASS: + if status_restart == PASS and status_resume == PASS: + status = PASS + else: + logger.error("Restart migration %d", status_restart) + logger.error("Resume migration %d", status_resume) status = FAIL - else: - status = PASS
logger.info("Test case %s", str_status(status)) return status
_______________________________________________ Libvirt-cim mailing list Libvirt-cim@redhat.com https://www.redhat.com/mailman/listinfo/libvirt-cim
-- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik@linux.vnet.ibm.com

Deepti B Kalakeri wrote:
The test passed for the Restart migration option, but failed for resume migration:
Wed, 01 Jul 2009 07:35:52:TEST LOG:INFO - 'restart' Migration for 'VM_frm_elm3b25.beaverton.ibm.com PASSED'
Wed, 01 Jul 2009 07:35:53:TEST LOG:INFO - Executing 'resume' migration for 'VM_frm_elm3b25.beaverton.ibm.com' from 'elm3b25.beaverton.ibm.com' to 'localhost' Wed, 01 Jul 2009 07:36:12:TEST LOG:INFO - Migrating 'VM_frm_elm3b25.beaverton.ibm.com'.. this will take some time. Wed, 01 Jul 2009 07:36:17:TEST LOG:ERROR - JobStatus for dom 'VM_frm_elm3b25.beaverton.ibm.com' has 'Migration Failed: Error 0' instead of 'Completed' Wed, 01 Jul 2009 07:36:17:TEST LOG:INFO - 'resume' Migration for 'VM_frm_elm3b25.beaverton.ibm.com FAILED'
Wed, 01 Jul 2009 07:36:18:TEST LOG:ERROR - Restart migration 0 Wed, 01 Jul 2009 07:36:18:TEST LOG:ERROR - Resume migration 1 Wed, 01 Jul 2009 07:36:18:TEST LOG:INFO - Test case FAILED
Hmm.. can you include some provider debug and verify that your system is configured with Xen relocation enabled. -- Kaitlin Rupert IBM Linux Technology Center kaitlin@linux.vnet.ibm.com

+1 works for me. Kaitlin Rupert wrote:
Deepti B Kalakeri wrote:
The test passed for the Restart migration option, but failed for resume migration:
Wed, 01 Jul 2009 07:35:52:TEST LOG:INFO - 'restart' Migration for 'VM_frm_elm3b25.beaverton.ibm.com PASSED'
Wed, 01 Jul 2009 07:35:53:TEST LOG:INFO - Executing 'resume' migration for 'VM_frm_elm3b25.beaverton.ibm.com' from 'elm3b25.beaverton.ibm.com' to 'localhost' Wed, 01 Jul 2009 07:36:12:TEST LOG:INFO - Migrating 'VM_frm_elm3b25.beaverton.ibm.com'.. this will take some time. Wed, 01 Jul 2009 07:36:17:TEST LOG:ERROR - JobStatus for dom 'VM_frm_elm3b25.beaverton.ibm.com' has 'Migration Failed: Error 0' instead of 'Completed' Wed, 01 Jul 2009 07:36:17:TEST LOG:INFO - 'resume' Migration for 'VM_frm_elm3b25.beaverton.ibm.com FAILED'
Wed, 01 Jul 2009 07:36:18:TEST LOG:ERROR - Restart migration 0 Wed, 01 Jul 2009 07:36:18:TEST LOG:ERROR - Resume migration 1 Wed, 01 Jul 2009 07:36:18:TEST LOG:INFO - Test case FAILED
Hmm.. can you include some provider debug and verify that your system is configured with Xen relocation enabled.
-- Thanks and Regards, Deepti B. Kalakeri IBM Linux Technology Center deeptik@linux.vnet.ibm.com
participants (2)
-
Deepti B Kalakeri
-
Kaitlin Rupert