[PATCH] [TEST] #2 LXC support using DefineSystem & VSMS.01

# HG changeset patch # User Zhengang Li <lizg@cn.ibm.com> # Date 1212133201 -28800 # Node ID ddb52ec53368860a18b7e3863b38ea31cb163069 # Parent 5c77329cb53e6340cd6ddbf9c044462fb994eb88 [TEST] #2 LXC support using DefineSystem & VSMS.01 Updates: add the missing address property Signed-off-by: Zhengang Li <lizg@cn.ibm.com> diff -r 5c77329cb53e -r ddb52ec53368 suites/libvirt-cim/cimtest/VirtualSystemManagementService/01_definesystem_name.py --- a/suites/libvirt-cim/cimtest/VirtualSystemManagementService/01_definesystem_name.py Fri May 30 14:26:38 2008 +0800 +++ b/suites/libvirt-cim/cimtest/VirtualSystemManagementService/01_definesystem_name.py Fri May 30 15:40:01 2008 +0800 @@ -30,7 +30,7 @@ from CimTest.Globals import do_main from CimTest.Globals import logger -SUPPORTED_TYPES = ['Xen', 'KVM', 'XenFV'] +SUPPORTED_TYPES = ['Xen', 'KVM', 'XenFV', 'LXC'] default_dom = 'test_domain' @do_main(SUPPORTED_TYPES) diff -r 5c77329cb53e -r ddb52ec53368 suites/libvirt-cim/lib/XenKvmLib/const.py --- a/suites/libvirt-cim/lib/XenKvmLib/const.py Fri May 30 14:26:38 2008 +0800 +++ b/suites/libvirt-cim/lib/XenKvmLib/const.py Fri May 30 15:40:01 2008 +0800 @@ -85,3 +85,4 @@ #vxml.LXCXML LXC_init_path = os.path.join(_image_dir, 'cimtest_lxc_init') LXC_default_tty = '/dev/ptmx' +LXC_default_mp = '/tmp' diff -r 5c77329cb53e -r ddb52ec53368 suites/libvirt-cim/lib/XenKvmLib/vsms.py --- a/suites/libvirt-cim/lib/XenKvmLib/vsms.py Fri May 30 14:26:38 2008 +0800 +++ b/suites/libvirt-cim/lib/XenKvmLib/vsms.py Fri May 30 15:40:01 2008 +0800 @@ -102,6 +102,8 @@ self.isFullVirt = (type == 'KVM' or virt == 'XenFV') if self.isFullVirt: self.BootDevice = 'hd' + elif type == 'LXC': + self.InitPath = const.LXC_init_path else: self.Bootloader = live.bootloader(Globals.CIM_IP, 0) self.BootloaderArgs = '' @@ -113,6 +115,9 @@ pass class KVM_VirtualSystemSettingData(CIM_VirtualSystemSettingData): + pass + +class LXC_VirtualSystemSettingData(CIM_VirtualSystemSettingData): pass 
@eval_cls('VirtualSystemSettingData') @@ -134,6 +139,12 @@ class KVM_DiskResourceAllocationSettingData(CIM_DiskResourceAllocationSettingData): pass + +class LXC_DiskResourceAllocationSettingData(CIMClassMOF): + def __init__(self, mountpoint, source, name): + self.MountPoint = mountpoint + self.Address = source + self.InstanceID = '%s/%s' % (name, mountpoint) @eval_cls('DiskResourceAllocationSettingData') def get_dasd_class(virt): @@ -157,6 +168,9 @@ class KVM_NetResourceAllocationSettingData(CIM_NetResourceAllocationSettingData): pass +class LXC_NetResourceAllocationSettingData(CIM_NetResourceAllocationSettingData): + pass + @eval_cls('NetResourceAllocationSettingData') def get_nasd_class(virt): pass @@ -175,6 +189,9 @@ pass class KVM_ProcResourceAllocationSettingData(CIM_ProcResourceAllocationSettingData): + pass + +class LXC_ProcResourceAllocationSettingData(CIM_ProcResourceAllocationSettingData): pass @eval_cls('ProcResourceAllocationSettingData') @@ -197,6 +214,9 @@ class KVM_MemResourceAllocationSettingData(CIM_MemResourceAllocationSettingData): pass +class LXC_MemResourceAllocationSettingData(CIM_MemResourceAllocationSettingData): + pass + @eval_cls('MemResourceAllocationSettingData') def get_masd_class(virt): pass @@ -212,17 +232,25 @@ class_vssd = get_vssd_class(virt) vssd = class_vssd(name=dom_name, virt=virt) - class_dasd = get_dasd_class(virt) - if virt == 'KVM': - disk_dev = 'hda' - disk_source = const.KVM_disk_path - elif virt == 'XenFV': - disk_dev = 'hda' - disk_source = const.XenFV_disk_path - d = class_dasd( - dev=disk_dev, - source=disk_source, - name=dom_name) + # LXC only takes disk and memory device for now. + # Only disk __init__ takes different params. 
+ if virt == 'LXC': + d = LXC_DiskResourceAllocationSettingData( + mountpoint=const.LXC_default_mp, + source=const.LXC_default_mp, name=dom_name) + else: + class_dasd = get_dasd_class(virt) + if virt == 'KVM': + disk_dev = 'hda' + disk_source = const.KVM_disk_path + elif virt == 'XenFV': + disk_dev = 'hda' + disk_source = const.XenFV_disk_path + d = class_dasd( + dev=disk_dev, + source=disk_source, + name=dom_name) + class_nasd = get_nasd_class(virt) if virt == 'KVM': net_mac= const.KVM_default_mac

+ # LXC only takes disk and memory device for now. + # Only disk __init__ takes different params. + if virt == 'LXC': + d = LXC_DiskResourceAllocationSettingData( + mountpoint=const.LXC_default_mp, + source=const.LXC_default_mp, name=dom_name)
Instead of using the same directory for the mountpoint and the source, you could create a directory such as /tmp/lxc_files to mount on /tmp. That way we can distinguish between the two.
+ else: + class_dasd = get_dasd_class(virt) + if virt == 'KVM': + disk_dev = 'hda' + disk_source = const.KVM_disk_path + elif virt == 'XenFV': + disk_dev = 'hda' + disk_source = const.XenFV_disk_path + d = class_dasd( + dev=disk_dev, + source=disk_source, + name=dom_name) + class_nasd = get_nasd_class(virt)
We'll need to skip over network and processor for now, since they aren't supported. I see the following error: ERROR - Unexpected rc code 1 and description: CIM_ERR_FAILED: ResourceSettings Error: Resource type not supported on this platform InvokeMethod(DefineSystem): CIM_ERR_FAILED: ResourceSettings Error: Resource type not supported on this platform -- Kaitlin Rupert IBM Linux Technology Center kaitlin@linux.vnet.ibm.com

Kaitlin Rupert wrote:
+ # LXC only takes disk and memory device for now. + # Only disk __init__ takes different params. + if virt == 'LXC': + d = LXC_DiskResourceAllocationSettingData( + mountpoint=const.LXC_default_mp, + source=const.LXC_default_mp, name=dom_name)
Instead of using the same directory for the mountpoint and the source, you could create a directory such as /tmp/lxc_files to mount on /tmp. That way we can distinguish between the two. I'll add this in the next revision.
+ else: + class_dasd = get_dasd_class(virt) + if virt == 'KVM': + disk_dev = 'hda' + disk_source = const.KVM_disk_path + elif virt == 'XenFV': + disk_dev = 'hda' + disk_source = const.XenFV_disk_path + d = class_dasd( + dev=disk_dev, + source=disk_source, + name=dom_name) + class_nasd = get_nasd_class(virt)
We'll need to skip over network and processor for now, since they aren't supported.
I tried with 'None' and empty values to the network and processor mof-string. Pywbem had a problem parsing them. How do we skip the two devices?
I see the following error:
ERROR - Unexpected rc code 1 and description: CIM_ERR_FAILED: ResourceSettings Error: Resource type not supported on this platform InvokeMethod(DefineSystem): CIM_ERR_FAILED: ResourceSettings Error: Resource type not supported on this platform
-- - Zhengang

Zhengang Li wrote:
Kaitlin Rupert wrote:
+ # LXC only takes disk and memory device for now. + # Only disk __init__ takes different params. + if virt == 'LXC': + d = LXC_DiskResourceAllocationSettingData( + mountpoint=const.LXC_default_mp, + source=const.LXC_default_mp, name=dom_name)
Instead of using the same directory for the mountpoint and the source, you could create a directory such as /tmp/lxc_files to mount on /tmp. That way we can distinguish between the two. I'll add this in the next revision.
Sure - no problem.
+ else: + class_dasd = get_dasd_class(virt) + if virt == 'KVM': + disk_dev = 'hda' + disk_source = const.KVM_disk_path + elif virt == 'XenFV': + disk_dev = 'hda' + disk_source = const.XenFV_disk_path + d = class_dasd( + dev=disk_dev, + source=disk_source, + name=dom_name) + class_nasd = get_nasd_class(virt)
We'll need to skip over network and processor for now, since they aren't supported.
I tried with 'None' and empty values to the network and processor mof-string. Pywbem had a problem parsing them. How do we skip the two devices?
You don't need to specify RASDs for every device. So, you can return something like the following for containers: return vssd.mof(), [d.mof(), m.mof()] So, you could reorder things so that disk and memory are created first, then check if the virt type is LXC and return. Or you can just skip over the network and proc RASD creation in the case of LXC. And then return accordingly at the end. Either way is fine. I think the latter might be useful - it'll be easier to enable containers for network/processor should that kind of support become available. -- Kaitlin Rupert IBM Linux Technology Center kaitlin@linux.vnet.ibm.com
participants (3)
-
Kaitlin Rupert
-
Zhengang Li
-
zli@linux.vnet.ibm.com