[libvirt] [PATCH] Extend l3 cache to nodeinfo

This patch extends l3 cache information to nodeinfo output.

Signed-off-by: Eli Qiao <liyong.qiao@intel.com>
---
 include/libvirt/libvirt-host.h |  1 +
 src/nodeinfo.c                 |  3 ++-
 src/remote/remote_protocol.x   |  1 +
 src/test/test_driver.c         |  1 +
 src/util/virhostcpu.c          | 29 +++++++++++++++++++++++++----
 src/util/virhostcpu.h          |  3 ++-
 src/util/virhostcpupriv.h      |  3 ++-
 tests/virhostcputest.c         |  3 ++-
 tools/virsh-host.c             |  1 +
 9 files changed, 37 insertions(+), 8 deletions(-)

diff --git a/include/libvirt/libvirt-host.h b/include/libvirt/libvirt-host.h
index 07b5d15..ba926df 100644
--- a/include/libvirt/libvirt-host.h
+++ b/include/libvirt/libvirt-host.h
@@ -167,6 +167,7 @@ struct _virNodeInfo {
                                  processors in case of unusual NUMA topology*/
     unsigned int threads; /* number of threads per core, 1 in case of
                              unusual numa topology */
+    unsigned int l3_cache; /* l3 cache in kilobytes */
 };
 
 /**
diff --git a/src/nodeinfo.c b/src/nodeinfo.c
index f2ded02..f54972b 100644
--- a/src/nodeinfo.c
+++ b/src/nodeinfo.c
@@ -152,7 +152,8 @@ nodeGetInfo(virNodeInfoPtr nodeinfo)
     if (virHostCPUGetInfo(hostarch,
                           &nodeinfo->cpus, &nodeinfo->mhz,
                           &nodeinfo->nodes, &nodeinfo->sockets,
-                          &nodeinfo->cores, &nodeinfo->threads) < 0)
+                          &nodeinfo->cores, &nodeinfo->threads,
+                          &nodeinfo->l3_cache) < 0)
         return -1;
 
     return 0;
diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x
index b846ef2..6a16b4e 100644
--- a/src/remote/remote_protocol.x
+++ b/src/remote/remote_protocol.x
@@ -489,6 +489,7 @@ struct remote_node_get_info_ret { /* insert@1 */
     int sockets;
     int cores;
     int threads;
+    int l3_cache;
 };
 
 struct remote_connect_get_capabilities_ret {
diff --git a/src/test/test_driver.c b/src/test/test_driver.c
index de92a01..b49c07b 100644
--- a/src/test/test_driver.c
+++ b/src/test/test_driver.c
@@ -138,6 +138,7 @@ static const virNodeInfo defaultNodeInfo = {
     2,
     2,
     2,
+    4096,
 };
 
 static void
diff --git a/src/util/virhostcpu.c b/src/util/virhostcpu.c
index f29f312..698813b 100644
--- a/src/util/virhostcpu.c
+++ b/src/util/virhostcpu.c
@@ -530,7 +530,8 @@ virHostCPUGetInfoPopulateLinux(FILE *cpuinfo,
                                unsigned int *nodes,
                                unsigned int *sockets,
                                unsigned int *cores,
-                               unsigned int *threads)
+                               unsigned int *threads,
+                               unsigned int *l3_cache)
 {
     virBitmapPtr present_cpus_map = NULL;
     virBitmapPtr online_cpus_map = NULL;
@@ -546,7 +547,7 @@ virHostCPUGetInfoPopulateLinux(FILE *cpuinfo,
     int direrr;
 
     *mhz = 0;
-    *cpus = *nodes = *sockets = *cores = *threads = 0;
+    *cpus = *nodes = *sockets = *cores = *threads = *l3_cache = 0;
 
     /* Start with parsing CPU clock speed from /proc/cpuinfo */
     while (fgets(line, sizeof(line), cpuinfo) != NULL) {
@@ -571,6 +572,24 @@ virHostCPUGetInfoPopulateLinux(FILE *cpuinfo,
                     (*p == '\0' || *p == '.' || c_isspace(*p)))
                     *mhz = ui;
             }
+            if (STRPREFIX(buf, "cache size")) {
+                char *p;
+                unsigned int ui;
+                buf += 10;
+
+                while (*buf && c_isspace(*buf))
+                    buf++;
+
+                if (*buf != ':' || !buf[1]) {
+                    virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+                                   _("parsing cache size from cpuinfo"));
+                    goto cleanup;
+                }
+
+                if (virStrToLong_ui(buf+1, &p, 10, &ui) == 0 &&
+                    *(p+1)=='K' && *(p+2)=='B')
+                    *l3_cache = ui;
+            }
         } else if (ARCH_IS_PPC(arch)) {
             char *buf = line;
             if (STRPREFIX(buf, "clock")) {
@@ -960,7 +979,8 @@ virHostCPUGetInfo(virArch hostarch ATTRIBUTE_UNUSED,
                   unsigned int *nodes ATTRIBUTE_UNUSED,
                   unsigned int *sockets ATTRIBUTE_UNUSED,
                   unsigned int *cores ATTRIBUTE_UNUSED,
-                  unsigned int *threads ATTRIBUTE_UNUSED)
+                  unsigned int *threads ATTRIBUTE_UNUSED,
+                  unsigned int *l3_cache ATTRIBUTE_UNUSED)
 {
 #ifdef __linux__
     int ret = -1;
@@ -974,7 +994,8 @@ virHostCPUGetInfo(virArch hostarch ATTRIBUTE_UNUSED,
 
     ret = virHostCPUGetInfoPopulateLinux(cpuinfo, hostarch,
                                          cpus, mhz, nodes,
-                                         sockets, cores, threads);
+                                         sockets, cores, threads,
+                                         l3_cache);
     if (ret < 0)
         goto cleanup;
 
diff --git a/src/util/virhostcpu.h b/src/util/virhostcpu.h
index 39f7cf8..fc579fe 100644
--- a/src/util/virhostcpu.h
+++ b/src/util/virhostcpu.h
@@ -50,7 +50,8 @@ int virHostCPUGetInfo(virArch hostarch,
                       unsigned int *nodes,
                       unsigned int *sockets,
                       unsigned int *cores,
-                      unsigned int *threads);
+                      unsigned int *threads,
+                      unsigned int *l3_cache);
 
 int virHostCPUGetKVMMaxVCPUs(void);
 
diff --git a/src/util/virhostcpupriv.h b/src/util/virhostcpupriv.h
index de30983..69d5e34 100644
--- a/src/util/virhostcpupriv.h
+++ b/src/util/virhostcpupriv.h
@@ -34,7 +34,8 @@ int virHostCPUGetInfoPopulateLinux(FILE *cpuinfo,
                                    unsigned int *nodes,
                                    unsigned int *sockets,
                                    unsigned int *cores,
-                                   unsigned int *threads);
+                                   unsigned int *threads,
+                                   unsigned int *l3_cache);
 
 int virHostCPUGetStatsLinux(FILE *procstat,
                             int cpuNum,
diff --git a/tests/virhostcputest.c b/tests/virhostcputest.c
index 8387956..2b2a680 100644
--- a/tests/virhostcputest.c
+++ b/tests/virhostcputest.c
@@ -44,7 +44,8 @@ linuxTestCompareFiles(const char *cpuinfofile,
     if (virHostCPUGetInfoPopulateLinux(cpuinfo, arch,
                                        &nodeinfo.cpus, &nodeinfo.mhz,
                                        &nodeinfo.nodes, &nodeinfo.sockets,
-                                       &nodeinfo.cores, &nodeinfo.threads) < 0) {
+                                       &nodeinfo.cores, &nodeinfo.threads,
+                                       &nodeinfo.l3_cache) < 0) {
         if (virTestGetDebug()) {
             if (virGetLastError())
                 VIR_TEST_DEBUG("\n%s\n", virGetLastErrorMessage());
diff --git a/tools/virsh-host.c b/tools/virsh-host.c
index 24ebde2..2b85372 100644
--- a/tools/virsh-host.c
+++ b/tools/virsh-host.c
@@ -671,6 +671,7 @@ cmdNodeinfo(vshControl *ctl, const vshCmd *cmd ATTRIBUTE_UNUSED)
     vshPrint(ctl, "%-20s %d\n", _("Thread(s) per core:"), info.threads);
     vshPrint(ctl, "%-20s %d\n", _("NUMA cell(s):"), info.nodes);
     vshPrint(ctl, "%-20s %lu KiB\n", _("Memory size:"), info.memory);
+    vshPrint(ctl, "%-20s %d KiB\n", _("L3 cache size:"), info.l3_cache);
 
     return true;
 }
-- 
1.9.1

On Tue, Jan 10, 2017 at 04:11:03PM +0800, Eli Qiao wrote:
This patch extends l3 cache information to nodeinfo output.
Signed-off-by: Eli Qiao <liyong.qiao@intel.com>
---
 include/libvirt/libvirt-host.h |  1 +
 src/nodeinfo.c                 |  3 ++-
 src/remote/remote_protocol.x   |  1 +
 src/test/test_driver.c         |  1 +
 src/util/virhostcpu.c          | 29 +++++++++++++++++++++++++----
 src/util/virhostcpu.h          |  3 ++-
 src/util/virhostcpupriv.h      |  3 ++-
 tests/virhostcputest.c         |  3 ++-
 tools/virsh-host.c             |  1 +
 9 files changed, 37 insertions(+), 8 deletions(-)
diff --git a/include/libvirt/libvirt-host.h b/include/libvirt/libvirt-host.h
index 07b5d15..ba926df 100644
--- a/include/libvirt/libvirt-host.h
+++ b/include/libvirt/libvirt-host.h
@@ -167,6 +167,7 @@ struct _virNodeInfo {
                                  processors in case of unusual NUMA topology*/
     unsigned int threads; /* number of threads per core, 1 in case of
                              unusual numa topology */
+    unsigned int l3_cache; /* l3 cache in kilobytes */
 };
NACK, it is *forbidden* to change public structs as this breaks ABI compatibility.
diff --git a/src/remote/remote_protocol.x b/src/remote/remote_protocol.x
index b846ef2..6a16b4e 100644
--- a/src/remote/remote_protocol.x
+++ b/src/remote/remote_protocol.x
@@ -489,6 +489,7 @@ struct remote_node_get_info_ret { /* insert@1 */
     int sockets;
     int cores;
     int threads;
+    int l3_cache;
 };
Likewise this breaks RPC compatibility. This info will need to be reported in the capabilities XML instead.

Regards,
Daniel

Hi Daniel,

Thanks for your comments. I will not change public ABI nor break RPC.

I wonder if you would agree with extending virHostCPUGetInfoPopulateLinux to get the l3 cache size? Such as:

virHostCPUGetInfoPopulateLinux(FILE *cpuinfo,
                               virArch arch,
                               unsigned int *cpus,
                               unsigned int *mhz,
                               unsigned int *nodes,
                               unsigned int *sockets,
                               unsigned int *cores,
                               unsigned int *threads,
                               unsigned int *l3_cache)   ←---------

virHostCPUGetInfoPopulateLinux already parses /proc/cpuinfo, so I think it's the best way to get the l3 cache size:

cat /proc/cpuinfo
…
cpu MHz         : 2821.435
cache size      : 56320 KB
…

Best Regards
Eli Qiao (乔立勇), OpenStack Core team, OTC Intel.

On 10/01/2017, 5:44 PM, "Daniel P. Berrange" <berrange@redhat.com> wrote:

    On Tue, Jan 10, 2017 at 04:11:03PM +0800, Eli Qiao wrote:
    > This patch extends l3 cache information to nodeinfo output.

    NACK, it is *forbidden* to change public structs as this breaks
    ABI compatibility.

Okay, got it.

    Likewise this breaks RPC compatibility. This info will need to be
    reported in the capabilities XML instead.

Sure.

On Tue, Jan 10, 2017 at 12:05:00PM +0000, Qiao, Liyong wrote:
Hi Daniel,
Thanks for your comments.
I will not change public ABI nor break RPC.
I wonder if you would agree with extending virHostCPUGetInfoPopulateLinux to get the l3 cache size? Such as:

virHostCPUGetInfoPopulateLinux(FILE *cpuinfo,
                               virArch arch,
                               unsigned int *cpus,
                               unsigned int *mhz,
                               unsigned int *nodes,
                               unsigned int *sockets,
                               unsigned int *cores,
                               unsigned int *threads,
                               unsigned int *l3_cache)   ←---------
virHostCPUGetInfoPopulateLinux already parses /proc/cpuinfo, so I think it's the best way to get the l3 cache size.
cat /proc/cpuinfo
/proc/cpuinfo is a pretty awful file because its contents are completely different on every linux architecture. It would be preferable to get the cache sizes from /sys/devices/system/cpu/* if possible.

Regards,
Daniel
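For reference, the kernel exposes per-CPU cache details under /sys/devices/system/cpu/cpuN/cache/indexM/ via the attributes level, size, type and shared_cpu_list. A minimal sketch of reading the L3 size for cpu0 that way, illustrative only (not libvirt code, simplified error handling):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative sketch: read the L3 cache size (in KiB) for cpu0 from
 * sysfs instead of parsing /proc/cpuinfo. */
static int
getL3CacheKiB(unsigned int *l3_kib)
{
    size_t i;

    for (i = 0; i < 16; i++) {
        char path[256];
        char buf[64];
        FILE *fp;
        unsigned int level = 0;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu0/cache/index%zu/level", i);
        if (!(fp = fopen(path, "r")))
            break;                      /* no more cache indexes */
        if (fscanf(fp, "%u", &level) != 1)
            level = 0;
        fclose(fp);

        if (level != 3)
            continue;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu0/cache/index%zu/size", i);
        if (!(fp = fopen(path, "r")) || !fgets(buf, sizeof(buf), fp)) {
            if (fp)
                fclose(fp);
            return -1;
        }
        fclose(fp);

        *l3_kib = strtoul(buf, NULL, 10);   /* the file reads e.g. "56320K" */
        return 0;
    }
    return -1;                              /* no L3 cache found */
}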

On Tue, Jan 10, 2017 at 04:11:03PM +0800, Eli Qiao wrote:
This patch extends l3 cache information to nodeinfo output.
This doesn't make sense: if there are multiple sockets with different L3 cache sizes, that can't be represented in nodeinfo. If there can be multiple values, you need to extend virsh capabilities instead.

On 10/01/2017, 8:32 PM, "Martin Kletzander" <mkletzan@redhat.com> wrote:

    On Tue, Jan 10, 2017 at 04:11:03PM +0800, Eli Qiao wrote:
    > This patch extends l3 cache information to nodeinfo output.

    This doesn't make sense: if there are multiple sockets with different
    L3 cache sizes, that can't be represented in nodeinfo. If there can be
    multiple values, you need to extend virsh capabilities instead.

I don't think it will work if we plug in two different types of socket; if they can work together, they should have the same L3 cache size. Anyway, Daniel doesn't agree with extending nodeinfo, so I am considering another way.

Best Regards
Eli Qiao (乔立勇), OpenStack Core team, OTC Intel.

On Tue, Jan 10, 2017 at 04:11:03PM +0800, Eli Qiao wrote:
This patch extends l3 cache information to nodeinfo output.
Also, why only the l3 cache? Why not expose full info about the CPU cache hierarchy? It feels wrong to expose only the L3 cache and ignore other levels of cache.

Regards,
Daniel

Also, why only the l3 cache? Why not expose full info about the CPU cache hierarchy? It feels wrong to expose only the L3 cache and ignore other levels of cache.
Okay, I'll think about how to expose these in capabilities. This is related to enabling cache tune support in [1]. The current kernel status is that only the L3 cache can be tuned (via CAT L3 support in the kernel) for now.

Could you help give some input on the RFC for cache tune?

[1] https://www.redhat.com/archives/libvir-list/2017-January/msg00354.html

Thanks
Eli.
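As background on the kernel side of cache allocation: CAT for L3 is exposed through the resctrl filesystem (kernel 4.10+). A tiny illustrative check, assuming resctrl is already mounted at /sys/fs/resctrl (this is not libvirt code):

#include <stdio.h>
#include <unistd.h>

/* Illustrative sketch: detect whether the kernel exposes L3 cache
 * allocation (CAT) via the resctrl interface. */
int main(void)
{
    if (access("/sys/fs/resctrl/info/L3", F_OK) == 0)
        printf("L3 cache allocation (CAT) is available\n");
    else
        printf("no L3 CAT support exposed by the kernel\n");
    return 0;
}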

Hi Daniel,

I agree with exposing the other cache levels, but only one of them can be tuned (allocation); if we expose that kind of information, the host should also have the ability to control those resources.

In another thread, Martin said there are cases where multiple sockets may have different values. I kind of agree (though I haven't seen that case), so I agree with exposing cache per socket. I just wonder: is cell == socket in libvirt? In my environment I can see that all socket_id values in a cell are the same, so I wonder if I can attach the cache information to the cell node?

Best Regards
Eli Qiao (乔立勇), OpenStack Core team, OTC Intel.

On Wed, Jan 11, 2017 at 09:31:04AM +0000, Qiao, Liyong wrote:
Hi Daniel,
I agree with exposing the other cache levels, but only one of them can be tuned (allocation); if we expose that kind of information, the host should also have the ability to control those resources.
In another thread, Martin said there are cases where multiple sockets may have different values. I kind of agree (though I haven't seen that case), so I agree with exposing cache per socket. I just wonder:
is cell == socket in libvirt? In my environment I can see that all socket_id values in a cell are the same, so I wonder if I can attach the cache information to the cell node?
No, cell == NUMA node. There can be multiple sockets per NUMA node. If you see multiple CPUs with the same socket_id, this indicates they are cores within the same CPU socket.

Regards,
Daniel
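To illustrate the distinction, sysfs reports the socket a CPU belongs to independently of its NUMA node, so the two can be compared directly. A hypothetical snippet (illustrative only, not part of any patch in this thread):

#include <stdio.h>

/* Illustrative sketch: print the socket (physical_package_id) of cpu0.
 * The NUMA node the CPU sits in is visible separately as a nodeN entry
 * under the same cpu0 directory, so socket and cell are independent. */
int main(void)
{
    FILE *fp = fopen("/sys/devices/system/cpu/cpu0/topology/physical_package_id", "r");
    int socket_id = -1;

    if (fp) {
        if (fscanf(fp, "%d", &socket_id) != 1)
            socket_id = -1;
        fclose(fp);
    }
    printf("cpu0 socket_id: %d\n", socket_id);
    return 0;
}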

2017-01-11 17:34 GMT+08:00 Daniel P. Berrange <berrange@redhat.com>:
On Wed, Jan 11, 2017 at 09:31:04AM +0000, Qiao, Liyong wrote:
Hi Daniel,
I agree with exposing the other cache levels, but only one of them can be tuned (allocation); if we expose that kind of information, the host should also have the ability to control those resources.
In another thread, Martin said there are cases where multiple sockets may have different values. I kind of agree (though I haven't seen that case), so I agree with exposing cache per socket. I just wonder:
is cell == socket in libvirt? In my environment I can see that all socket_id values in a cell are the same, so I wonder if I can attach the cache information to the cell node?
No, cell == NUMA node. There can be multiple sockets per NUMA node. If you see multiple CPUs with the same socket_id, this indicates they are cores within the same CPU socket.
Hi Daniel,
This is Eli again (gmail is better with formatting). Thanks for your reply.

Hmm... if so, since we want to expose cache information through capabilities, we need another topology element to expose all sockets, such as:

<host>
  <uuid>f481c038-bb08-42e1-aa5f-f008a27e7050</uuid>
  <cpu>
    ...
    <cache>
      <sockets num='2'>
        <socket id='0'>
          <l3_cache unit='KiB' support_allocation='yes'>56320</l3_cache>
          <l2_cache unit='KiB'>256</l2_cache>
        </socket>
        <socket id='1'>
          <l3_cache unit='KiB' support_allocation='yes'>56320</l3_cache>
          <l2_cache unit='KiB'>256</l2_cache>
        </socket>
      </sockets>
    </cache>
  </cpu>

P.S. The support_allocation attribute means the host supports allocating that kind of cache.

Does this make sense?

On Wed, Jan 11, 2017 at 05:48:34PM +0800, 乔立勇 wrote:
2017-01-11 17:34 GMT+08:00 Daniel P. Berrange <berrange@redhat.com>:
On Wed, Jan 11, 2017 at 09:31:04AM +0000, Qiao, Liyong wrote:
Hi Daniel,
I agree with exposing the other cache levels, but only one of them can be tuned (allocation); if we expose that kind of information, the host should also have the ability to control those resources.
In another thread, Martin said there are cases where multiple sockets may have different values. I kind of agree (though I haven't seen that case), so I agree with exposing cache per socket. I just wonder:
is cell == socket in libvirt? In my environment I can see that all socket_id values in a cell are the same, so I wonder if I can attach the cache information to the cell node?
No, cell == NUMA node. There can be multiple sockets per NUMA node. If you see multiple CPUs with the same socket_id, this indicates they are cores within the same CPU socket.
Hi Daniel,
This is Eli again (gmail is better with formatting).
Thanks for your reply.
Hmm... if so, since we want to expose cache information through capabilities, we need another topology element to expose all sockets,
such as
<host>
  <uuid>f481c038-bb08-42e1-aa5f-f008a27e7050</uuid>
  <cpu>
    ...
    <cache>
      <sockets num='2'>
        <socket id='0'>
          <l3_cache unit='KiB' support_allocation='yes'>56320</l3_cache>
          <l2_cache unit='KiB'>256</l2_cache>
        </socket>
        <socket id='1'>
          <l3_cache unit='KiB' support_allocation='yes'>56320</l3_cache>
          <l2_cache unit='KiB'>256</l2_cache>
        </socket>
      </sockets>
    </cache>
  </cpu>
That's one possible option - I suggested another here:

  https://www.redhat.com/archives/libvir-list/2017-January/msg00489.html

I'm not sure whether it is better to do a nested structure as you have, or a flat structure as I did. In particular I'm wondering if we can assume caches are strictly hierarchical (in which case nested will work), or whether there can be sharing across branches (in which case flat will be needed).

Regards,
Daniel
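For comparison, a flat layout, purely illustrative here and not necessarily the exact format proposed in the referenced mail, could list each physical cache bank together with the CPUs that share it, which avoids assuming the hierarchy nests cleanly:

<cache>
  <bank id='0' level='3' size='56320' unit='KiB' cpus='0-21'/>
  <bank id='1' level='3' size='56320' unit='KiB' cpus='22-43'/>
  <bank id='2' level='2' size='256' unit='KiB' cpus='0-1'/>
  <!-- one <bank> element per physical cache, however the sharing falls -->
</cache>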

On Wed, Jan 11, 2017 at 10:08:25AM +0000, Daniel P. Berrange wrote:
On Wed, Jan 11, 2017 at 05:48:34PM +0800, 乔立勇 wrote:
2017-01-11 17:34 GMT+08:00 Daniel P. Berrange <berrange@redhat.com>:
On Wed, Jan 11, 2017 at 09:31:04AM +0000, Qiao, Liyong wrote:
Hi Daniel,
I agree with exposing the other cache levels, but only one of them can be tuned (allocation); if we expose that kind of information, the host should also have the ability to control those resources.
In another thread, Martin said there are cases where multiple sockets may have different values. I kind of agree (though I haven't seen that case), so I agree with exposing cache per socket. I just wonder:
is cell == socket in libvirt? In my environment I can see that all socket_id values in a cell are the same, so I wonder if I can attach the cache information to the cell node?
No, cell == NUMA node. There can be multiple sockets per NUMA node. If you see multiple CPUs with the same socket_id, this indicates they are cores within the same CPU socket.
Also don't forget that you can have multiple NUMA nodes in one socket, for example AMD Bulldozer and similar, or the kernel's fake NUMA.
Hi Daniel,
This is Eli again (gmail is better with formatting).
Thanks for your reply.
Hmm... if so, since we want to expose cache information through capabilities, we need another topology element to expose all sockets,
such as
<host>
  <uuid>f481c038-bb08-42e1-aa5f-f008a27e7050</uuid>
  <cpu>
    ...
    <cache>
      <sockets num='2'>
        <socket id='0'>
          <l3_cache unit='KiB' support_allocation='yes'>56320</l3_cache>
          <l2_cache unit='KiB'>256</l2_cache>
        </socket>
        <socket id='1'>
          <l3_cache unit='KiB' support_allocation='yes'>56320</l3_cache>
          <l2_cache unit='KiB'>256</l2_cache>
        </socket>
      </sockets>
    </cache>
  </cpu>
That's one possible option - I suggested another here:
https://www.redhat.com/archives/libvir-list/2017-January/msg00489.html
I'm not sure whether it is better to do a nested structure as you have, or a flat structure as I did. In particular I'm wondering if we can assume caches are strictly hierarchical (in which case nested will work), or whether there can be sharing across branches (in which case flat will be needed).
I like your idea better. It doesn't have to be necessarily flat (you can group some parts of it), but I don't really care that much about how flat it is. I like it better because you can represent any split/shared caches. And being prepared for any cache layout is what I cared about.

On Wed, Jan 11, 2017 at 01:31:02AM +0000, Qiao, Liyong wrote:
Also, why only the l3 cache? Why not expose full info about the CPU cache hierarchy? It feels wrong to expose only the L3 cache and ignore other levels of cache.
Okay, I'll think about how to expose these in capabilities. This is related to enabling cache tune support in [1]. The current kernel status is that only the L3 cache can be tuned (via CAT L3 support in the kernel) for now.
Right, but that's a characteristic of a specific hardware version. We need to consider that there may be future changes, or that different architectures may come up with different support status. So I'd rather see us fully describe the cache availability, rather than assume that L3 is the only thing we ever need to describe.

Regards,
Daniel
Participants (5):
- Daniel P. Berrange
- Eli Qiao
- Martin Kletzander
- Qiao, Liyong
- 乔立勇