[libvirt] [PATCH] storage: Default pool permission mode to 0711
by Osier Yang
The typical use of libvirt is to fork the qemu process as
qemu:qemu. Setting the pool permission mode to 0700 by default
will prevent the guest from starting due to permission problems.
Additionally, define macros for the default pool and volume
permission modes.
---
src/conf/storage_conf.c | 11 ++++++++---
1 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/src/conf/storage_conf.c b/src/conf/storage_conf.c
index bf4567f..6d4987b 100644
--- a/src/conf/storage_conf.c
+++ b/src/conf/storage_conf.c
@@ -47,6 +47,8 @@
#define VIR_FROM_THIS VIR_FROM_STORAGE
+#define DEFAULT_POOL_PERM_MODE 0711
+#define DEFAULT_VOL_PERM_MODE 0600
VIR_ENUM_IMPL(virStoragePool,
VIR_STORAGE_POOL_LAST,
@@ -812,7 +814,8 @@ virStoragePoolDefParseXML(xmlXPathContextPtr ctxt) {
goto cleanup;
if (virStorageDefParsePerms(ctxt, &ret->target.perms,
- "./target/permissions", 0700) < 0)
+ "./target/permissions",
+ DEFAULT_POOL_PERM_MODE) < 0)
goto cleanup;
}
@@ -1137,7 +1140,8 @@ virStorageVolDefParseXML(virStoragePoolDefPtr pool,
}
if (virStorageDefParsePerms(ctxt, &ret->target.perms,
- "./target/permissions", 0600) < 0)
+ "./target/permissions",
+ DEFAULT_VOL_PERM_MODE) < 0)
goto cleanup;
node = virXPathNode("./target/encryption", ctxt);
@@ -1168,7 +1172,8 @@ virStorageVolDefParseXML(virStoragePoolDefPtr pool,
}
if (virStorageDefParsePerms(ctxt, &ret->backingStore.perms,
- "./backingStore/permissions", 0600) < 0)
+ "./backingStore/permissions",
+ DEFAULT_VOL_PERM_MODE) < 0)
goto cleanup;
return ret;
--
1.7.7.3
12 years, 4 months
[libvirt] [PATCHv3 0/5] Virtio support for S390
by Viktor Mihajlovski
As 0.9.13 is stabilizing and I will not be available next week
I am sending this reworked patch set already today, looking forward
to comments.
This series adds support for the s390 flavor of virtio devices.
Since the s390 virtio devices are not implemented as PCI devices
it is necessary to refactor some of the device address assignment
code.
v2 changes
resent as thread
v3 changes
renumbered new virtio-s390 capability
fixed incorrect whitespace
fixed subject lines
Viktor Mihajlovski (5):
qemu: Extended qemuDomainAssignAddresses to be callable from
everywhere.
qemu: Change tests to use (modified) qemuDomainAssignAddresses
S390: Add support for virtio-s390 devices.
S390: Domain Schema for s390-virtio machines.
S390: Adding testcases for s390
docs/schemas/domaincommon.rng | 20 +++
src/conf/domain_conf.c | 11 +-
src/conf/domain_conf.h | 1 +
src/qemu/qemu_capabilities.c | 7 +
src/qemu/qemu_capabilities.h | 1 +
src/qemu/qemu_command.c | 139 ++++++++++++++++++--
src/qemu/qemu_command.h | 6 +-
src/qemu/qemu_driver.c | 14 +-
src/qemu/qemu_process.c | 42 +------
.../qemuxml2argv-console-virtio-s390.args | 9 ++
.../qemuxml2argv-console-virtio-s390.xml | 24 ++++
.../qemuxml2argv-disk-virtio-s390.args | 5 +
.../qemuxml2argv-disk-virtio-s390.xml | 22 +++
.../qemuxml2argv-minimal-s390.args | 5 +
.../qemuxml2argvdata/qemuxml2argv-minimal-s390.xml | 21 +++
.../qemuxml2argv-net-virtio-s390.args | 5 +
.../qemuxml2argv-net-virtio-s390.xml | 22 +++
tests/qemuxml2argvtest.c | 20 ++--
tests/qemuxmlnstest.c | 13 +--
tests/testutilsqemu.c | 31 +++++
20 files changed, 332 insertions(+), 86 deletions(-)
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-console-virtio-s390.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-console-virtio-s390.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-disk-virtio-s390.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-disk-virtio-s390.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-minimal-s390.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-minimal-s390.xml
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-net-virtio-s390.args
create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-net-virtio-s390.xml
12 years, 4 months
[libvirt] [PATCH] util: Use current uid and gid if they are passed as -1 for virDirCreate
by Osier Yang
All the callers of virDirCreate are updated accordingly.
---
src/storage/storage_backend_fs.c | 23 ++++++-----------------
src/util/util.c | 6 ++++++
2 files changed, 12 insertions(+), 17 deletions(-)
diff --git a/src/storage/storage_backend_fs.c b/src/storage/storage_backend_fs.c
index bde4528..233e001 100644
--- a/src/storage/storage_backend_fs.c
+++ b/src/storage/storage_backend_fs.c
@@ -789,17 +789,10 @@ virStorageBackendFileSystemBuild(virConnectPtr conn ATTRIBUTE_UNUSED,
/* Now create the final dir in the path with the uid/gid/mode
* requested in the config. If the dir already exists, just set
* the perms. */
- uid_t uid;
- gid_t gid;
-
- uid = (pool->def->target.perms.uid == (uid_t) -1)
- ? getuid() : pool->def->target.perms.uid;
- gid = (pool->def->target.perms.gid == (gid_t) -1)
- ? getgid() : pool->def->target.perms.gid;
-
if ((err = virDirCreate(pool->def->target.path,
pool->def->target.perms.mode,
- uid, gid,
+ pool->def->target.perms.uid,
+ pool->def->target.perms.gid,
VIR_DIR_CREATE_FORCE_PERMS |
VIR_DIR_CREATE_ALLOW_EXIST |
(pool->def->type == VIR_STORAGE_POOL_NETFS
@@ -811,9 +804,9 @@ virStorageBackendFileSystemBuild(virConnectPtr conn ATTRIBUTE_UNUSED,
/* Reflect the actual uid and gid to the config. */
if (pool->def->target.perms.uid == (uid_t) -1)
- pool->def->target.perms.uid = uid;
+ pool->def->target.perms.uid = getuid();
if (pool->def->target.perms.gid == (gid_t) -1)
- pool->def->target.perms.gid = gid;
+ pool->def->target.perms.gid = getgid();
if (flags != 0) {
ret = virStorageBackendMakeFileSystem(pool, flags);
@@ -1053,13 +1046,9 @@ static int createFileDir(virConnectPtr conn ATTRIBUTE_UNUSED,
return -1;
}
- uid_t uid = (vol->target.perms.uid == -1)
- ? getuid() : vol->target.perms.uid;
- gid_t gid = (vol->target.perms.gid == -1)
- ? getgid() : vol->target.perms.gid;
-
if ((err = virDirCreate(vol->target.path, vol->target.perms.mode,
- uid, gid,
+ vol->target.perms.uid,
+ vol->target.perms.gid,
VIR_DIR_CREATE_FORCE_PERMS |
(pool->def->type == VIR_STORAGE_POOL_NETFS
? VIR_DIR_CREATE_AS_UID : 0))) < 0) {
diff --git a/src/util/util.c b/src/util/util.c
index ce98d20..aec5512 100644
--- a/src/util/util.c
+++ b/src/util/util.c
@@ -1123,6 +1123,12 @@ int virDirCreate(const char *path, mode_t mode,
int waitret;
int status, ret = 0;
+ /* allow using -1 to mean "current value" */
+ if (uid == (uid_t) -1)
+ uid = getuid();
+ if (gid == (gid_t) -1)
+ gid = getgid();
+
if ((!(flags & VIR_DIR_CREATE_AS_UID))
|| (getuid() != 0)
|| ((uid == 0) && (gid == 0))
--
1.7.7.3
12 years, 4 months
[libvirt] [PATCH 00/13] Support hypervisor-threads-pin in vcpupin.
by tangchen
Hi~
Users can use the vcpupin command to bind a vcpu thread to a specific physical cpu.
But besides vcpu threads, there are also some other threads created by qemu
(known as hypervisor threads) that cannot be explicitly bound to physical cpus.
The first 3 patches are from Wen Congyang; they implement cgroup support for different
hypervisors.
The other 10 patches implemented hypervisor threads binding, in two ways:
1) Use sched_setaffinity() function;
2) In cpuset cgroup.
A new xml element is introduced, and vcpupin command is improved, see below.
1. Introduce new xml elements:
<cputune>
......
<hypervisorpin cpuset='1'/>
</cputune>
2. Improve vcpupin command to support hypervisor threads binding.
For example, vm1 has the following configuration:
<cputune>
<vcpupin vcpu='1' cpuset='1'/>
<vcpupin vcpu='0' cpuset='0'/>
<hypervisorpin cpuset='1'/>
</cputune>
1) query the pinning of all threads
# vcpupin vm1
VCPU: CPU Affinity
----------------------------------
0: 0
1: 1
Hypervisor: CPU Affinity
----------------------------------
*: 1
2) query the pinning of hypervisor threads only
# vcpupin vm1 --hypervisor
Hypervisor: CPU Affinity
----------------------------------
*: 1
3) change the pinning of hypervisor threads
# vcpupin vm1 --hypervisor 0-1
# vcpupin vm1 --hypervisor
Hypervisor: CPU Affinity
----------------------------------
*: 0-1
# taskset -p 397
pid 397's current affinity mask: 3
Note: If users want to pin a vcpu thread to a pcpu, the --vcpu option can no longer be omitted.
Tang Chen (10):
Enable cpuset cgroup and synchronous vcpupin info to cgroup.
Support hypervisorpin xml parse.
Introduce qemuSetupCgroupHypervisorPin and synchronize hypervisorpin
info to cgroup.
Add qemuProcessSetHypervisorAffinites and set hypervisor threads
affinities
Introduce virDomainHypervisorPinAdd and virDomainHypervisorPinDel
functions
Introduce qemudDomainPinHypervisorFlags and
qemudDomainGetHypervisorPinInfo in qemu driver.
Introduce remoteDomainPinHypervisorFlags and
remoteDomainGetHypervisorPinInfo functions in remote driver.
Introduce remoteDispatchDomainPinHypervisorFlags and
remoteDispatchDomainGetHypervisorPinInfo functions.
Introduce virDomainPinHypervisorFlags and
virDomainGetHypervisorPinInfo functions.
Improve vcpupin to support hypervisorpin dynamically.
Wen Congyang (3):
Introduce the function virCgroupForHypervisor
introduce the function virCgroupMoveTask()
create a new cgroup and move all hypervisor threads to the new cgroup
daemon/remote.c | 103 +++++++++
docs/schemas/domaincommon.rng | 7 +
include/libvirt/libvirt.h.in | 9 +
src/conf/domain_conf.c | 173 +++++++++++++++-
src/conf/domain_conf.h | 7 +
src/driver.h | 13 +-
src/libvirt.c | 147 +++++++++++++
src/libvirt_private.syms | 6 +
src/libvirt_public.syms | 6 +
src/qemu/qemu_cgroup.c | 149 +++++++++++++-
src/qemu/qemu_cgroup.h | 5 +
src/qemu/qemu_driver.c | 266 ++++++++++++++++++++++-
src/qemu/qemu_process.c | 58 +++++
src/remote/remote_driver.c | 102 +++++++++
src/remote/remote_protocol.x | 24 ++-
src/remote_protocol-structs | 24 ++
src/util/cgroup.c | 132 +++++++++++-
src/util/cgroup.h | 9 +
tests/qemuxml2argvdata/qemuxml2argv-cputune.xml | 1 +
tests/vcpupin | 6 +-
tools/virsh.c | 145 ++++++++----
tools/virsh.pod | 16 +-
22 files changed, 1335 insertions(+), 73 deletions(-)
--
1.7.3.1
12 years, 4 months
[libvirt] Bug report 826704 - sanlock releases all resources on virsh detach-disk
by Frido Roose
Hello,
I logged a bug about using virsh detach-disk cleaning up all sanlock resources for the domain instead of only the device in question.
After a quick look into the code, I think a new method similar to virLockManagerSanlockAddResource is needed in case of detaching a disk from the domain, like e.g. virLockManagerSanlockDelResource (…).
Now it looks like virLockManagerSanlockRelease is called, which releases all resources:
if ((rv = sanlock_release(-1, priv->vm_pid, SANLK_REL_ALL, 0, NULL)) < 0) {
virsh detach-disk should then call virLockManagerSanlockDelResource for the given resource imo.
Any thoughts about this or why it is implemented like this?
--
Frido Roose
12 years, 4 months
[libvirt] [RFC 0/5] block: File descriptor passing using -open-hook-fd
by Stefan Hajnoczi
Libvirt can take advantage of SELinux to restrict the QEMU process and prevent
it from opening files that it should not have access to. This improves
security because it prevents the attacker from escaping the QEMU process if
they manage to gain control.
NFS has been a pain point for SELinux because it does not support labels (which
I believe are stored in extended attributes). In other words, it's not
possible to use SELinux goodness on QEMU when image files are located on NFS.
Today we have to allow QEMU access to any file on the NFS export rather than
restricting specifically to the image files that the guest requires.
File descriptor passing is a solution to this problem and might also come in
handy elsewhere. Libvirt or another external process chooses files which QEMU
is allowed to access and provides just those file descriptors - QEMU cannot
open the files itself.
This series adds the -open-hook-fd command-line option. Whenever QEMU needs to
open an image file it sends a request over the given UNIX domain socket. The
response includes the file descriptor or an errno on failure. Please see the
patches for details on the protocol.
The -open-hook-fd approach allows QEMU to support file descriptor passing
without changing -drive. It also supports snapshot_blkdev and other commands
that re-open image files.
Anthony Liguori <aliguori(a)us.ibm.com> wrote most of these patches. I added a
demo -open-hook-fd server and added some small fixes. Since Anthony is
traveling right now I'm sending the RFC for discussion.
Anthony Liguori (3):
block: add open() wrapper that can be hooked by libvirt
block: add a new command line parameter and protocol description
block: plumb up open-hook-fd option
Stefan Hajnoczi (2):
osdep: add qemu_recvmsg() wrapper
Example -open-hook-fd server
block.c | 107 ++++++++++++++++++++++++++++++++++++++
block.h | 2 +
block/raw-posix.c | 18 +++----
block/raw-win32.c | 2 +-
block/vdi.c | 2 +-
block/vmdk.c | 6 +--
block/vpc.c | 2 +-
block/vvfat.c | 4 +-
block_int.h | 12 +++++
osdep.c | 46 +++++++++++++++++
qemu-common.h | 2 +
qemu-options.hx | 42 +++++++++++++++
test-fd-passing.c | 147 +++++++++++++++++++++++++++++++++++++++++++++++++++++
vl.c | 3 ++
14 files changed, 378 insertions(+), 17 deletions(-)
create mode 100644 test-fd-passing.c
--
1.7.10
12 years, 4 months
[libvirt] [PATCH] virsh: Clarify documentation for virsh dompmsuspend command
by Peter Krempa
Clarify the docs to make more clear what this command does and that it
requires a guest agent running in the guest.
---
tools/virsh.c | 7 +++++--
tools/virsh.pod | 10 +++++++++-
2 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/tools/virsh.c b/tools/virsh.c
index 265857d..55cbe2b 100644
--- a/tools/virsh.c
+++ b/tools/virsh.c
@@ -2761,8 +2761,11 @@ cmdSuspend(vshControl *ctl, const vshCmd *cmd)
* "dompmsuspend" command
*/
static const vshCmdInfo info_dom_pm_suspend[] = {
- {"help", N_("suspend a domain for a given time duration")},
- {"desc", N_("Suspend a running domain for a given time duration.")},
+ {"help", N_("suspend a domain gracefully using power management "
+ "functions")},
+ {"desc", N_("Suspends a running domain using guest OS's power management. "
+ "(Note: This requires a guest agent configured and running in "
+ "the guest OS).")},
{NULL, NULL}
};
diff --git a/tools/virsh.pod b/tools/virsh.pod
index f83a29d..6250b89 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -1487,7 +1487,7 @@ Moves a domain out of the suspended state. This will allow a previously
suspended domain to now be eligible for scheduling by the underlying
hypervisor.
-=item B<dompmsuspend> I<domain-id> I<target>
+=item B<dompmsuspend> I<domain-id> I<target> [I<--duration>]
Suspend a running domain into one of these states (possible I<target>
values):
@@ -1495,6 +1495,14 @@ values):
disk equivallent of S4 ACPI state
hybrid RAM is saved to disk but not powered off
+The I<--duration> argument specifies number of seconds before the domain is
+woken up after it was suspended (see also B<dompmwakeup>). Default is 0 for
+unlimited suspend time. (This feature isn't currently supported by any
+hypervisor driver and 0 should be used.).
+
+Note that this command requires a guest agent configured and running in the
+domain's guest OS.
+
=item B<dompmwakeup> I<domain-id>
Wakeup a domain suspended by dompmsuspend command. Injects a wakeup
--
1.7.8.6
12 years, 4 months
[libvirt] [PATCH 0/3] Fix filling of nodeinfo structure and add test cases
by Peter Krempa
For explanation see patch 1/3.
Peter Krempa (3):
nodeinfo: Fix gathering of nodeinfo data structure
test: Add new test case for nodeinfotest
test: Add test case for nodeinfotest if host machine doesn't have
NUMA
src/nodeinfo.c | 311 +++++++++-------
.../linux-nodeinfo-sysfs-test-2/node/node0/cpu0 | 1 +
.../linux-nodeinfo-sysfs-test-2/node/node0/cpu1 | 1 +
.../linux-nodeinfo-sysfs-test-3-cpu-x86-output.txt | 2 +-
.../linux-nodeinfo-sysfs-test-3/node/node0/cpu0 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node0/cpu12 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node0/cpu16 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node0/cpu20 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node0/cpu4 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node0/cpu8 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node0/meminfo | 38 +-
.../linux-nodeinfo-sysfs-test-3/node/node1/cpu24 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node1/cpu28 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node1/cpu32 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node1/cpu36 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node1/cpu40 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node1/cpu44 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node1/meminfo | 36 +-
.../linux-nodeinfo-sysfs-test-3/node/node2/cpu11 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node2/cpu15 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node2/cpu19 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node2/cpu23 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node2/cpu3 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node2/cpu7 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node2/meminfo | 38 +-
.../linux-nodeinfo-sysfs-test-3/node/node3/cpu27 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node3/cpu31 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node3/cpu35 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node3/cpu39 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node3/cpu43 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node3/cpu47 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node3/meminfo | 38 +-
.../linux-nodeinfo-sysfs-test-3/node/node4/cpu10 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node4/cpu14 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node4/cpu18 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node4/cpu2 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node4/cpu22 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node4/cpu6 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node4/meminfo | 38 +-
.../linux-nodeinfo-sysfs-test-3/node/node5/cpu26 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node5/cpu30 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node5/cpu34 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node5/cpu38 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node5/cpu42 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node5/cpu46 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node5/meminfo | 38 +-
.../linux-nodeinfo-sysfs-test-3/node/node6/cpu1 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node6/cpu13 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node6/cpu17 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node6/cpu21 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node6/cpu5 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node6/cpu9 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node6/meminfo | 40 +-
.../linux-nodeinfo-sysfs-test-3/node/node7/cpu25 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node7/cpu29 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node7/cpu33 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node7/cpu37 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node7/cpu41 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node7/cpu45 | 1 +
.../linux-nodeinfo-sysfs-test-3/node/node7/meminfo | 38 +-
.../linux-nodeinfo-sysfs-test-3/node/possible | Bin 4 -> 5 bytes
.../linux-nodeinfo-sysfs-test-4-x86.cpuinfo | 400 ++++++++++++++++++++
.../cpu/cpu0/topology/core_id | 1 +
.../cpu/cpu0/topology/physical_package_id | 1 +
.../cpu/cpu0/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu1/online | 1 +
.../cpu/cpu1/topology/core_id | 1 +
.../cpu/cpu1/topology/physical_package_id | 1 +
.../cpu/cpu1/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu10/online | 1 +
.../cpu/cpu10/topology/core_id | 1 +
.../cpu/cpu10/topology/physical_package_id | 1 +
.../cpu/cpu10/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu11/online | 1 +
.../cpu/cpu11/topology/core_id | 1 +
.../cpu/cpu11/topology/physical_package_id | 1 +
.../cpu/cpu11/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu12/online | 1 +
.../cpu/cpu12/topology/core_id | 1 +
.../cpu/cpu12/topology/physical_package_id | 1 +
.../cpu/cpu12/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu13/online | 1 +
.../cpu/cpu13/topology/core_id | 1 +
.../cpu/cpu13/topology/physical_package_id | 1 +
.../cpu/cpu13/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu14/online | 1 +
.../cpu/cpu14/topology/core_id | 1 +
.../cpu/cpu14/topology/physical_package_id | 1 +
.../cpu/cpu14/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu15/online | 1 +
.../cpu/cpu15/topology/core_id | 1 +
.../cpu/cpu15/topology/physical_package_id | 1 +
.../cpu/cpu15/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu2/online | 1 +
.../cpu/cpu2/topology/core_id | 1 +
.../cpu/cpu2/topology/physical_package_id | 1 +
.../cpu/cpu2/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu3/online | 1 +
.../cpu/cpu3/topology/core_id | 1 +
.../cpu/cpu3/topology/physical_package_id | 1 +
.../cpu/cpu3/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu4/online | 1 +
.../cpu/cpu4/topology/core_id | 1 +
.../cpu/cpu4/topology/physical_package_id | 1 +
.../cpu/cpu4/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu5/online | 1 +
.../cpu/cpu5/topology/core_id | 1 +
.../cpu/cpu5/topology/physical_package_id | 1 +
.../cpu/cpu5/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu6/online | 1 +
.../cpu/cpu6/topology/core_id | 1 +
.../cpu/cpu6/topology/physical_package_id | 1 +
.../cpu/cpu6/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu7/online | 1 +
.../cpu/cpu7/topology/core_id | 1 +
.../cpu/cpu7/topology/physical_package_id | 1 +
.../cpu/cpu7/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu8/online | 1 +
.../cpu/cpu8/topology/core_id | 1 +
.../cpu/cpu8/topology/physical_package_id | 1 +
.../cpu/cpu8/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/cpu/cpu9/online | 1 +
.../cpu/cpu9/topology/core_id | 1 +
.../cpu/cpu9/topology/physical_package_id | 1 +
.../cpu/cpu9/topology/thread_siblings | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node0/cpu0 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node0/cpu1 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node0/cpu2 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node0/cpu3 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node0/cpu4 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node0/cpu5 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node0/cpu6 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node0/cpu7 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node0/meminfo | 29 ++
.../linux-nodeinfo-sysfs-test-4/node/node1/cpu10 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node1/cpu11 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node1/cpu12 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node1/cpu13 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node1/cpu14 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node1/cpu15 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node1/cpu8 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node1/cpu9 | 1 +
.../linux-nodeinfo-sysfs-test-4/node/node1/meminfo | 29 ++
.../linux-nodeinfo-sysfs-test-4/node/possible | Bin 0 -> 5 bytes
.../linux-nodeinfo-sysfs-test-5-x86.cpuinfo | 100 +++++
.../cpu/cpu0/topology/core_id | 1 +
.../cpu/cpu0/topology/core_siblings | 1 +
.../cpu/cpu0/topology/core_siblings_list | 1 +
.../cpu/cpu0/topology/physical_package_id | 1 +
.../cpu/cpu0/topology/thread_siblings | 1 +
.../cpu/cpu0/topology/thread_siblings_list | 1 +
.../linux-nodeinfo-sysfs-test-5/cpu/cpu1/online | 1 +
.../cpu/cpu1/topology/core_id | 1 +
.../cpu/cpu1/topology/core_siblings | 1 +
.../cpu/cpu1/topology/core_siblings_list | 1 +
.../cpu/cpu1/topology/physical_package_id | 1 +
.../cpu/cpu1/topology/thread_siblings | 1 +
.../cpu/cpu1/topology/thread_siblings_list | 1 +
.../linux-nodeinfo-sysfs-test-5/cpu/cpu2/online | 1 +
.../cpu/cpu2/topology/core_id | 1 +
.../cpu/cpu2/topology/core_siblings | 1 +
.../cpu/cpu2/topology/core_siblings_list | 1 +
.../cpu/cpu2/topology/physical_package_id | 1 +
.../cpu/cpu2/topology/thread_siblings | 1 +
.../cpu/cpu2/topology/thread_siblings_list | 1 +
.../linux-nodeinfo-sysfs-test-5/cpu/cpu3/online | 1 +
.../cpu/cpu3/topology/core_id | 1 +
.../cpu/cpu3/topology/core_siblings | 1 +
.../cpu/cpu3/topology/core_siblings_list | 1 +
.../cpu/cpu3/topology/physical_package_id | 1 +
.../cpu/cpu3/topology/thread_siblings | 1 +
.../cpu/cpu3/topology/thread_siblings_list | 1 +
tests/nodeinfotest.c | 2 +
173 files changed, 1053 insertions(+), 280 deletions(-)
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-2/node/node0/cpu0
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-2/node/node0/cpu1
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node0/cpu0
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node0/cpu12
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node0/cpu16
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node0/cpu20
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node0/cpu4
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node0/cpu8
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node1/cpu24
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node1/cpu28
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node1/cpu32
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node1/cpu36
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node1/cpu40
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node1/cpu44
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node2/cpu11
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node2/cpu15
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node2/cpu19
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node2/cpu23
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node2/cpu3
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node2/cpu7
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node3/cpu27
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node3/cpu31
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node3/cpu35
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node3/cpu39
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node3/cpu43
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node3/cpu47
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node4/cpu10
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node4/cpu14
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node4/cpu18
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node4/cpu2
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node4/cpu22
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node4/cpu6
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node5/cpu26
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node5/cpu30
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node5/cpu34
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node5/cpu38
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node5/cpu42
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node5/cpu46
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node6/cpu1
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node6/cpu13
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node6/cpu17
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node6/cpu21
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node6/cpu5
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node6/cpu9
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node7/cpu25
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node7/cpu29
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node7/cpu33
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node7/cpu37
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node7/cpu41
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-3/node/node7/cpu45
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4-x86.cpuinfo
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu0/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu0/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu0/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu1/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu1/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu1/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu1/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu10/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu10/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu10/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu10/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu11/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu11/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu11/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu11/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu12/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu12/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu12/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu12/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu13/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu13/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu13/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu13/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu14/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu14/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu14/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu14/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu15/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu15/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu15/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu15/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu2/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu2/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu2/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu2/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu3/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu3/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu3/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu3/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu4/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu4/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu4/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu4/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu5/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu5/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu5/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu5/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu6/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu6/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu6/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu6/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu7/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu7/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu7/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu7/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu8/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu8/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu8/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu8/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu9/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu9/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu9/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/cpu/cpu9/topology/thread_siblings
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node0/cpu0
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node0/cpu1
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node0/cpu2
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node0/cpu3
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node0/cpu4
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node0/cpu5
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node0/cpu6
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node0/cpu7
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node0/meminfo
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node1/cpu10
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node1/cpu11
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node1/cpu12
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node1/cpu13
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node1/cpu14
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node1/cpu15
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node1/cpu8
create mode 120000 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node1/cpu9
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/node1/meminfo
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-4/node/possible
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5-x86.cpuinfo
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu0/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu0/topology/core_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu0/topology/core_siblings_list
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu0/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu0/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu0/topology/thread_siblings_list
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu1/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu1/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu1/topology/core_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu1/topology/core_siblings_list
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu1/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu1/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu1/topology/thread_siblings_list
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu2/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu2/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu2/topology/core_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu2/topology/core_siblings_list
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu2/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu2/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu2/topology/thread_siblings_list
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu3/online
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu3/topology/core_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu3/topology/core_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu3/topology/core_siblings_list
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu3/topology/physical_package_id
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu3/topology/thread_siblings
create mode 100644 tests/nodeinfodata/linux-nodeinfo-sysfs-test-5/cpu/cpu3/topology/thread_siblings_list
--
1.7.8.6
12 years, 4 months
[libvirt] [PATCH 00/12] Fine grained access control for libvirt APIs
by Daniel P. Berrange
This is a repost of
https://www.redhat.com/archives/libvir-list/2012-January/msg00907.html
which got no comments last time out.
This series of patch is the minimal required to get a working proof
of concept implementation of fine grained access control in libvirt.
This demonstrates
- Obtaining a client identity from a socket
- Ensuring RPC calls are executed with the correct identity set
- A policykit access driver that checks based on access vector alone
- A SELinux access driver that checks based on access vector + object
- A set of hooks in the QEMU driver to protect virDomainObjPtr access
Things that are not done
- APIs for changing the real/effective identity post-connect
- A simple RBAC access driver for doing (Access vector, object)
checks
- SELinux policy for the SELinux driver
- Access control hooks on all other QEMU driver methods
- Access control hooks in LXC, UML, other libvirtd side drivers
- Access control hooks in storage, network, interface, etc drivers
- Document WTF todo to propagate SELinux contexts across TCP
sockets using IPSec. Any hints welcome...
- Lots more I can't think of right now
I should note that the policykit driver is mostly useless because it
is unable to let you do checks on anything other than permission name
and UNIX process ID at this time. So what I've implemented with the
polkit driver is really little more than a slightly more fine grained
version of the VIR_CONNECT_RO flag. In theory it is supposed to be
extendable to allow other types of identity information besides
the process ID, and to include some kind of object identifiers in
the permission check, but no one seems to be attacking this.
So I expect the simple RBAC driver to be the most used one in the
common case usage of libvirt, and of course the SELinux driver.
12 years, 4 months
[libvirt] [PATCH libvirt-java 1/2] Return a byte[] array with secretGetValue
by Wido den Hollander
We break the API with this, but Java does not support multiple method signatures with different return types.
The old method returned a String, but since a secret can be binary data this type is not suited.
Users who know that their secret is in fact a String can use a cast with:
Secret secret = conn.secretLookupByUUIDString("uuuuuuuid");
String value = new String(secret.getValue());
Signed-off-by: Wido den Hollander <wido(a)widodh.nl>
---
src/main/java/org/libvirt/Secret.java | 13 ++++++++++---
src/main/java/org/libvirt/jna/Libvirt.java | 2 +-
2 files changed, 11 insertions(+), 4 deletions(-)
diff --git a/src/main/java/org/libvirt/Secret.java b/src/main/java/org/libvirt/Secret.java
index 48f7895..39d9122 100644
--- a/src/main/java/org/libvirt/Secret.java
+++ b/src/main/java/org/libvirt/Secret.java
@@ -5,6 +5,9 @@ import org.libvirt.jna.SecretPointer;
import com.sun.jna.Native;
import com.sun.jna.NativeLong;
+import com.sun.jna.ptr.LongByReference;
+import com.sun.jna.Pointer;
+import java.nio.ByteBuffer;
/**
* A secret defined by libvirt
@@ -106,12 +109,16 @@ public class Secret {
/**
* Fetches the value of the secret
- *
+ *
* @return the value of the secret, or null on failure.
*/
- public String getValue() throws LibvirtException {
- String returnValue = libvirt.virSecretGetValue(VSP, new NativeLong(), 0);
+ public byte[] getValue() throws LibvirtException {
+ LongByReference value_size = new LongByReference();
+ Pointer value = libvirt.virSecretGetValue(VSP, value_size, 0);
processError();
+ ByteBuffer bb = value.getByteBuffer(0, value_size.getValue());
+ byte[] returnValue = new byte[bb.remaining()];
+ bb.get(returnValue);
return returnValue;
}
diff --git a/src/main/java/org/libvirt/jna/Libvirt.java b/src/main/java/org/libvirt/jna/Libvirt.java
index b1e53a2..f53199d 100644
--- a/src/main/java/org/libvirt/jna/Libvirt.java
+++ b/src/main/java/org/libvirt/jna/Libvirt.java
@@ -330,7 +330,7 @@ public interface Libvirt extends Library {
public int virSecretGetUUID(SecretPointer virSecretPtr, byte[] uuidString);
public int virSecretGetUUIDString(SecretPointer virSecretPtr, byte[] uuidString);
public String virSecretGetUsageID(SecretPointer virSecretPtr);
- public String virSecretGetValue(SecretPointer virSecretPtr, NativeLong value_size, int flags);
+ public Pointer virSecretGetValue(SecretPointer virSecretPtr, LongByReference value_size, int flags);
public String virSecretGetXMLDesc(SecretPointer virSecretPtr, int flags);
public SecretPointer virSecretLookupByUsage(ConnectionPointer virConnectPtr, int usageType, String usageID);
public SecretPointer virSecretLookupByUUID(ConnectionPointer virConnectPtr, byte[] uuidBytes);
--
1.7.9.5
12 years, 4 months