[libvirt] [test-API][PATCH] Add 2 host node memory API cases
by Wayne Sun
add 2 host node memory cases and update conf
- node_mem_param: tuning host node memory parameters.
- node_memory: get host node memory info, including host free
memory, node free memory and node memory stats.
- numa_param conf is updated with the 2 new cases
Signed-off-by: Wayne Sun <gsun(a)redhat.com>
---
cases/numa_param.conf | 8 ++++
repos/numa/node_mem_param.py | 86 ++++++++++++++++++++++++++++++++++++
repos/numa/node_memory.py | 101 +++++++++++++++++++++++++++++++++++++++++++
3 files changed, 195 insertions(+)
create mode 100644 repos/numa/node_mem_param.py
create mode 100644 repos/numa/node_memory.py
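[Editor's note: for reviewers who want to poke at the same APIs outside the
test harness, the calls the two cases exercise boil down to roughly this
minimal sketch. It assumes a local hypervisor connection and a 2-node host;
the shm_* parameter names follow the typed parameters used in the cases below.]

import libvirt

conn = libvirt.open(None)

# KSM tuning knobs, mirrored by the kernel under /sys/kernel/mm/ksm/
print conn.getMemoryParameters(0)
conn.setMemoryParameters({'shm_pages_to_scan': 200,
                          'shm_sleep_millisecs': 20}, 0)

# host-wide and per-cell free memory, both reported in bytes
print conn.getFreeMemory()
print conn.getCellsFreeMemory(0, 2)     # cells 0 and 1 on a 2-node host

# per-node stats dict in KiB, e.g. {'total': ..., 'free': ...}
print conn.getMemoryStats(0, 0)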
diff --git a/cases/numa_param.conf b/cases/numa_param.conf
index 64268a3..515fb1f 100644
--- a/cases/numa_param.conf
+++ b/cases/numa_param.conf
@@ -1,3 +1,11 @@
+numa:node_memory
+
+numa:node_mem_param
+ shm_pages_to_scan
+ 200
+ shm_sleep_millisecs
+ 20
+
domain:install_linux_cdrom
guestname
$defaultname
diff --git a/repos/numa/node_mem_param.py b/repos/numa/node_mem_param.py
new file mode 100644
index 0000000..ba6f8f4
--- /dev/null
+++ b/repos/numa/node_mem_param.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# Test tuning host node memory parameters
+
+import libvirt
+from libvirt import libvirtError
+
+from src import sharedmod
+
+required_params = ()
+optional_params = {"shm_pages_to_scan": 100,
+                   "shm_sleep_millisecs": 20,
+                   "shm_merge_across_nodes": 1
+                  }
+
+KSM_PATH = "/sys/kernel/mm/ksm/"
+
+def node_mem_param(params):
+    """test set host node memory parameters
+    """
+    logger = params['logger']
+    shm_pages_to_scan = params.get('shm_pages_to_scan')
+    shm_sleep_millisecs = params.get('shm_sleep_millisecs')
+    shm_merge_across_nodes = params.get('shm_merge_across_nodes')
+
+    if not shm_pages_to_scan \
+            and not shm_sleep_millisecs \
+            and not shm_merge_across_nodes:
+        logger.error("no parameter is given")
+        return 1
+
+    param_dict = {}
+    tmp = ('shm_pages_to_scan', 'shm_sleep_millisecs', 'shm_merge_across_nodes')
+    tmp1 = ('pages_to_scan', 'sleep_millisecs', 'merge_across_nodes')
+    for i in tmp:
+        if eval(i):
+            param_dict[i] = int(eval(i))
+
+    logger.info("the given param dict is: %s" % param_dict)
+
+    conn = sharedmod.libvirtobj['conn']
+
+    try:
+        logger.info("get host node memory parameters")
+        mem_pre = conn.getMemoryParameters(0)
+        logger.info("host node memory parameters are: %s" % mem_pre)
+
+        logger.info("set host node memory parameters with given param %s" %
+                    param_dict)
+        conn.setMemoryParameters(param_dict, 0)
+        logger.info("set host node memory parameters done")
+
+        logger.info("get host node memory parameters")
+        mem_pos = conn.getMemoryParameters(0)
+        logger.info("host node memory parameters are: %s" % mem_pos)
+
+        for i in tmp:
+            if eval(i) and mem_pos[i] != param_dict[i]:
+                logger.error("%s is not set as expected" % i)
+                return 1
+
+        logger.info("node memory parameters are set as expected")
+
+        logger.info("check tuning detail under %s" % KSM_PATH)
+
+        mem_tmp = {}
+        for i in tmp1:
+            str_tmp = 'shm_%s' % i
+            if eval(str_tmp):
+                path = "%s%s" % (KSM_PATH, i)
+                f = open(path)
+                ret = int(f.read().split('\n')[0])
+                f.close()
+                logger.info("%s value is: %s" % (path, ret))
+                mem_tmp[str_tmp] = ret
+
+        if mem_tmp == param_dict:
+            logger.info("tuning detail under %s is expected" % KSM_PATH)
+        else:
+            logger.error("check with tuning detail under %s failed" % KSM_PATH)
+            return 1
+
+    except libvirtError, e:
+        logger.error("libvirt call failed: " + str(e))
+        return 1
+
+    return 0
diff --git a/repos/numa/node_memory.py b/repos/numa/node_memory.py
new file mode 100644
index 0000000..47d3b4a
--- /dev/null
+++ b/repos/numa/node_memory.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+# Test get host node memory info, including host free
+# memory, node free memory and node memory stats.
+
+import math
+
+import libvirt
+from libvirt import libvirtError
+
+from src import sharedmod
+from utils import utils
+
+required_params = ()
+optional_params = {}
+
+CMD = "numastat -m"
+
+def node_memory(params):
+    """test get host node memory info
+    """
+    logger = params['logger']
+
+    cmd = "lscpu|grep 'NUMA node(s)'"
+    ret, output = utils.exec_cmd(cmd, shell=True)
+    node_num = int(output[0].split(' ')[-1])
+    logger.info("host total node number is: %s" % node_num)
+
+    conn = sharedmod.libvirtobj['conn']
+
+    try:
+        logger.info("get host total free memory")
+        mem = conn.getFreeMemory()/1048576
+        logger.info("host free memory total is: %s MiB" % mem)
+        ret, out = utils.exec_cmd(CMD, shell=True)
+        mem_total = out[5].split()[-1]
+        mem_total = int(eval(mem_total))
+        logger.info("output of '%s' is: %s MiB" % (CMD, mem_total))
+
+        if math.fabs(mem - mem_total) > 1:
+            logger.error("free memory mismatch with result of '%s'" % CMD)
+            return 1
+        else:
+            logger.info("getting host free memory succeeded")
+
+        logger.info("get free memory of nodes")
+        ret = conn.getCellsFreeMemory(0, node_num)
+        mem = [i/1048576 for i in ret]
+        logger.info("nodes free memory list is: %s" % mem)
+
+        ret, out = utils.exec_cmd(CMD, shell=True)
+        mem_tmp = out[5].split()[1:-1]
+        node_mem = [int(eval(i)) for i in mem_tmp]
+        logger.info("output of '%s' is: %s" % (CMD, node_mem))
+
+        for i in range(node_num):
+            if math.fabs(mem[i] - node_mem[i]) > 1:
+                logger.error("node %s free memory mismatch with command '%s'"
+                             % (i, CMD))
+                return 1
+
+        logger.info("getting node free memory succeeded")
+
+        logger.info("get node memory stats")
+        node_dict = {}
+        for i in range(node_num):
+            ret = conn.getMemoryStats(i, 0)
+            for j in ret.keys():
+                ret[j] = ret[j]/1024
+            node_dict[i] = ret
+            logger.info("node %s memory stats is: %s" % (i, node_dict[i]))
+
+        node_tmp = {}
+        logger.info("get node memory stats with '%s'" % CMD)
+        ret, out = utils.exec_cmd(CMD, shell=True)
+        mem_total_tmp = out[4].split()[1:-1]
+        mem_free_tmp = out[5].split()[1:-1]
+        for i in range(node_num):
+            dict_tmp = {}
+            dict_tmp['total'] = int(eval(mem_total_tmp[i]))
+            dict_tmp['free'] = int(eval(mem_free_tmp[i]))
+            node_tmp[i] = dict_tmp
+            logger.info("node %s memory stats is: %s" % (i, node_tmp[i]))
+
+        for i in range(node_num):
+            if math.fabs(node_tmp[i]['total'] - node_dict[i]['total']) > 1:
+                logger.error("node %s total memory mismatches output of '%s'" %
+                             (i, CMD))
+                return 1
+
+            if math.fabs(node_tmp[i]['free'] - node_dict[i]['free']) > 1:
+                logger.error("node %s free memory mismatches output of '%s'" %
+                             (i, CMD))
+                return 1
+
+        logger.info("getting node memory stats succeeded")
+
+    except libvirtError, e:
+        logger.error("libvirt call failed: " + str(e))
+        return 1
+
+    return 0
--
1.8.1
[libvirt] libvirt testing with autotest
by Cole Robinson
Recently I've been playing with virt autotest, which provides a number of qemu
and libvirt functional tests. Here are some notes on how to get it running:
The super short version of running the libvirt smoke tests on Fedora 18:
yum install autotest-framework
git clone git://github.com/autotest/virt-test.git autotest-virt-tests
cd autotest-virt-tests
./libvirt/get_started.py
./run --type libvirt
For me the default tests take under a minute. Output looks like:
SETUP: PASS (1.00 s)
DATA DIR: /home/crobinso/virt_test
DEBUG LOG:
/home/crobinso/src/autotest-virt-tests/logs/run-2013-02-11-14.21.47/debug.log
TESTS: 28
(1/28) unattended_install.import.import: PASS (23.31 s)
(2/28) virsh_domname.vm_state.vm_running.with_valid_option.domid: PASS (1.59 s)
(3/28) virsh_domname.vm_state.vm_running.with_valid_option.uuid: PASS (0.47 s)
...
(27/28) virsh_domname.with_libvirtd_stop.with_valid_option.uuid: PASS (0.55 s)
(28/28) remove_guest.without_disk: PASS (6.77 s)
TOTAL TIME: 42.30 s
TESTS PASSED: 28
TESTS FAILED: 0
SUCCESS RATE: 100.00 %
Now with a bit more detail.
The first 4 steps are one-time setup, though you will want to update
autotest-virt-tests every now and then.
The default set of libvirt tests imports a VM with virt-install, verifies that we
can SSH in, runs a bunch of virsh domname validation, and removes the guest.
Logs are dumped into the autotest-virt-tests/logs directory.
autotest-framework is just the client-side piece. autotest also has a server
component which has a web UI, handles job scheduling, and tracks results in a
database, but that's not needed for casual developer usage.
virt-test.git is the repo that actually contains all the qemu and libvirt test
cases. There are also tests for virt-v2v, libguestfs, ovirt, and openvswitch,
but qemu and libvirt have the most tests by far. libvirt has 600 test cases,
but many of these are just parameter validation matrices. libvirt TCK likely
has more functional tests.
All the code is in Python. The libvirt test cases actually use virsh and not
the Python bindings. This was a deliberate choice to get better virsh
coverage. The code is pretty nice, so implementation-wise dealing with virsh
isn't hard at all.
./libvirt/get_started.py is an interactive script that ensures needed
dependencies are installed, and pulls down a pre-made Fedora 17 jeos (just
enough operating system) image. All the tests run against that image by default.
./run is how you'll interact with everything. The important invocations:
# List all libvirt tests
./run --type libvirt --list-tests
# Show all test debugging and stdout to the console
./run --verbose ...
# Run only specific tests. The default libvirt set looks like
./run --type libvirt --tests "unattended_install.import.import virsh_domname
remove_guest.without_disk"
# See all options
./run --help
Autotest has useful and simple infrastructure for logging in to virtual
machines and running commands in there. So for doing end to end tests like
verifying hotplug succeeded or verifying snapshots actually worked it seems
quite handy. It can also assemble a video based on screenshots of the guest,
so you can rewatch an install failure or similar.
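For example, a test case body gets at the guest with just a few calls. This is
a rough sketch from memory of the virt-test utilities, so treat the exact
import paths and signatures as assumptions rather than gospel:

from autotest.client.shared import error

def run_my_check(test, params, env):
    # VM object the framework prepared for this test
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    # log in over SSH (or serial); returns a shell session object
    session = vm.wait_for_login(timeout=240)
    try:
        # run a command in the guest and verify the result end to end
        output = session.cmd_output("uname -r")
        if not output.strip():
            raise error.TestFail("guest did not report a kernel version")
    finally:
        session.close()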
A few misc things:
- The default libvirt tests seem to work fine as non-root, though every
libvirt test is marked as 'root only'. I'm not sure what the practical effect
of that is.
- If you interrupt the tests, you'll have to destroy and undefine the
'autotest-vm1' VM, or the next run will fail. If the test suite completes, it will
clean up after itself. All this should be fixed.
- There are migration tests for libvirt, but I haven't looked at them.
- All the tests run against system installed libvirt. You can probably get
around this by sticking /path/to/libvirt.git/tools in PATH, but some tests
want to restart libvirtd so it isn't that simple.
- Running the tests creates ~/virt_test and moves the F17 image there. If you
move other media there, they can be used for various install tests. You can
override this with VIRT_TEST_DATA_DIR or ./run --data-dir, but IMO there
should be a ~/.autotestconfig or something.
- qemu also has a basic set of smoke tests that test migration, no extra
config required. A custom qemu binary can be used with ./run --qemu-bin.
Probably useful even for libvirt devs that are tracking upstream qemu, if you
suspect qemu.git may currently be broken.
The virt-test wiki also has much more useful info:
https://github.com/autotest/virt-test/wiki
Thanks,
Cole
[libvirt] [PATCH] qemu: fix an off-by-one error in qemuDomainGetPercpuStats
by Guannan Ren
The max value of the number of cpus to compute (id) should not
be equal to or greater than the max cpu number.
The bug occurs when the id value is equal to the max cpu number, which
leads to an off-by-one error in the following for loop.
# virsh cpu-stats guest --start 1
error: Failed to virDomainGetCPUStats()
error: internal error cpuacct parse error
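To see the boundary condition: with max_id cpus the valid indices are
0 .. max_id - 1, so the loop bound has to be clamped to the last valid index.
A quick sketch of the corrected arithmetic (an illustration only, not the
driver code itself):

max_id = 4                       # cpus indexed 0..3
start_cpu, ncpus = 1, 4          # caller asks past the end

# old code: id = max_id = 4, and the loop "for i in 0..id" touches cpu 4
# new code clamps to the last valid index:
if start_cpu + ncpus >= max_id:
    last = max_id - 1
else:
    last = start_cpu + ncpus - 1
assert last == 3                 # loop now stops at the last real cpu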
---
src/qemu/qemu_driver.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index dc35b91..54a6d35 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -14259,9 +14259,9 @@ qemuDomainGetPercpuStats(virDomainObjPtr vm,
param_idx = 0;
/* number of cpus to compute */
- id = max_id;
-
- if (max_id - start_cpu > ncpus - 1)
+ if ((start_cpu + ncpus) >= max_id)
+ id = max_id - 1;
+ else
id = start_cpu + ncpus - 1;
for (i = 0; i <= id; i++) {
--
1.7.11.2
[libvirt] [PATCH] qemu: Fix the memory leak
by Osier Yang
Found by John Ferlan (coverity script)
---
Assuming an ACK from John; it's trivial, so pushed
---
src/qemu/qemu_conf.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/src/qemu/qemu_conf.c b/src/qemu/qemu_conf.c
index 8299b79..33fd67d 100644
--- a/src/qemu/qemu_conf.c
+++ b/src/qemu/qemu_conf.c
@@ -1042,6 +1042,7 @@ qemuAddSharedDisk(virQEMUDriverPtr driver,
if ((VIR_ALLOC(entry) < 0) ||
(VIR_ALLOC_N(entry->domains, 1) < 0) ||
!(entry->domains[0] = strdup(name))) {
+ qemuSharedDiskEntryFree(entry, NULL);
virReportOOMError();
goto cleanup;
}
--
1.7.7.6
[libvirt] [PATCH] keepalive: Guard against integer overflow
by John Ferlan
Don't allow interval to be > INT_MAX/1000 in virKeepAliveStart()
Guard against possible overflow in virKeepAliveTimeout() by capping the
timeout at INT_MAX/1000, since the math that follows multiplies it by 1000.
This is a follow-up of sorts from a Coverity change made last month:
https://www.redhat.com/archives/libvir-list/2013-January/msg02267.html
where it was noted that the timeout value math needed overflow protection.
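The numbers make the hazard concrete (a small sanity check assuming a 32-bit
int; Python integers don't wrap, which is why this is safe to demonstrate
here while the same products would overflow in C):

INT_MAX = 2147483647
limit = INT_MAX / 1000           # 2147483, the largest safe value in seconds

print (limit * 1000) <= INT_MAX          # True: still fits in an int
print ((limit + 1) * 1000) > INT_MAX     # True: one more would overflow in C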
---
src/rpc/virkeepalive.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/src/rpc/virkeepalive.c b/src/rpc/virkeepalive.c
index d1fa642..6d69559 100644
--- a/src/rpc/virkeepalive.c
+++ b/src/rpc/virkeepalive.c
@@ -252,6 +252,12 @@ virKeepAliveStart(virKeepAlivePtr ka,
_("keepalive interval already set"));
goto cleanup;
}
+ /* Guard against overflow */
+ if (interval > INT_MAX / 1000) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("keepalive interval too large"));
+ goto cleanup;
+ }
ka->interval = interval;
ka->count = count;
ka->countToDeath = count;
@@ -323,6 +329,9 @@ virKeepAliveTimeout(virKeepAlivePtr ka)
timeout = ka->interval - (time(NULL) - ka->intervalStart);
if (timeout < 0)
timeout = 0;
+ /* Guard against overflow */
+ if (timeout > INT_MAX / 1000)
+ timeout = INT_MAX / 1000;
}
virObjectUnlock(ka);
--
1.7.11.7
[libvirt] [PATCH] libxl: Fix setting of disk backend
by Jim Fehlig
The libxl driver was setting the backend field of the libxl_device_disk
structure to LIBXL_DISK_BACKEND_TAP when the driver element of the disk
configuration was not specified. This needlessly forces the use of the
blktap driver, which may not be loaded in dom0:
https://bugzilla.redhat.com/show_bug.cgi?id=912488
Ian Campbell suggested that LIBXL_DISK_BACKEND_UNKNOWN is a better
default in this case:
https://www.redhat.com/archives/libvir-list/2013-February/msg01126.html
---
src/libxl/libxl_conf.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/src/libxl/libxl_conf.c b/src/libxl/libxl_conf.c
index 43fb8b1..4ce5dec 100644
--- a/src/libxl/libxl_conf.c
+++ b/src/libxl/libxl_conf.c
@@ -525,9 +525,13 @@ libxlMakeDisk(virDomainDiskDefPtr l_disk, libxl_device_disk *x_disk)
return -1;
}
} else {
- /* No driverName - default to raw/tap?? */
+ /*
+ * If driverName is not specified, default to raw as per
+ * xl-disk-configuration.txt in the xen documentation and let
+ * libxl pick a suitable backend.
+ */
x_disk->format = LIBXL_DISK_FORMAT_RAW;
- x_disk->backend = LIBXL_DISK_BACKEND_TAP;
+ x_disk->backend = LIBXL_DISK_BACKEND_UNKNOWN;
}
/* XXX is this right? */
--
1.8.0.1
[libvirt] [PATCH] Remove a couple of misplaced VIR_FREE
by John Ferlan
While working on the hellolibvirt example code, I stumbled across a
couple extraneous VIR_FREE()'s in qemuStop(). I was looking at all
callers of virConnectListAllDomains()...
---
src/qemu/qemu_driver.c | 3 ---
1 file changed, 3 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index be01ec6..45bd341 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -920,9 +920,6 @@ qemuStop(void) {
if (virDomainManagedSave(domains[i], flags[i]) < 0)
ret = -1;
- VIR_FREE(domains);
- VIR_FREE(flags);
-
cleanup:
for (i = 0 ; i < numDomains ; i++)
virDomainFree(domains[i]);
--
1.7.11.7
[libvirt] libvirt-libxl driver defaulting to tap disk and not working (on Fedora 18 and rawhide)
by Dario Faggioli
Hi Jim, Everyone,
I'm having some issues when trying out the libvirt libxl driver on my
Fedora, both on Fedora 18 and Fedora rawhide (the former seems to be
running 0.10.2.3, the latter 1.0.2).
Here's what happens:
[root@localhost ~]# systemctl status xend.service
xend.service - Xend - interface between hypervisor and some applications
Loaded: loaded (/usr/lib/systemd/system/xend.service; disabled)
Active: inactive (dead)
[root@localhost ~]# virt-install -l http://fedora.mirror.constant.com/linux/releases/18/Fedora/x86_64/os/ --ram 1024 --disk /dev/vms/F18_x64 --name F18_x64
Starting install...
Retrieving file .treeinfo... | 2.2 kB 00:00 !!!
Retrieving file vmlinuz... | 9.3 MB 00:06 !!!
Retrieving file initrd.img... | 53 MB 00:33 !!!
ERROR internal error libxenlight failed to create new domain 'F18_x64'
Domain installation does not appear to have been successful.
If it was, you can restart your domain by running:
virsh --connect xen:/// start F18_x64
otherwise, please restart your installation.
[root@localhost ~]# cat /var/log/libvirt/libxl/libxl.log
...
libxl: verbose: libxl_create.c:158:libxl__domain_build_info_setdefault: qemu-xen is unavailable, use qemu-xen-traditional instead: No such file or directory
libxl: debug: libxl_create.c:1174:do_domain_create: ao 0x7f566c008d90: create: how=(nil) callback=(nil) poller=0x7f566c009030
libxl: debug: libxl_device.c:229:libxl__device_disk_set_backend: Disk vdev=xvda spec.backend=tap
libxl: debug: libxl_device.c:184:disk_try_backend: Disk vdev=xvda, backend tap unsuitable because blktap not available
libxl: error: libxl_device.c:269:libxl__device_disk_set_backend: no suitable backend for disk xvda
libxl: debug: libxl_event.c:1499:libxl__ao_complete: ao 0x7f566c008d90: complete, rc=-3
libxl: debug: libxl_create.c:1187:do_domain_create: ao 0x7f566c008d90: inprogress: poller=0x7f566c009030, flags=ic
libxl: debug: libxl_event.c:1471:libxl__ao__destroy: ao 0x7f566c008d90: destroy
So, it looks like it tries to use blktap, even though I'm using an LVM
volume, and fails. If I go for this:
[root@localhost ~]# virt-install -l http://fedora.mirror.constant.com/linux/releases/18/Fedora/x86_64/os/ --ram 1024 --disk /dev/vms/F18_x64,driver_name=phy --name F18_x64 --bridge virbr0
it works, but only because (or at least so it looks to me) I'm manually
providing ",driver_name=phy".
Also (as could have been expected, I guess) there is no way I can get
it to work with a file-backed VHD.
Is all this supposed to happen?
I haven't had the chance to try out the code from git yet; do you think
it would make any difference?
FYI, I've also opened this Fedora bug report about this very same issue:
https://bugzilla.redhat.com/show_bug.cgi?id=912488
Thanks and Regards,
Dario
--
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)
[libvirt] libvirt-client leaks memory, Ubuntu and Debian-specific
by Igor Lukyanov
Hello.
We faced a very strange leak while using the libvirt library in a long-running server application for cluster orchestration.
The leak is not directly related to libvirt code and shows up only with specific build options (?) and/or system environment (?).
Here are the key points:
1. The libvirt client leaks memory while making (RPC) calls to a server: RSS memory usage shown by ps, top, etc. grows indefinitely (please check the attached ps.log). Test app attached.
2. The leak is present on Debian and Ubuntu and absent on Mac OS and Gentoo, so it's clearly an environment or build problem.
3. Valgrind does not see the leak. From valgrind's point of view, from start to finish the application constantly consumes 110 KB of memory (while ps shows multiple megabytes) and does not contain any leaks.
4. Logging the activity of virMalloc, virRealloc, etc. does not show anything: as expected, all allocated memory is correctly freed, so it's definitely not a bug in the code (we tested that before recognizing that the problem is distro/platform specific).
Some useful logs and test code are attached. I think digging into build options and swapping out system libraries will help us beat the problem, but it would be nice if someone already had a working solution. Thank you for the help.
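[Editor's note: the attached test app is not reproduced in the archive; a
hypothetical reconstruction of that kind of test loop (the exact calls are an
assumption, not the actual attachment) looks like:]

import time
import libvirt

# stand-in for the attached test app: hammer the RPC layer in a loop and
# watch RSS from outside, e.g. with `ps -o rss= -p <pid>`
conn = libvirt.openReadOnly(None)
while True:
    for dom in conn.listAllDomains(0):
        dom.info()               # a few small RPC round trips per iteration
    time.sleep(0.1)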
[libvirt] [PATCH 0/6 v3] Shared disk table related fixes/improvements
by Osier Yang
Inspired by the crash when using a CD-ROM or floppy with an empty source.
Besides the fixes for the crashes, this also enriches the hash table
with a list of the names of the domains which use the shared disk. To keep
the hash table in a consistent state, it also fixes the problems
with adding/removing the hash entry when updating/ejecting media of a
CD-ROM or floppy, and updates the hash table when reconnecting to the
domain process.
v2 - v3:
* Rebased on top
* Small changes after more testing.
Osier Yang (6):
qemu: Add checking in helpers for sgio setting
qemu: Merge qemuCheckSharedDisk into qemuAddSharedDisk
qemu: Record names of domain which uses the shared disk in hash table
qemu: Update shared disk table when reconnecting qemu process
qemu: Move the shared disk adding and sgio setting prior to attaching
qemu: Remove the shared disk entry if the operation is ejecting or
updating
src/conf/domain_conf.c | 20 ++++
src/conf/domain_conf.h | 3 +
src/libvirt_private.syms | 1 +
src/qemu/qemu_conf.c | 243 ++++++++++++++++++++++++++++++++++++++++++---
src/qemu/qemu_conf.h | 26 ++++-
src/qemu/qemu_driver.c | 108 ++++++++++++++++-----
src/qemu/qemu_hotplug.c | 19 +----
src/qemu/qemu_hotplug.h | 1 +
src/qemu/qemu_process.c | 81 ++++------------
src/qemu/qemu_process.h | 3 -
10 files changed, 377 insertions(+), 128 deletions(-)
--
1.7.7.6