Commit b348b5fe authored by Stefan Roesch's avatar Stefan Roesch Committed by Andrew Morton

mm/ksm: add pages scanned metric

ksm currently maintains several statistics, which let you determine how
successful KSM is at sharing pages.  However it does not contain a metric
to determine how much work it does.

This commit adds the pages scanned metric.  This allows the administrator
to determine how many pages have been scanned over a period of time.

Link: https://lkml.kernel.org/r/20230811193655.2518943-1-shr@devkernel.io
Signed-off-by: Stefan Roesch <shr@devkernel.io>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0790e1e2
...@@ -159,6 +159,8 @@ The effectiveness of KSM and MADV_MERGEABLE is shown in ``/sys/kernel/mm/ksm/``: ...@@ -159,6 +159,8 @@ The effectiveness of KSM and MADV_MERGEABLE is shown in ``/sys/kernel/mm/ksm/``:
general_profit general_profit
how effective is KSM. The calculation is explained below. how effective is KSM. The calculation is explained below.
pages_scanned
how many pages are being scanned for ksm
pages_shared pages_shared
how many shared pages are being used how many shared pages are being used
pages_sharing pages_sharing
......
...@@ -242,6 +242,9 @@ static struct kmem_cache *rmap_item_cache; ...@@ -242,6 +242,9 @@ static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache; static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache; static struct kmem_cache *mm_slot_cache;
/* The number of pages scanned */
static unsigned long ksm_pages_scanned;
/* The number of nodes in the stable tree */ /* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared; static unsigned long ksm_pages_shared;
...@@ -2476,8 +2479,9 @@ static void ksm_do_scan(unsigned int scan_npages) ...@@ -2476,8 +2479,9 @@ static void ksm_do_scan(unsigned int scan_npages)
{ {
struct ksm_rmap_item *rmap_item; struct ksm_rmap_item *rmap_item;
struct page *page; struct page *page;
unsigned int npages = scan_npages;
while (scan_npages-- && likely(!freezing(current))) { while (npages-- && likely(!freezing(current))) {
cond_resched(); cond_resched();
rmap_item = scan_get_next_rmap_item(&page); rmap_item = scan_get_next_rmap_item(&page);
if (!rmap_item) if (!rmap_item)
...@@ -2485,6 +2489,8 @@ static void ksm_do_scan(unsigned int scan_npages) ...@@ -2485,6 +2489,8 @@ static void ksm_do_scan(unsigned int scan_npages)
cmp_and_merge_page(page, rmap_item); cmp_and_merge_page(page, rmap_item);
put_page(page); put_page(page);
} }
ksm_pages_scanned += scan_npages - npages;
} }
static int ksmd_should_run(void) static int ksmd_should_run(void)
...@@ -3323,6 +3329,13 @@ static ssize_t max_page_sharing_store(struct kobject *kobj, ...@@ -3323,6 +3329,13 @@ static ssize_t max_page_sharing_store(struct kobject *kobj,
} }
KSM_ATTR(max_page_sharing); KSM_ATTR(max_page_sharing);
/*
 * sysfs read handler for /sys/kernel/mm/ksm/pages_scanned.
 *
 * Emits the running total of pages ksmd has scanned (ksm_pages_scanned,
 * accumulated in ksm_do_scan()), letting an administrator measure how
 * much scanning work KSM performs over time.
 *
 * NOTE(review): the counter is a plain unsigned long updated by ksmd and
 * read here without explicit synchronization — presumably acceptable for
 * a monotonically increasing statistic; confirm against the other ksm
 * counters' conventions.
 */
static ssize_t pages_scanned_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%lu\n", ksm_pages_scanned);
}
/* Read-only attribute: no _store handler is defined for pages_scanned. */
KSM_ATTR_RO(pages_scanned);
static ssize_t pages_shared_show(struct kobject *kobj, static ssize_t pages_shared_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf) struct kobj_attribute *attr, char *buf)
{ {
...@@ -3431,6 +3444,7 @@ static struct attribute *ksm_attrs[] = { ...@@ -3431,6 +3444,7 @@ static struct attribute *ksm_attrs[] = {
&sleep_millisecs_attr.attr, &sleep_millisecs_attr.attr,
&pages_to_scan_attr.attr, &pages_to_scan_attr.attr,
&run_attr.attr, &run_attr.attr,
&pages_scanned_attr.attr,
&pages_shared_attr.attr, &pages_shared_attr.attr,
&pages_sharing_attr.attr, &pages_sharing_attr.attr,
&pages_unshared_attr.attr, &pages_unshared_attr.attr,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment