Commit b6b19f25 authored by Hugh Dickins, committed by Linus Torvalds

ksm: make rmap walks more scalable

The rmap walks in ksm.c are like those in rmap.c: they can safely be
done with anon_vma_lock_read().

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2832bc19
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1624,7 +1624,7 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
 		struct anon_vma_chain *vmac;
 		struct vm_area_struct *vma;
 
-		anon_vma_lock_write(anon_vma);
+		anon_vma_lock_read(anon_vma);
 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
 					       0, ULONG_MAX) {
 			vma = vmac->vma;
@@ -1648,7 +1648,7 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
 			if (!search_new_forks || !mapcount)
 				break;
 		}
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_read(anon_vma);
 		if (!mapcount)
 			goto out;
 	}
@@ -1678,7 +1678,7 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 		struct anon_vma_chain *vmac;
 		struct vm_area_struct *vma;
 
-		anon_vma_lock_write(anon_vma);
+		anon_vma_lock_read(anon_vma);
 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
 					       0, ULONG_MAX) {
 			vma = vmac->vma;
@@ -1697,11 +1697,11 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 			ret = try_to_unmap_one(page, vma,
 					rmap_item->address, flags);
 			if (ret != SWAP_AGAIN || !page_mapped(page)) {
-				anon_vma_unlock(anon_vma);
+				anon_vma_unlock_read(anon_vma);
 				goto out;
 			}
 		}
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_read(anon_vma);
 	}
 	if (!search_new_forks++)
 		goto again;
@@ -1731,7 +1731,7 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
 		struct anon_vma_chain *vmac;
 		struct vm_area_struct *vma;
 
-		anon_vma_lock_write(anon_vma);
+		anon_vma_lock_read(anon_vma);
 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
 					       0, ULONG_MAX) {
 			vma = vmac->vma;
@@ -1749,11 +1749,11 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
 
 			ret = rmap_one(page, vma, rmap_item->address, arg);
 			if (ret != SWAP_AGAIN) {
-				anon_vma_unlock(anon_vma);
+				anon_vma_unlock_read(anon_vma);
 				goto out;
 			}
 		}
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_read(anon_vma);
 	}
 	if (!search_new_forks++)
 		goto again;