Commit 4c4acd24 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] rmap 1 linux/rmap.h

From: Hugh Dickins <hugh@veritas.com>

First of a batch of three rmap patches: this initial batch paves the way for a
move to some form of object-based rmap (probably Andrea's, but drawing from
mine too), and makes almost no functional change by itself.  A few days will
intervene before the next batch, to give the struct page changes in the second
patch some exposure before proceeding.

rmap 1: create include/linux/rmap.h

Start small: linux/rmap-locking.h has already gathered some declarations
unrelated to locking, and the rest of the rmap declarations were over in
linux/swap.h; gather them all together in linux/rmap.h, and rename
pte_chain_lock to rmap_lock.
parent 3e2ea65d
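
As a caller-visible summary (a minimal sketch, not code from this patch; the
function and its body are hypothetical), the change is a header and naming
switch only, with the locking semantics unchanged:

#include <linux/rmap.h>                 /* was: <linux/rmap-locking.h> */

/* Hypothetical caller: serialize against other users of this page's
 * reverse mappings while we examine or modify them. */
static void frob_reverse_mappings(struct page *page)
{
        rmap_lock(page);                /* was: pte_chain_lock(page) */
        /* ... look at page->pte.direct / page->pte.chain here ... */
        rmap_unlock(page);              /* was: pte_chain_unlock(page) */
}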
@@ -45,7 +45,7 @@
 #include <linux/mount.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
-#include <linux/rmap-locking.h>
+#include <linux/rmap.h>
 #include <asm/uaccess.h>
 #include <asm/pgalloc.h>
@@ -69,7 +69,7 @@
 #define PG_private              12      /* Has something at ->private */
 #define PG_writeback            13      /* Page is under writeback */
 #define PG_nosave               14      /* Used for system suspend/resume */
-#define PG_chainlock            15      /* lock bit for ->pte_chain */
+#define PG_maplock              15      /* Lock bit for rmap to ptes */
 #define PG_direct               16      /* ->pte_chain points directly at pte */
 #define PG_mappedtodisk         17      /* Has blocks allocated on-disk */
-/*
- * include/linux/rmap-locking.h
- *
- * Locking primitives for exclusive access to a page's reverse-mapping
- * pte chain.
- */
-
-#include <linux/slab.h>
-
-struct pte_chain;
-extern kmem_cache_t *pte_chain_cache;
-
-#define pte_chain_lock(page)    bit_spin_lock(PG_chainlock, (unsigned long *)&page->flags)
-#define pte_chain_unlock(page)  bit_spin_unlock(PG_chainlock, (unsigned long *)&page->flags)
-
-struct pte_chain *pte_chain_alloc(int gfp_flags);
-void __pte_chain_free(struct pte_chain *pte_chain);
-
-static inline void pte_chain_free(struct pte_chain *pte_chain)
-{
-        if (pte_chain)
-                __pte_chain_free(pte_chain);
-}
+#ifndef _LINUX_RMAP_H
+#define _LINUX_RMAP_H
+/*
+ * Declarations for Reverse Mapping functions in mm/rmap.c
+ * Its structures are declared within that file.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+
+#define rmap_lock(page) \
+        bit_spin_lock(PG_maplock, (unsigned long *)&(page)->flags)
+#define rmap_unlock(page) \
+        bit_spin_unlock(PG_maplock, (unsigned long *)&(page)->flags)
+
+#ifdef CONFIG_MMU
+
+struct pte_chain;
+struct pte_chain *pte_chain_alloc(int gfp_flags);
+void __pte_chain_free(struct pte_chain *pte_chain);
+
+static inline void pte_chain_free(struct pte_chain *pte_chain)
+{
+        if (pte_chain)
+                __pte_chain_free(pte_chain);
+}
+
+struct pte_chain * fastcall
+        page_add_rmap(struct page *, pte_t *, struct pte_chain *);
+void fastcall page_remove_rmap(struct page *, pte_t *);
+
+/*
+ * Called from mm/vmscan.c to handle paging out
+ */
+int fastcall page_referenced(struct page *);
+int fastcall try_to_unmap(struct page *);
+
+#else   /* !CONFIG_MMU */
+
+#define page_referenced(page)   TestClearPageReferenced(page)
+#define try_to_unmap(page)      SWAP_FAIL
+
+#endif  /* CONFIG_MMU */
+
+/*
+ * Return values of try_to_unmap
+ */
+#define SWAP_SUCCESS    0
+#define SWAP_AGAIN      1
+#define SWAP_FAIL       2
+
+#endif  /* _LINUX_RMAP_H */
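
The lock itself costs no storage: rmap_lock() is a bit spinlock on the
PG_maplock bit of the existing page->flags word.  As a rough sketch of the
semantics (a hand-rolled equivalent, assuming the 2.6-era bitop primitives
test_and_set_bit(), clear_bit(), cpu_relax() and smp_mb__before_clear_bit();
the real bit_spin_lock() also handles preemption and uniprocessor builds):

static inline void rmap_lock_sketch(struct page *page)
{
        /* Spin until we atomically win the PG_maplock bit. */
        while (test_and_set_bit(PG_maplock, (unsigned long *)&page->flags))
                cpu_relax();
}

static inline void rmap_unlock_sketch(struct page *page)
{
        /* Order the critical section before releasing the bit. */
        smp_mb__before_clear_bit();
        clear_bit(PG_maplock, (unsigned long *)&page->flags);
}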
@@ -76,7 +76,6 @@ struct reclaim_state {
 #ifdef __KERNEL__
 
 struct address_space;
-struct pte_chain;
 struct sysinfo;
 struct writeback_control;
 struct zone;
@@ -177,26 +176,11 @@ extern int try_to_free_pages(struct zone **, unsigned int, unsigned int);
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
-/* linux/mm/rmap.c */
-#ifdef CONFIG_MMU
-int FASTCALL(page_referenced(struct page *));
-struct pte_chain *FASTCALL(page_add_rmap(struct page *, pte_t *,
-                                        struct pte_chain *));
-void FASTCALL(page_remove_rmap(struct page *, pte_t *));
-int FASTCALL(try_to_unmap(struct page *));
-
 /* linux/mm/shmem.c */
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
-#else
-#define page_referenced(page)   TestClearPageReferenced(page)
-#define try_to_unmap(page)      SWAP_FAIL
-#endif /* CONFIG_MMU */
-
-/* return values of try_to_unmap */
-#define SWAP_SUCCESS    0
-#define SWAP_AGAIN      1
-#define SWAP_FAIL       2
 
 extern void swap_unplug_io_fn(struct backing_dev_info *);
 
 #ifdef CONFIG_SWAP
@@ -12,7 +12,7 @@
 #include <linux/mman.h>
 #include <linux/pagemap.h>
 #include <linux/swapops.h>
-#include <linux/rmap-locking.h>
+#include <linux/rmap.h>
 #include <linux/module.h>
 #include <asm/mmu_context.h>
@@ -43,7 +43,7 @@
 #include <linux/swap.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
-#include <linux/rmap-locking.h>
+#include <linux/rmap.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -15,7 +15,7 @@
 #include <linux/swap.h>
 #include <linux/fs.h>
 #include <linux/highmem.h>
-#include <linux/rmap-locking.h>
+#include <linux/rmap.h>
 #include <linux/security.h>
 #include <asm/uaccess.h>
@@ -13,7 +13,7 @@
 /*
  * Locking:
- * - the page->pte.chain is protected by the PG_chainlock bit,
+ * - the page->pte.chain is protected by the PG_maplock bit,
  *   which nests within the the mm->page_table_lock,
  *   which nests within the page lock.
  * - because swapout locking is opposite to the locking order
@@ -26,7 +26,7 @@
 #include <linux/swapops.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-#include <linux/rmap-locking.h>
+#include <linux/rmap.h>
 #include <linux/cache.h>
 #include <linux/percpu.h>
@@ -108,7 +108,7 @@ pte_chain_encode(struct pte_chain *pte_chain, int idx)
  *
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of processes which referenced the page.
- * Caller needs to hold the pte_chain_lock.
+ * Caller needs to hold the rmap lock.
  *
  * If the page has a single-entry pte_chain, collapse that back to a PageDirect
  * representation.  This way, it's only done under memory pressure.
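
For orientation, a simplified sketch of the two representations this comment
contrasts (the union is anonymous inside struct page in the real tree, and
NRPTE is really computed in mm/rmap.c from the L1 cacheline size; the names
rmap_ptes and the NRPTE value below are illustrative only):

typedef unsigned long pte_addr_t;       /* illustrative; arch-dependent in reality */

#define NRPTE 7                         /* illustrative; derived from cacheline size */

struct pte_chain {
        unsigned long next_and_idx;     /* next-chain pointer packed with the
                                           index of the first used slot */
        pte_addr_t ptes[NRPTE];         /* pte addresses, filled from the end */
};

/* PageDirect set:   pte.direct holds the address of the page's sole pte.
 * PageDirect clear: pte.chain points at pte_chains holding many of them. */
union rmap_ptes {
        pte_addr_t direct;
        struct pte_chain *chain;
};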
@@ -175,7 +175,7 @@ page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain)
        if (PageReserved(page))
                return pte_chain;
 
-       pte_chain_lock(page);
+       rmap_lock(page);
 
        if (page->pte.direct == 0) {
                page->pte.direct = pte_paddr;
@@ -208,7 +208,7 @@ page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain)
                cur_pte_chain->ptes[pte_chain_idx(cur_pte_chain) - 1] = pte_paddr;
                cur_pte_chain->next_and_idx--;
 out:
-       pte_chain_unlock(page);
+       rmap_unlock(page);
        return pte_chain;
 }
@@ -230,7 +230,7 @@ void fastcall page_remove_rmap(struct page *page, pte_t *ptep)
        if (!pfn_valid(page_to_pfn(page)) || PageReserved(page))
                return;
 
-       pte_chain_lock(page);
+       rmap_lock(page);
 
        if (!page_mapped(page))
                goto out_unlock;        /* remap_page_range() from a driver? */
@@ -276,8 +276,7 @@ void fastcall page_remove_rmap(struct page *page, pte_t *ptep)
        if (!page_mapped(page))
                dec_page_state(nr_mapped);
 out_unlock:
-       pte_chain_unlock(page);
-       return;
+       rmap_unlock(page);
 }
 
 /**
@@ -290,10 +289,9 @@ void fastcall page_remove_rmap(struct page *page, pte_t *ptep)
  * to the locking order used by the page fault path, we use trylocks.
  * Locking:
  *   page lock                  shrink_list(), trylock
- *   pte_chain_lock             shrink_list()
+ *   rmap lock                  shrink_list()
  *   mm->page_table_lock        try_to_unmap_one(), trylock
  */
-static int FASTCALL(try_to_unmap_one(struct page *, pte_addr_t));
 static int fastcall try_to_unmap_one(struct page * page, pte_addr_t paddr)
 {
        pte_t *ptep = rmap_ptep_map(paddr);
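
The trylock guards against a lock-order inversion: the fault path takes
mm->page_table_lock and then the rmap lock, while pageout arrives already
holding the page lock and the rmap lock.  A condensed sketch of the resulting
pattern (hypothetical shape only, not this patch's code; the real function
derives the mm from the pte address):

static int unmap_one_sketch(struct page *page, struct mm_struct *mm)
{
        /* Blocking on page_table_lock here could deadlock against the
         * fault path, which takes the locks in the opposite order. */
        if (!spin_trylock(&mm->page_table_lock))
                return SWAP_AGAIN;      /* caller keeps the page, retries later */

        /* ... flush the TLB, clear the pte, stash the swap entry, drop rss ... */

        spin_unlock(&mm->page_table_lock);
        return SWAP_SUCCESS;
}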
@@ -376,7 +374,7 @@ static int fastcall try_to_unmap_one(struct page * page, pte_addr_t paddr)
  *
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path.  Caller must hold the page lock
- * and its pte chain lock.  Return values are:
+ * and its rmap lock.  Return values are:
  *
  *      SWAP_SUCCESS    - we succeeded in removing all mappings
  *      SWAP_AGAIN      - we missed a trylock, try again later
@@ -21,7 +21,7 @@
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/rmap-locking.h>
+#include <linux/rmap.h>
 #include <linux/security.h>
 #include <linux/backing-dev.h>
@@ -28,7 +28,7 @@
 #include <linux/mm_inline.h>
 #include <linux/pagevec.h>
 #include <linux/backing-dev.h>
-#include <linux/rmap-locking.h>
+#include <linux/rmap.h>
 #include <linux/topology.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
@@ -173,7 +173,7 @@ static int shrink_slab(unsigned long scanned, unsigned int gfp_mask)
        return 0;
 }
 
-/* Must be called with page's pte_chain_lock held. */
+/* Must be called with page's rmap lock held. */
 static inline int page_mapping_inuse(struct page *page)
 {
        struct address_space *mapping = page->mapping;
@@ -278,11 +278,11 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
                if (PageWriteback(page))
                        goto keep_locked;
 
-               pte_chain_lock(page);
+               rmap_lock(page);
                referenced = page_referenced(page);
                if (referenced && page_mapping_inuse(page)) {
                        /* In active use or really unfreeable.  Activate it. */
-                       pte_chain_unlock(page);
+                       rmap_unlock(page);
                        goto activate_locked;
                }
@@ -296,10 +296,10 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
                 * XXX: implement swap clustering ?
                 */
                if (page_mapped(page) && !mapping && !PagePrivate(page)) {
-                       pte_chain_unlock(page);
+                       rmap_unlock(page);
                        if (!add_to_swap(page))
                                goto activate_locked;
-                       pte_chain_lock(page);
+                       rmap_lock(page);
                        mapping = page->mapping;
                }
 #endif /* CONFIG_SWAP */
@@ -314,16 +314,16 @@ shrink_list(struct list_head *page_list, unsigned int gfp_mask,
                if (page_mapped(page) && mapping) {
                        switch (try_to_unmap(page)) {
                        case SWAP_FAIL:
-                               pte_chain_unlock(page);
+                               rmap_unlock(page);
                                goto activate_locked;
                        case SWAP_AGAIN:
-                               pte_chain_unlock(page);
+                               rmap_unlock(page);
                                goto keep_locked;
                        case SWAP_SUCCESS:
                                ; /* try to free the page below */
                        }
                }
-               pte_chain_unlock(page);
+               rmap_unlock(page);
 
                /*
                 * If the page is dirty, only perform writeback if that write
@@ -657,13 +657,13 @@ refill_inactive_zone(struct zone *zone, const int nr_pages_in,
                        list_add(&page->lru, &l_active);
                        continue;
                }
-               pte_chain_lock(page);
+               rmap_lock(page);
                if (page_referenced(page)) {
-                       pte_chain_unlock(page);
+                       rmap_unlock(page);
                        list_add(&page->lru, &l_active);
                        continue;
                }
-               pte_chain_unlock(page);
+               rmap_unlock(page);
        }
 
        /*
         * FIXME: need to consider page_count(page) here if/when we