Commit 84097518 authored by Nick Piggin, committed by Linus Torvalds

[PATCH] mm: nommu use compound pages

Now that compound page handling is properly fixed in the VM, move nommu
over to using compound pages rather than rolling their own refcounting.

nommu vm page refcounting is broken anyway, but there is no need to have
divergent code in the core VM now, nor when it gets fixed.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: David Howells <dhowells@redhat.com>

(Needs testing, please).
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 0f8053a5
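
For orientation (not part of the patch): with __GFP_COMP a higher-order allocation comes back as a single compound page, so the head page's reference count covers the whole block and the tail pages just point back at it. A minimal sketch of the idea follows; the function name is illustrative only.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Sketch only (not part of the patch): what __GFP_COMP buys nommu.
 * The whole 1 << order block becomes one compound page whose head
 * carries the reference count for every sub-page.
 */
static int compound_block_demo(unsigned int order)
{
	struct page *head = alloc_pages(GFP_KERNEL | __GFP_COMP, order);

	if (!head)
		return -ENOMEM;

	/*
	 * get_page()/put_page() on any sub-page are redirected to the head,
	 * so a user such as access_process_vm() can no longer free a page
	 * out of the middle of a live mapping.
	 */
	get_page(head + 1);
	put_page(head + 1);

	__free_pages(head, order);	/* drop the allocation's own reference */
	return 0;
}

This is why the per-page set_page_count() loops and the CONFIG_MMU special cases removed below are no longer needed.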
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -87,8 +87,7 @@ static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 	xpages = 1UL << order;
 	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	for (loop = 0; loop < npages; loop++)
-		set_page_count(pages + loop, 1);
+	split_page(pages, order);
 
 	/* trim off any pages we don't actually require */
 	for (loop = npages; loop < xpages; loop++)
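
ramfs goes the other way here: the block stays non-compound, and split_page() hands each sub-page its own reference count so the excess pages can be trimmed off and freed one by one. A rough sketch of that pattern, assuming a hypothetical alloc_exact_pages() helper:

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Sketch only (not part of the patch): the expand-then-trim pattern used
 * above.  The block is deliberately *not* compound; split_page() gives
 * every sub-page an independent reference count so the unused tail pages
 * can be handed back individually.
 */
static struct page *alloc_exact_pages(unsigned long npages, unsigned int order)
{
	unsigned long xpages = 1UL << order;
	unsigned long loop;
	struct page *pages = alloc_pages(GFP_KERNEL, order);

	if (!pages)
		return NULL;

	split_page(pages, order);		/* each sub-page now refcounted */

	for (loop = npages; loop < xpages; loop++)
		__free_page(pages + loop);	/* trim what we don't need */

	return pages;
}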
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -327,11 +327,7 @@ static inline void get_page(struct page *page)
 
 void put_page(struct page *page);
 
-#ifdef CONFIG_MMU
 void split_page(struct page *page, unsigned int order);
-#else
-static inline void split_page(struct page *page, unsigned int order) {}
-#endif
 
 /*
  * Multiple processes may "see" the same page.  E.g. for untouched
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -15,19 +15,7 @@
 
 static inline void set_page_refs(struct page *page, int order)
 {
-#ifdef CONFIG_MMU
 	set_page_count(page, 1);
-#else
-	int i;
-
-	/*
-	 * We need to reference all the pages for this order, otherwise if
-	 * anyone accesses one of the pages with (get/put) it will be freed.
-	 * - eg: access_process_vm()
-	 */
-	for (i = 0; i < (1 << order); i++)
-		set_page_count(page + i, 1);
-#endif /* CONFIG_MMU */
 }
 
 static inline void __put_page(struct page *page)
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -159,7 +159,7 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 	/*
 	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
 	 */
-	return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
+	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
 
 struct page * vmalloc_to_page(void *addr)
@@ -623,7 +623,7 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
 	 * - note that this may not return a page-aligned address if the object
 	 *   we're allocating is smaller than a page
 	 */
-	base = kmalloc(len, GFP_KERNEL);
+	base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
 	if (!base)
 		goto enomem;
 
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -422,11 +422,6 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	mutex_debug_check_no_locks_freed(page_address(page),
 					 PAGE_SIZE<<order);
 
-#ifndef CONFIG_MMU
-	for (i = 1 ; i < (1 << order) ; ++i)
-		__put_page(page + i);
-#endif
-
 	for (i = 0 ; i < (1 << order) ; ++i)
 		reserved += free_pages_check(page + i);
 	if (reserved)
@@ -746,7 +741,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 		clear_highpage(page + i);
 }
 
-#ifdef CONFIG_MMU
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -766,7 +760,6 @@ void split_page(struct page *page, unsigned int order)
 		set_page_count(page + i, 1);
 	}
 }
-#endif
 
 /*
  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -590,6 +590,8 @@ static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
 
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
+	if (unlikely(PageCompound(page)))
+		page = (struct page *)page_private(page);
 	return (struct kmem_cache *)page->lru.next;
 }
 
@@ -600,6 +602,8 @@ static inline void page_set_slab(struct page *page, struct slab *slab)
 
 static inline struct slab *page_get_slab(struct page *page)
 {
+	if (unlikely(PageCompound(page)))
+		page = (struct page *)page_private(page);
 	return (struct slab *)page->lru.prev;
 }
 
@@ -2412,8 +2416,11 @@ static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
 	struct page *page;
 
 	/* Nasty!!!!!! I hope this is OK. */
-	i = 1 << cachep->gfporder;
 	page = virt_to_page(objp);
+
+	i = 1;
+	if (likely(!PageCompound(page)))
+		i <<= cachep->gfporder;
 	do {
 		page_set_cache(page, cachep);
 		page_set_slab(page, slabp);
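
The slab hunks rely on the same indirection: for a compound (__GFP_COMP) kmalloc block, only the head page is stamped with the cache and slab pointers, so a lookup from an arbitrary sub-page must first hop to the head. A minimal sketch of that step (head_of() is an illustrative name; the patch open-codes it in page_get_cache() and page_get_slab()):

#include <linux/mm.h>

/*
 * Sketch only: the resolution step that page_get_cache()/page_get_slab()
 * now perform.  In this era a compound tail page's ->private holds a
 * pointer to the head page, which is where the descriptors live.
 */
static inline struct page *head_of(struct page *page)
{
	if (unlikely(PageCompound(page)))
		page = (struct page *)page_private(page);
	return page;
}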