Commit 4dedbf8d authored by David S. Miller, committed by Linus Torvalds

sparc64: kill page table quicklists

With the recent mmu_gather changes that included generic RCU freeing of
page-tables, it is now quite straightforward to implement gup_fast() on
sparc64.

This patch:

Remove the page table quicklists.  They are pointless and make it harder
to use RCU page table freeing and share code with other architectures.

BTW, this is the second time this has happened, see commit 3c936465
("[SPARC64]: Kill pgtable quicklists and use SLAB.")
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c15bef30
...@@ -81,10 +81,6 @@ config IOMMU_HELPER ...@@ -81,10 +81,6 @@ config IOMMU_HELPER
bool bool
default y if SPARC64 default y if SPARC64
config QUICKLIST
bool
default y if SPARC64
config STACKTRACE_SUPPORT config STACKTRACE_SUPPORT
bool bool
default y if SPARC64 default y if SPARC64
......
...@@ -5,7 +5,6 @@ ...@@ -5,7 +5,6 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/quicklist.h>
#include <asm/spitfire.h> #include <asm/spitfire.h>
#include <asm/cpudata.h> #include <asm/cpudata.h>
...@@ -14,69 +13,68 @@ ...@@ -14,69 +13,68 @@
/* Page table allocation/freeing. */ /* Page table allocation/freeing. */
extern struct kmem_cache *pgtable_cache;
static inline pgd_t *pgd_alloc(struct mm_struct *mm) static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{ {
return quicklist_alloc(0, GFP_KERNEL, NULL); return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
} }
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{ {
quicklist_free(0, NULL, pgd); kmem_cache_free(pgtable_cache, pgd);
} }
#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{ {
return quicklist_alloc(0, GFP_KERNEL, NULL); return kmem_cache_alloc(pgtable_cache,
GFP_KERNEL|__GFP_REPEAT);
} }
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{ {
quicklist_free(0, NULL, pmd); kmem_cache_free(pgtable_cache, pmd);
} }
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address) unsigned long address)
{ {
return quicklist_alloc(0, GFP_KERNEL, NULL); return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
} }
static inline pgtable_t pte_alloc_one(struct mm_struct *mm, static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
unsigned long address) unsigned long address)
{ {
struct page *page; struct page *page;
void *pg; pte_t *pte;
pg = quicklist_alloc(0, GFP_KERNEL, NULL); pte = pte_alloc_one_kernel(mm, address);
if (!pg) if (!pte)
return NULL; return NULL;
page = virt_to_page(pg); page = virt_to_page(pte);
pgtable_page_ctor(page); pgtable_page_ctor(page);
return page; return page;
} }
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{ {
quicklist_free(0, NULL, pte); free_page((unsigned long)pte);
} }
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{ {
pgtable_page_dtor(ptepage); pgtable_page_dtor(ptepage);
quicklist_free_page(0, NULL, ptepage); __free_page(ptepage);
} }
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) #define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE) \ #define pmd_populate(MM,PMD,PTE_PAGE) \
pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE)) pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
#define pmd_pgtable(pmd) pmd_page(pmd) #define pmd_pgtable(pmd) pmd_page(pmd)
static inline void check_pgt_cache(void) #define check_pgt_cache() do { } while (0)
{
quicklist_trim(0, NULL, 25, 16);
}
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte) #define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
......
...@@ -236,6 +236,8 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign ...@@ -236,6 +236,8 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
} }
} }
struct kmem_cache *pgtable_cache __read_mostly;
static struct kmem_cache *tsb_caches[8] __read_mostly; static struct kmem_cache *tsb_caches[8] __read_mostly;
static const char *tsb_cache_names[8] = { static const char *tsb_cache_names[8] = {
...@@ -253,6 +255,15 @@ void __init pgtable_cache_init(void) ...@@ -253,6 +255,15 @@ void __init pgtable_cache_init(void)
{ {
unsigned long i; unsigned long i;
pgtable_cache = kmem_cache_create("pgtable_cache",
PAGE_SIZE, PAGE_SIZE,
0,
_clear_page);
if (!pgtable_cache) {
prom_printf("pgtable_cache_init(): Could not create!\n");
prom_halt();
}
for (i = 0; i < 8; i++) { for (i = 0; i < 8; i++) {
unsigned long size = 8192 << i; unsigned long size = 8192 << i;
const char *name = tsb_cache_names[i]; const char *name = tsb_cache_names[i];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment