Commit f5ecca06 authored by Vishal Moola (Oracle), committed by Andrew Morton

mm: convert ptlock_alloc() to use ptdescs

This removes some direct accesses to struct page, working towards
splitting out struct ptdesc from struct page.

Link: https://lkml.kernel.org/r/20230807230513.102486-6-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Guo Ren <guoren@kernel.org>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Palmer Dabbelt <palmer@rivosinc.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f8546d84
...@@ -2826,7 +2826,7 @@ static inline void pagetable_free(struct ptdesc *pt) ...@@ -2826,7 +2826,7 @@ static inline void pagetable_free(struct ptdesc *pt)
#if USE_SPLIT_PTE_PTLOCKS #if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS #if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void); void __init ptlock_cache_init(void);
extern bool ptlock_alloc(struct page *page); bool ptlock_alloc(struct ptdesc *ptdesc);
extern void ptlock_free(struct page *page); extern void ptlock_free(struct page *page);
static inline spinlock_t *ptlock_ptr(struct page *page) static inline spinlock_t *ptlock_ptr(struct page *page)
...@@ -2838,7 +2838,7 @@ static inline void ptlock_cache_init(void) ...@@ -2838,7 +2838,7 @@ static inline void ptlock_cache_init(void)
{ {
} }
static inline bool ptlock_alloc(struct page *page) static inline bool ptlock_alloc(struct ptdesc *ptdesc)
{ {
return true; return true;
} }
...@@ -2868,7 +2868,7 @@ static inline bool ptlock_init(struct page *page) ...@@ -2868,7 +2868,7 @@ static inline bool ptlock_init(struct page *page)
* slab code uses page->slab_cache, which share storage with page->ptl. * slab code uses page->slab_cache, which share storage with page->ptl.
*/ */
VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page); VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
if (!ptlock_alloc(page)) if (!ptlock_alloc(page_ptdesc(page)))
return false; return false;
spin_lock_init(ptlock_ptr(page)); spin_lock_init(ptlock_ptr(page));
return true; return true;
......
...@@ -6114,14 +6114,14 @@ void __init ptlock_cache_init(void) ...@@ -6114,14 +6114,14 @@ void __init ptlock_cache_init(void)
SLAB_PANIC, NULL); SLAB_PANIC, NULL);
} }
bool ptlock_alloc(struct page *page) bool ptlock_alloc(struct ptdesc *ptdesc)
{ {
spinlock_t *ptl; spinlock_t *ptl;
ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
if (!ptl) if (!ptl)
return false; return false;
page->ptl = ptl; ptdesc->ptl = ptl;
return true; return true;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment