Commit 99b3f8f7 authored by David Hildenbrand, committed by Alexander Gordeev

s390/uv: Implement HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE

Let's also implement HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE, so we can convert
arch_make_page_accessible() to be a simple wrapper around
arch_make_folio_accessible(). Unfortunately, we cannot do that in the
header.
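
For context, the macro follows the usual arch-override pattern: core-MM provides a no-op fallback unless the architecture defines HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE. A minimal sketch of that fallback, assuming it sits next to the existing page variant in include/linux/mm.h (simplified, not the exact core-MM code):

	/* Sketch of the core-MM fallback that the arch define suppresses. */
	#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
	static inline int arch_make_folio_accessible(struct folio *folio)
	{
		/* No-op on architectures without protected-guest memory export. */
		return 0;
	}
	#endif

The wrapper itself has to live in uv.c rather than asm/page.h, presumably because page_folio() is not usable from that header.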

There are only two arch_make_page_accessible() calls remaining in gup.c.
We can now drop HAVE_ARCH_MAKE_PAGE_ACCESSIBLE completely from core-MM.
We'll handle that separately, once the s390x part has landed.
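
For reference, the remaining callers are on the GUP pinning paths and are roughly of the following shape (a from-memory sketch, not quoted from this series; names and error handling may differ):

	/* Rough sketch of a mm/gup.c call site on the FOLL_PIN path. */
	if (flags & FOLL_PIN) {
		ret = arch_make_page_accessible(page);
		if (ret) {
			unpin_user_page(page);
			return ERR_PTR(ret);
		}
	}

Once those two call sites take a folio, the page wrapper and its macro can be deleted.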
Suggested-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
Link: https://lore.kernel.org/r/20240508182955.358628-10-david@redhat.com
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent 7d171434
arch/s390/include/asm/page.h
@@ -162,6 +162,7 @@ static inline int page_reset_referenced(unsigned long addr)
 #define _PAGE_ACC_BITS		0xf0	/* HW access control bits */
 
 struct page;
+struct folio;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
@@ -174,6 +175,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
 #define HAVE_ARCH_ALLOC_PAGE
 
 #if IS_ENABLED(CONFIG_PGSTE)
+int arch_make_folio_accessible(struct folio *folio);
+#define HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
 int arch_make_page_accessible(struct page *page);
 #define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
 #endif
arch/s390/kernel/uv.c
@@ -498,14 +498,13 @@ int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
 EXPORT_SYMBOL_GPL(gmap_destroy_page);
 
 /*
- * To be called with the page locked or with an extra reference! This will
- * prevent gmap_make_secure from touching the page concurrently. Having 2
- * parallel make_page_accessible is fine, as the UV calls will become a
- * no-op if the page is already exported.
+ * To be called with the folio locked or with an extra reference! This will
+ * prevent gmap_make_secure from touching the folio concurrently. Having 2
+ * parallel arch_make_folio_accessible is fine, as the UV calls will become a
+ * no-op if the folio is already exported.
  */
-int arch_make_page_accessible(struct page *page)
+int arch_make_folio_accessible(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	int rc = 0;
 
 	/* See gmap_make_secure(): large folios cannot be secure */
@@ -537,8 +536,13 @@ int arch_make_page_accessible(struct page *page)
 	return rc;
 }
-EXPORT_SYMBOL_GPL(arch_make_page_accessible);
+EXPORT_SYMBOL_GPL(arch_make_folio_accessible);
+
+int arch_make_page_accessible(struct page *page)
+{
+	return arch_make_folio_accessible(page_folio(page));
+}
+EXPORT_SYMBOL_GPL(arch_make_page_accessible);
 #endif
 
 #if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
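
The comment in the hunk above states the calling convention: hold the folio lock or an extra reference, so gmap_make_secure() cannot touch the folio concurrently. As a hypothetical illustration (the helper name is invented here), a caller that holds a reference but not the lock could take the lock around the call:

	/* Hypothetical helper; the name is invented for illustration only. */
	static int uv_make_folio_accessible_locked(struct folio *folio)
	{
		int rc;

		folio_lock(folio);			/* blocks a racing gmap_make_secure() */
		rc = arch_make_folio_accessible(folio);	/* UV calls are no-ops if already exported */
		folio_unlock(folio);
		return rc;
	}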
arch/s390/mm/fault.c
@@ -492,6 +492,7 @@ void do_secure_storage_access(struct pt_regs *regs)
 	unsigned long addr = get_fault_address(regs);
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
+	struct folio *folio;
 	struct page *page;
 	struct gmap *gmap;
 	int rc;
@@ -539,17 +540,18 @@ void do_secure_storage_access(struct pt_regs *regs)
 			mmap_read_unlock(mm);
 			break;
 		}
-		if (arch_make_page_accessible(page))
+		folio = page_folio(page);
+		if (arch_make_folio_accessible(folio))
 			send_sig(SIGSEGV, current, 0);
-		put_page(page);
+		folio_put(folio);
 		mmap_read_unlock(mm);
 		break;
 	case KERNEL_FAULT:
-		page = phys_to_page(addr);
-		if (unlikely(!try_get_page(page)))
+		folio = phys_to_folio(addr);
+		if (unlikely(!folio_try_get(folio)))
 			break;
-		rc = arch_make_page_accessible(page);
-		put_page(page);
+		rc = arch_make_folio_accessible(folio);
+		folio_put(folio);
 		if (rc)
 			BUG();
 		break;