Commit dcc0b490 authored by Linus Torvalds

Merge tag 'powerpc-5.11-8' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fix from Michael Ellerman:
 "One fix for a regression seen in io_uring, introduced by our support
  for KUAP (Kernel User Access Prevention) with the Hash MMU.

  Thanks to Aneesh Kumar K.V, and Zorro Lang"

* tag 'powerpc-5.11-8' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/kuap: Allow kernel thread to access userspace after kthread_use_mm
parents c05263df 8c511eff
@@ -199,25 +199,31 @@ DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
#ifdef CONFIG_PPC_PKEY
extern u64 __ro_after_init default_uamor;
extern u64 __ro_after_init default_amr;
extern u64 __ro_after_init default_iamr;
#include <asm/mmu.h>
#include <asm/ptrace.h>
/* /* usage of kthread_use_mm() should inherit the
* For kernel thread that doesn't have thread.regs return * AMR value of the operating address space. But, the AMR value is
* default AMR/IAMR values. * thread-specific and we inherit the address space and not thread
* access restrictions. Because of this ignore AMR value when accessing
* userspace via kernel thread.
*/ */
static inline u64 current_thread_amr(void) static inline u64 current_thread_amr(void)
{ {
if (current->thread.regs) if (current->thread.regs)
return current->thread.regs->amr; return current->thread.regs->amr;
return AMR_KUAP_BLOCKED; return default_amr;
} }
static inline u64 current_thread_iamr(void) static inline u64 current_thread_iamr(void)
{ {
if (current->thread.regs) if (current->thread.regs)
return current->thread.regs->iamr; return current->thread.regs->iamr;
return AMR_KUEP_BLOCKED; return default_iamr;
} }
#endif /* CONFIG_PPC_PKEY */
......
@@ -5,10 +5,6 @@
#include <asm/book3s/64/hash-pkey.h>
extern u64 __ro_after_init default_uamor;
extern u64 __ro_after_init default_amr;
extern u64 __ro_after_init default_iamr;
static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags) static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags)
{ {
if (!mmu_has_feature(MMU_FTR_PKEY)) if (!mmu_has_feature(MMU_FTR_PKEY))
......
@@ -31,6 +31,7 @@ static u32 initial_allocation_mask __ro_after_init;
/*
 * Boot-time default AMR/IAMR/UAMOR, returned by
 * current_thread_amr()/current_thread_iamr() for kernel threads that
 * have no thread.regs. default_amr = ~0 and the alternating-bit
 * default_iamr block all keys until boot code relaxes them.
 */
u64 default_amr __ro_after_init = ~0x0UL;
u64 default_iamr __ro_after_init = 0x5555555555555555UL;
u64 default_uamor __ro_after_init;
/*
 * NOTE(review): exported presumably because current_thread_amr() is a
 * static inline in a header reachable from module code — confirm.
 */
EXPORT_SYMBOL(default_amr);
/* /*
* Key used to implement PROT_EXEC mmap. Denies READ/WRITE * Key used to implement PROT_EXEC mmap. Denies READ/WRITE
* We pick key 2 because 0 is special key and 1 is reserved as per ISA. * We pick key 2 because 0 is special key and 1 is reserved as per ISA.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment