Commit eb52f66f authored by Christophe Leroy, committed by Michael Ellerman

powerpc/kuap: KUAP enabling/disabling functions must be __always_inline

Objtool reports the following warnings:

  arch/powerpc/kernel/signal_32.o: warning: objtool:
    __prevent_user_access.constprop.0+0x4 (.text+0x4):
    redundant UACCESS disable

  arch/powerpc/kernel/signal_32.o: warning: objtool: user_access_begin+0x2c
    (.text+0x4c): return with UACCESS enabled

  arch/powerpc/kernel/signal_32.o: warning: objtool: handle_rt_signal32+0x188
    (.text+0x360): call to __prevent_user_access.constprop.0() with UACCESS enabled

  arch/powerpc/kernel/signal_32.o: warning: objtool: handle_signal32+0x150
    (.text+0x4d4): call to __prevent_user_access.constprop.0() with UACCESS enabled

This is due to some KUAP enabling/disabling functions being emitted out of
line although they are marked inline. Use __always_inline instead.
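
As an illustration (not part of the kernel change; the function names below
are made up), "inline" is only a hint which the compiler may ignore, for
example by emitting a constant-propagated out-of-line clone, whereas
__always_inline forces the body into every caller:

  /* Standalone sketch: build with e.g. gcc -O2 -c and inspect with objdump -d. */
  #define __always_inline inline __attribute__((__always_inline__))

  /* "inline" is only a hint: the compiler may still emit this out of line
   * (e.g. as a .constprop clone), and objtool then analyses that copy as a
   * separate function containing a bare UACCESS state change. */
  static inline void hint_only(void)
  {
  	asm volatile("nop" ::: "memory");	/* stand-in for the real mtsr/mtspr */
  }

  /* __always_inline guarantees the body is expanded into the caller, so the
   * state change is always seen next to the matching enable/disable. */
  static __always_inline void always_expanded(void)
  {
  	asm volatile("nop" ::: "memory");	/* stand-in for the real mtsr/mtspr */
  }

  void caller(void)
  {
  	hint_only();		/* may become a call to an out-of-line copy */
  	always_expanded();	/* always inlined here */
  }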
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/ca5e50ddbec3867db5146ebddbc9a1dc0e443bc8.1689091022.git.christophe.leroy@csgroup.eu
parent 5222a1d5
@@ -15,19 +15,19 @@
 
 #define KUAP_NONE	(~0UL)
 
-static inline void kuap_lock_one(unsigned long addr)
+static __always_inline void kuap_lock_one(unsigned long addr)
 {
 	mtsr(mfsr(addr) | SR_KS, addr);
 	isync();	/* Context sync required after mtsr() */
 }
 
-static inline void kuap_unlock_one(unsigned long addr)
+static __always_inline void kuap_unlock_one(unsigned long addr)
 {
 	mtsr(mfsr(addr) & ~SR_KS, addr);
 	isync();	/* Context sync required after mtsr() */
 }
 
-static inline void __kuap_save_and_lock(struct pt_regs *regs)
+static __always_inline void __kuap_save_and_lock(struct pt_regs *regs)
 {
 	unsigned long kuap = current->thread.kuap;
@@ -40,11 +40,11 @@ static inline void __kuap_save_and_lock(struct pt_regs *regs)
 }
 #define __kuap_save_and_lock __kuap_save_and_lock
 
-static inline void kuap_user_restore(struct pt_regs *regs)
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
 {
 }
 
-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 {
 	if (unlikely(kuap != KUAP_NONE)) {
 		current->thread.kuap = KUAP_NONE;
@@ -59,7 +59,7 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 	kuap_unlock_one(regs->kuap);
 }
 
-static inline unsigned long __kuap_get_and_assert_locked(void)
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
 {
 	unsigned long kuap = current->thread.kuap;
@@ -94,7 +94,7 @@ static __always_inline void __prevent_user_access(unsigned long dir)
 	kuap_lock_one(kuap);
 }
 
-static inline unsigned long __prevent_user_access_return(void)
+static __always_inline unsigned long __prevent_user_access_return(void)
 {
 	unsigned long flags = current->thread.kuap;
@@ -106,7 +106,7 @@ static inline unsigned long __prevent_user_access_return(void)
 	return flags;
 }
 
-static inline void __restore_user_access(unsigned long flags)
+static __always_inline void __restore_user_access(unsigned long flags)
 {
 	if (flags != KUAP_NONE) {
 		current->thread.kuap = flags;
@@ -114,7 +114,7 @@ static inline void __restore_user_access(unsigned long flags)
 	}
 }
 
-static inline bool
+static __always_inline bool
 __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	unsigned long kuap = regs->kuap;
...
@@ -213,14 +213,14 @@ extern u64 __ro_after_init default_iamr;
  * access restrictions. Because of this ignore AMR value when accessing
  * userspace via kernel thread.
  */
-static inline u64 current_thread_amr(void)
+static __always_inline u64 current_thread_amr(void)
 {
 	if (current->thread.regs)
 		return current->thread.regs->amr;
 	return default_amr;
 }
 
-static inline u64 current_thread_iamr(void)
+static __always_inline u64 current_thread_iamr(void)
 {
 	if (current->thread.regs)
 		return current->thread.regs->iamr;
@@ -230,7 +230,7 @@ static inline u64 current_thread_iamr(void)
 
 #ifdef CONFIG_PPC_KUAP
 
-static inline void kuap_user_restore(struct pt_regs *regs)
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
 {
 	bool restore_amr = false, restore_iamr = false;
 	unsigned long amr, iamr;
@@ -269,7 +269,7 @@ static inline void kuap_user_restore(struct pt_regs *regs)
 	 */
 }
 
-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
 {
 	if (likely(regs->amr == amr))
 		return;
@@ -285,7 +285,7 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
 	 */
 }
 
-static inline unsigned long __kuap_get_and_assert_locked(void)
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
 {
 	unsigned long amr = mfspr(SPRN_AMR);
@@ -302,7 +302,7 @@ static inline unsigned long __kuap_get_and_assert_locked(void)
  * because that would require an expensive read/modify write of the AMR.
  */
-static inline unsigned long get_kuap(void)
+static __always_inline unsigned long get_kuap(void)
 {
 	/*
 	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
@@ -332,7 +332,8 @@ static __always_inline void set_kuap(unsigned long value)
 	isync();
 }
 
-static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+static __always_inline bool
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	/*
 	 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
@@ -375,12 +376,12 @@ static __always_inline void allow_user_access(void __user *to, const void __user *from,
 
 #else /* CONFIG_PPC_KUAP */
 
-static inline unsigned long get_kuap(void)
+static __always_inline unsigned long get_kuap(void)
 {
 	return AMR_KUAP_BLOCKED;
 }
 
-static inline void set_kuap(unsigned long value) { }
+static __always_inline void set_kuap(unsigned long value) { }
 
 static __always_inline void allow_user_access(void __user *to, const void __user *from,
 					      unsigned long size, unsigned long dir)
@@ -395,7 +396,7 @@ static __always_inline void prevent_user_access(unsigned long dir)
 	do_uaccess_flush();
 }
 
-static inline unsigned long prevent_user_access_return(void)
+static __always_inline unsigned long prevent_user_access_return(void)
 {
 	unsigned long flags = get_kuap();
@@ -406,7 +407,7 @@ static inline unsigned long prevent_user_access_return(void)
 	return flags;
 }
 
-static inline void restore_user_access(unsigned long flags)
+static __always_inline void restore_user_access(unsigned long flags)
 {
 	set_kuap(flags);
 	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
...
@@ -57,14 +57,14 @@ static inline void setup_kuap(bool disabled) { }
 
 static __always_inline bool kuap_is_disabled(void) { return true; }
 
-static inline bool
+static __always_inline bool
 __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	return false;
 }
 
-static inline void kuap_user_restore(struct pt_regs *regs) { }
-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
+static __always_inline void kuap_user_restore(struct pt_regs *regs) { }
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
 
 /*
  * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
@@ -72,11 +72,11 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
  * platforms.
  */
 #ifndef CONFIG_PPC_BOOK3S_64
-static inline void __allow_user_access(void __user *to, const void __user *from,
+static __always_inline void __allow_user_access(void __user *to, const void __user *from,
 				       unsigned long size, unsigned long dir) { }
-static inline void __prevent_user_access(unsigned long dir) { }
-static inline unsigned long __prevent_user_access_return(void) { return 0UL; }
-static inline void __restore_user_access(unsigned long flags) { }
+static __always_inline void __prevent_user_access(unsigned long dir) { }
+static __always_inline unsigned long __prevent_user_access_return(void) { return 0UL; }
+static __always_inline void __restore_user_access(unsigned long flags) { }
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 #endif /* CONFIG_PPC_KUAP */
...
@@ -11,24 +11,24 @@
 
 #include <asm/reg.h>
 
-static inline void __kuap_save_and_lock(struct pt_regs *regs)
+static __always_inline void __kuap_save_and_lock(struct pt_regs *regs)
 {
 	regs->kuap = mfspr(SPRN_MD_AP);
 	mtspr(SPRN_MD_AP, MD_APG_KUAP);
 }
 #define __kuap_save_and_lock __kuap_save_and_lock
 
-static inline void kuap_user_restore(struct pt_regs *regs)
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
 {
 }
 
-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 {
 	mtspr(SPRN_MD_AP, regs->kuap);
 }
 
 #ifdef CONFIG_PPC_KUAP_DEBUG
-static inline unsigned long __kuap_get_and_assert_locked(void)
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
 {
 	WARN_ON_ONCE(mfspr(SPRN_MD_AP) >> 16 != MD_APG_KUAP >> 16);
@@ -37,18 +37,18 @@ static inline unsigned long __kuap_get_and_assert_locked(void)
 #define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
 #endif
 
-static inline void __allow_user_access(void __user *to, const void __user *from,
+static __always_inline void __allow_user_access(void __user *to, const void __user *from,
 				       unsigned long size, unsigned long dir)
 {
 	mtspr(SPRN_MD_AP, MD_APG_INIT);
 }
 
-static inline void __prevent_user_access(unsigned long dir)
+static __always_inline void __prevent_user_access(unsigned long dir)
 {
 	mtspr(SPRN_MD_AP, MD_APG_KUAP);
 }
 
-static inline unsigned long __prevent_user_access_return(void)
+static __always_inline unsigned long __prevent_user_access_return(void)
 {
 	unsigned long flags;
@@ -59,12 +59,12 @@ static inline unsigned long __prevent_user_access_return(void)
 	return flags;
 }
 
-static inline void __restore_user_access(unsigned long flags)
+static __always_inline void __restore_user_access(unsigned long flags)
 {
 	mtspr(SPRN_MD_AP, flags);
 }
 
-static inline bool
+static __always_inline bool
 __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
...
@@ -17,14 +17,14 @@
 
 #include <asm/reg.h>
 
-static inline void __kuap_lock(void)
+static __always_inline void __kuap_lock(void)
 {
 	mtspr(SPRN_PID, 0);
 	isync();
 }
 #define __kuap_lock __kuap_lock
 
-static inline void __kuap_save_and_lock(struct pt_regs *regs)
+static __always_inline void __kuap_save_and_lock(struct pt_regs *regs)
 {
 	regs->kuap = mfspr(SPRN_PID);
 	mtspr(SPRN_PID, 0);
@@ -32,7 +32,7 @@ static inline void __kuap_save_and_lock(struct pt_regs *regs)
 }
 #define __kuap_save_and_lock __kuap_save_and_lock
 
-static inline void kuap_user_restore(struct pt_regs *regs)
+static __always_inline void kuap_user_restore(struct pt_regs *regs)
 {
 	if (kuap_is_disabled())
 		return;
@@ -42,7 +42,7 @@ static inline void kuap_user_restore(struct pt_regs *regs)
 	/* Context synchronisation is performed by rfi */
 }
 
-static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 {
 	if (regs->kuap)
 		mtspr(SPRN_PID, current->thread.pid);
@@ -51,7 +51,7 @@ static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
 }
 
 #ifdef CONFIG_PPC_KUAP_DEBUG
-static inline unsigned long __kuap_get_and_assert_locked(void)
+static __always_inline unsigned long __kuap_get_and_assert_locked(void)
 {
 	WARN_ON_ONCE(mfspr(SPRN_PID));
@@ -60,20 +60,20 @@ static inline unsigned long __kuap_get_and_assert_locked(void)
 #define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
 #endif
 
-static inline void __allow_user_access(void __user *to, const void __user *from,
+static __always_inline void __allow_user_access(void __user *to, const void __user *from,
 				       unsigned long size, unsigned long dir)
 {
 	mtspr(SPRN_PID, current->thread.pid);
 	isync();
 }
 
-static inline void __prevent_user_access(unsigned long dir)
+static __always_inline void __prevent_user_access(unsigned long dir)
 {
 	mtspr(SPRN_PID, 0);
 	isync();
 }
 
-static inline unsigned long __prevent_user_access_return(void)
+static __always_inline unsigned long __prevent_user_access_return(void)
 {
 	unsigned long flags = mfspr(SPRN_PID);
@@ -83,7 +83,7 @@ static inline unsigned long __prevent_user_access_return(void)
 	return flags;
 }
 
-static inline void __restore_user_access(unsigned long flags)
+static __always_inline void __restore_user_access(unsigned long flags)
 {
 	if (flags) {
 		mtspr(SPRN_PID, current->thread.pid);
@@ -91,7 +91,7 @@ static inline void __restore_user_access(unsigned long flags)
 	}
 }
 
-static inline bool
+static __always_inline bool
 __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	return !regs->kuap;
...
@@ -386,7 +386,7 @@ copy_mc_to_user(void __user *to, const void *from, unsigned long n)
 extern long __copy_from_user_flushcache(void *dst, const void __user *src,
 		unsigned size);
 
-static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
 {
 	if (unlikely(!access_ok(ptr, len)))
 		return false;
@@ -401,7 +401,7 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
 #define user_access_save	prevent_user_access_return
 #define user_access_restore	restore_user_access
 
-static __must_check inline bool
+static __must_check __always_inline bool
 user_read_access_begin(const void __user *ptr, size_t len)
 {
 	if (unlikely(!access_ok(ptr, len)))
@@ -415,7 +415,7 @@ user_read_access_begin(const void __user *ptr, size_t len)
 #define user_read_access_begin	user_read_access_begin
 #define user_read_access_end	prevent_current_read_from_user
 
-static __must_check inline bool
+static __must_check __always_inline bool
 user_write_access_begin(const void __user *ptr, size_t len)
 {
 	if (unlikely(!access_ok(ptr, len)))
...