Commit 48a8ab4e authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Michael Ellerman

powerpc/book3s64/pkeys: Don't update SPRN_AMR when in kernel mode.

Now that the kernel correctly stores/restores the userspace AMR/IAMR values,
avoid manipulating AMR and IAMR from the kernel on behalf of userspace.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Reviewed-by: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201127044424.40686-15-aneesh.kumar@linux.ibm.com
parent edc541ec
...@@ -177,6 +177,27 @@ DECLARE_STATIC_KEY_FALSE(uaccess_flush_key); ...@@ -177,6 +177,27 @@ DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
#include <asm/mmu.h> #include <asm/mmu.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
/*
 * For kernel threads that don't have thread.regs, return
 * the default AMR/IAMR values.
 */
static inline u64 current_thread_amr(void)
{
if (current->thread.regs)
return current->thread.regs->amr;
return AMR_KUAP_BLOCKED;
}
static inline u64 current_thread_iamr(void)
{
if (current->thread.regs)
return current->thread.regs->iamr;
return AMR_KUEP_BLOCKED;
}
#endif /* CONFIG_PPC_PKEY */
#ifdef CONFIG_PPC_KUAP
static inline void kuap_user_restore(struct pt_regs *regs) static inline void kuap_user_restore(struct pt_regs *regs)
{ {
if (!mmu_has_feature(MMU_FTR_PKEY)) if (!mmu_has_feature(MMU_FTR_PKEY))
......
...@@ -226,10 +226,6 @@ struct thread_struct { ...@@ -226,10 +226,6 @@ struct thread_struct {
struct thread_vr_state ckvr_state; /* Checkpointed VR state */ struct thread_vr_state ckvr_state; /* Checkpointed VR state */
unsigned long ckvrsave; /* Checkpointed VRSAVE */ unsigned long ckvrsave; /* Checkpointed VRSAVE */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_MEM_KEYS
unsigned long amr;
unsigned long iamr;
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
void* kvm_shadow_vcpu; /* KVM internal data */ void* kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */ #endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
......
...@@ -589,7 +589,6 @@ static void save_all(struct task_struct *tsk) ...@@ -589,7 +589,6 @@ static void save_all(struct task_struct *tsk)
__giveup_spe(tsk); __giveup_spe(tsk);
msr_check_and_clear(msr_all_available); msr_check_and_clear(msr_all_available);
thread_pkey_regs_save(&tsk->thread);
} }
void flush_all_to_thread(struct task_struct *tsk) void flush_all_to_thread(struct task_struct *tsk)
...@@ -1160,8 +1159,6 @@ static inline void save_sprs(struct thread_struct *t) ...@@ -1160,8 +1159,6 @@ static inline void save_sprs(struct thread_struct *t)
t->tar = mfspr(SPRN_TAR); t->tar = mfspr(SPRN_TAR);
} }
#endif #endif
thread_pkey_regs_save(t);
} }
static inline void restore_sprs(struct thread_struct *old_thread, static inline void restore_sprs(struct thread_struct *old_thread,
...@@ -1202,7 +1199,6 @@ static inline void restore_sprs(struct thread_struct *old_thread, ...@@ -1202,7 +1199,6 @@ static inline void restore_sprs(struct thread_struct *old_thread,
mtspr(SPRN_TIDR, new_thread->tidr); mtspr(SPRN_TIDR, new_thread->tidr);
#endif #endif
thread_pkey_regs_restore(new_thread, old_thread);
} }
struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *__switch_to(struct task_struct *prev,
......
...@@ -347,12 +347,6 @@ static bool exception_common(int signr, struct pt_regs *regs, int code, ...@@ -347,12 +347,6 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
current->thread.trap_nr = code; current->thread.trap_nr = code;
/*
* Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need
* to capture the content, if the task gets killed.
*/
thread_pkey_regs_save(&current->thread);
return true; return true;
} }
......
...@@ -281,30 +281,17 @@ void __init setup_kuap(bool disabled) ...@@ -281,30 +281,17 @@ void __init setup_kuap(bool disabled)
} }
#endif #endif
static inline u64 read_amr(void) static inline void update_current_thread_amr(u64 value)
{ {
return mfspr(SPRN_AMR); current->thread.regs->amr = value;
} }
static inline void write_amr(u64 value) static inline void update_current_thread_iamr(u64 value)
{
mtspr(SPRN_AMR, value);
}
static inline u64 read_iamr(void)
{
if (!likely(pkey_execute_disable_supported))
return 0x0UL;
return mfspr(SPRN_IAMR);
}
static inline void write_iamr(u64 value)
{ {
if (!likely(pkey_execute_disable_supported)) if (!likely(pkey_execute_disable_supported))
return; return;
mtspr(SPRN_IAMR, value); current->thread.regs->iamr = value;
} }
#ifdef CONFIG_PPC_MEM_KEYS #ifdef CONFIG_PPC_MEM_KEYS
...@@ -319,17 +306,17 @@ void pkey_mm_init(struct mm_struct *mm) ...@@ -319,17 +306,17 @@ void pkey_mm_init(struct mm_struct *mm)
static inline void init_amr(int pkey, u8 init_bits) static inline void init_amr(int pkey, u8 init_bits)
{ {
u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey)); u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey));
u64 old_amr = read_amr() & ~((u64)(0x3ul) << pkeyshift(pkey)); u64 old_amr = current_thread_amr() & ~((u64)(0x3ul) << pkeyshift(pkey));
write_amr(old_amr | new_amr_bits); update_current_thread_amr(old_amr | new_amr_bits);
} }
static inline void init_iamr(int pkey, u8 init_bits) static inline void init_iamr(int pkey, u8 init_bits)
{ {
u64 new_iamr_bits = (((u64)init_bits & 0x1UL) << pkeyshift(pkey)); u64 new_iamr_bits = (((u64)init_bits & 0x1UL) << pkeyshift(pkey));
u64 old_iamr = read_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey)); u64 old_iamr = current_thread_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey));
write_iamr(old_iamr | new_iamr_bits); update_current_thread_iamr(old_iamr | new_iamr_bits);
} }
/* /*
...@@ -372,30 +359,6 @@ int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey, ...@@ -372,30 +359,6 @@ int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
return 0; return 0;
} }
/*
 * Snapshot the pkey SPRs (AMR/IAMR) into @thread, e.g. so a context
 * switch or core dump sees the task's current key permissions.
 */
void thread_pkey_regs_save(struct thread_struct *thread)
{
	/* Nothing to save when protection keys are not in use. */
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	/*
	 * TODO: Skip saving registers if @thread hasn't used any keys yet.
	 */
	thread->amr = read_amr();
	thread->iamr = read_iamr();
}
/*
 * Load @new_thread's saved AMR/IAMR values into the SPRs at context
 * switch, comparing against @old_thread's saved values — presumably to
 * avoid unnecessary SPR writes when the values are unchanged.
 */
void thread_pkey_regs_restore(struct thread_struct *new_thread,
			      struct thread_struct *old_thread)
{
	/* Nothing to restore when protection keys are not in use. */
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (old_thread->amr != new_thread->amr)
		write_amr(new_thread->amr);

	if (old_thread->iamr != new_thread->iamr)
		write_iamr(new_thread->iamr);
}
int execute_only_pkey(struct mm_struct *mm) int execute_only_pkey(struct mm_struct *mm)
{ {
return mm->context.execute_only_pkey; return mm->context.execute_only_pkey;
...@@ -444,9 +407,9 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute) ...@@ -444,9 +407,9 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
pkey_shift = pkeyshift(pkey); pkey_shift = pkeyshift(pkey);
if (execute) if (execute)
return !(read_iamr() & (IAMR_EX_BIT << pkey_shift)); return !(current_thread_iamr() & (IAMR_EX_BIT << pkey_shift));
amr = read_amr(); amr = current_thread_amr();
if (write) if (write)
return !(amr & (AMR_WR_BIT << pkey_shift)); return !(amr & (AMR_WR_BIT << pkey_shift));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment