Commit baddc87d authored by Michael Ellerman

Merge branch 'fixes' into next

Merge our fixes branch from this cycle. It contains several important
fixes we need in next for testing purposes, and also some that will
conflict with upcoming changes.
parents bb5f33c0 595d153d
@@ -130,7 +130,7 @@ config PPC
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_MEMBARRIER_CALLBACKS
 	select ARCH_HAS_SCALED_CPUTIME		if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
-	select ARCH_HAS_STRICT_KERNEL_RWX	if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
+	select ARCH_HAS_STRICT_KERNEL_RWX	if (PPC32 && !HIBERNATION)
 	select ARCH_HAS_TICK_BROADCAST		if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_UACCESS_FLUSHCACHE
 	select ARCH_HAS_UACCESS_MCSAFE		if PPC64
...
@@ -17,9 +17,9 @@
  * updating the accessed and modified bits in the page table tree.
  */
 
-#define _PAGE_USER	0x001	/* usermode access allowed */
-#define _PAGE_RW	0x002	/* software: user write access allowed */
-#define _PAGE_PRESENT	0x004	/* software: pte contains a translation */
+#define _PAGE_PRESENT	0x001	/* software: pte contains a translation */
+#define _PAGE_HASHPTE	0x002	/* hash_page has made an HPTE for this pte */
+#define _PAGE_USER	0x004	/* usermode access allowed */
 #define _PAGE_GUARDED	0x008	/* G: prohibit speculative access */
 #define _PAGE_COHERENT	0x010	/* M: enforce memory coherence (SMP systems) */
 #define _PAGE_NO_CACHE	0x020	/* I: cache inhibit */
@@ -27,7 +27,7 @@
 #define _PAGE_DIRTY	0x080	/* C: page changed */
 #define _PAGE_ACCESSED	0x100	/* R: page referenced */
 #define _PAGE_EXEC	0x200	/* software: exec allowed */
-#define _PAGE_HASHPTE	0x400	/* hash_page has made an HPTE for this pte */
+#define _PAGE_RW	0x400	/* software: user write access allowed */
 #define _PAGE_SPECIAL	0x800	/* software: Special page */
 #ifdef CONFIG_PTE_64BIT
...
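For context: this hunk restores the earlier 32s bit layout (_PAGE_PRESENT back at 0x001, _PAGE_HASHPTE at 0x002, _PAGE_RW at 0x400), and every rotate constant in the head_32.S and hash_low.S hunks further down changes to match. A minimal standalone sketch, not part of the commit, of how those constants follow from the bit positions (IBM numbering, bit 0 = MSB, so a flag worth 1 << (31 - n) sits at bit n, and rlwinm rD,rS,32-k,n,n moves a flag right by k positions into bit n):

    #include <assert.h>

    #define _PAGE_USER	0x004	/* IBM bit 29 */
    #define _PAGE_RW	0x400	/* IBM bit 21 */

    static unsigned int rotl32(unsigned int x, unsigned int n)
    {
    	n &= 31;
    	return n ? (x << n) | (x >> (32 - n)) : x;
    }

    int main(void)
    {
    	/* rlwinm r3,r5,32-15,21,21: DSISR store bit (IBM bit 6) -> _PAGE_RW */
    	assert((rotl32(1u << (31 - 6), 32 - 15) & _PAGE_RW) == _PAGE_RW);

    	/* rlwimi r3,r9,32-12,29,29: MSR_PR (IBM bit 17) -> _PAGE_USER */
    	assert((rotl32(1u << (31 - 17), 32 - 12) & _PAGE_USER) == _PAGE_USER);
    	return 0;
    }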
@@ -75,7 +75,7 @@
 .macro kuap_check	current, gpr
 #ifdef CONFIG_PPC_KUAP_DEBUG
-	lwz	\gpr2, KUAP(thread)
+	lwz	\gpr, KUAP(thread)
 999:	twnei	\gpr, 0
 	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
 #endif
...
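The fix itself is small: the macro's parameters are `current, gpr`, so the debug load must use `\gpr`; `\gpr2` (apparently copied from a sibling macro that takes `gpr1, gpr2`) names a parameter this macro does not have, and the build breaks as soon as CONFIG_PPC_KUAP_DEBUG is enabled. A tiny, hypothetical illustration of the GAS .macro substitution rule involved:

    	.macro	check_zero reg
    	twnei	\reg, 0		/* \reg substitutes the macro argument */
    	.endm

    	check_zero	r5	/* expands to: twnei r5, 0 */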
@@ -250,9 +250,27 @@ static inline bool arch_irqs_disabled(void)
 	}							\
 } while(0)
 
+static inline bool __lazy_irq_pending(u8 irq_happened)
+{
+	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
+/*
+ * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
+ */
 static inline bool lazy_irq_pending(void)
 {
-	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+	return __lazy_irq_pending(get_paca()->irq_happened);
+}
+
+/*
+ * Check if a lazy IRQ is pending, with no debugging checks.
+ * Should be called with IRQs hard disabled.
+ * For use in RI disabled code or other constrained situations.
+ */
+static inline bool lazy_irq_pending_nocheck(void)
+{
+	return __lazy_irq_pending(local_paca->irq_happened);
 }
 
 /*
...
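The split into __lazy_irq_pending() plus two wrappers exists because callers differ in what they may safely execute. get_paca() is the checked accessor; around this kernel version it resolved to roughly the following (the exact form is an assumption, the mechanism is the point):

    /* paca.h, approximately: the checked accessor routes through debug code */
    #ifdef CONFIG_DEBUG_PREEMPT
    #define get_paca()	((void)debug_smp_processor_id(), local_paca)
    #else
    #define get_paca()	local_paca
    #endif

debug_smp_processor_id() can land in instrumented, traceable code, which must not run on the exit path once MSR[RI] has been cleared. lazy_irq_pending_nocheck() therefore reads local_paca (r13) directly, trading the "called on the right CPU?" sanity check for that safety; the syscall_64.c hunks below are its consumers.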
@@ -732,7 +732,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 	stw	r10,_CCR(r1)
 	stw	r1,KSP(r3)	/* Set old stack pointer */
-	kuap_check r2, r4
+	kuap_check r2, r0
 #ifdef CONFIG_SMP
 	/* We need a sync somewhere here to make sure that if the
 	 * previous task gets rescheduled on another CPU, it sees all
...
@@ -472,15 +472,17 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 #ifdef CONFIG_PPC_BOOK3S
 	/*
 	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
-	 * touched, AMR not set, no exit work created, then this can be used.
+	 * touched, no exit work created, then this can be used.
 	 */
 	.balign IFETCH_ALIGN_BYTES
 	.globl fast_interrupt_return
 fast_interrupt_return:
 _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
+	kuap_check_amr r3, r4
 	ld	r4,_MSR(r1)
 	andi.	r0,r4,MSR_PR
 	bne	.Lfast_user_interrupt_return
+	kuap_restore_amr r3
 	andi.	r0,r4,MSR_RI
 	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
 	bne+	.Lfast_kernel_interrupt_return
...
@@ -971,6 +971,7 @@ EXC_COMMON_BEGIN(system_reset_common)
 	ld	r10,SOFTE(r1)
 	stb	r10,PACAIRQSOFTMASK(r13)
 
+	kuap_restore_amr r10
 	EXCEPTION_RESTORE_REGS
 	RFI_TO_USER_OR_KERNEL
@@ -2435,6 +2436,7 @@ EXC_COMMON_BEGIN(facility_unavailable_common)
 	GEN_COMMON facility_unavailable
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	facility_unavailable_exception
+	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
 	b	interrupt_return
 
 GEN_KVM facility_unavailable
@@ -2464,6 +2466,7 @@ EXC_COMMON_BEGIN(h_facility_unavailable_common)
 	GEN_COMMON h_facility_unavailable
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	facility_unavailable_exception
+	REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
 	b	interrupt_return
 
 GEN_KVM h_facility_unavailable
...
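Both REST_NVGPRS additions address the same hazard: facility_unavailable_exception() may emulate the faulting instruction, and emulation updates the saved register image in the stack frame rather than the live registers. A hypothetical C sketch of the hazard (names invented, pt_regs-like frame):

    /* a pt_regs-like frame holding the interrupted context */
    struct frame { unsigned long gpr[32]; };

    void handler(struct frame *regs)
    {
    	/* emulation writes the *saved* copy of a non-volatile register */
    	regs->gpr[20] = 0xdeadbeef;

    	/* unless the exit path reloads r14-r31 from this frame, as
    	 * REST_NVGPRS(r1) does, the live r20 keeps its stale value
    	 */
    }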
@@ -348,7 +348,7 @@ BEGIN_MMU_FTR_SECTION
 	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
 #endif
 	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
-	rlwinm	r3, r5, 32 - 24, 30, 30	/* DSISR_STORE -> _PAGE_RW */
+	rlwinm	r3, r5, 32 - 15, 21, 21	/* DSISR_STORE -> _PAGE_RW */
 	bl	hash_page
 	b	handle_page_fault_tramp_1
 FTR_SECTION_ELSE
@@ -497,6 +497,7 @@ InstructionTLBMiss:
 	andc.	r1,r1,r0		/* check access & ~permission */
 	bne-	InstructionAddressInvalid /* return if access not permitted */
 	/* Convert linux-style PTE to low word of PPC-style PTE */
+	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
 	ori	r1, r1, 0xe06		/* clear out reserved bits */
 	andc	r1, r0, r1		/* PP = user? 1 : 0 */
 BEGIN_FTR_SECTION
@@ -564,8 +565,9 @@ DataLoadTLBMiss:
 	 * we would need to update the pte atomically with lwarx/stwcx.
 	 */
 	/* Convert linux-style PTE to low word of PPC-style PTE */
-	rlwinm	r1,r0,0,30,30		/* _PAGE_RW -> PP msb */
-	rlwimi	r0,r0,1,30,30		/* _PAGE_USER -> PP msb */
+	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
+	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
+	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
 	ori	r1,r1,0xe04		/* clear out reserved bits */
 	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
 BEGIN_FTR_SECTION
@@ -643,6 +645,7 @@ DataStoreTLBMiss:
 	 * we would need to update the pte atomically with lwarx/stwcx.
 	 */
 	/* Convert linux-style PTE to low word of PPC-style PTE */
+	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
 	li	r1,0xe06		/* clear out reserved bits & PP msb */
 	andc	r1,r0,r1		/* PP = user? 1: 0 */
 BEGIN_FTR_SECTION
...
@@ -344,8 +344,9 @@ _ENTRY(saved_ksp_limit)
 
 /* 0x0C00 - System Call Exception */
 	START_EXCEPTION(0x0C00,	SystemCall)
 	SYSCALL_ENTRY	0xc00
+/*	Trap_0D is commented out to get more space for system call exception */
 
-	EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD)
+/*	EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD) */
 	EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD)
 	EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_STD)
...
@@ -19,12 +19,12 @@ bool arch_ima_get_secureboot(void)
  * to be stored as an xattr or as an appended signature.
  *
  * To avoid duplicate signature verification as much as possible, the IMA
- * policy rule for module appraisal is added only if CONFIG_MODULE_SIG_FORCE
+ * policy rule for module appraisal is added only if CONFIG_MODULE_SIG
  * is not enabled.
  */
 static const char *const secure_rules[] = {
 	"appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
-#ifndef CONFIG_MODULE_SIG_FORCE
+#ifndef CONFIG_MODULE_SIG
 	"appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
 #endif
 	NULL
@@ -50,7 +50,7 @@ static const char *const secure_and_trusted_rules[] = {
 	"measure func=KEXEC_KERNEL_CHECK template=ima-modsig",
 	"measure func=MODULE_CHECK template=ima-modsig",
 	"appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
-#ifndef CONFIG_MODULE_SIG_FORCE
+#ifndef CONFIG_MODULE_SIG
 	"appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
 #endif
 	NULL
...
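The guard change widens the "skip the IMA rule" case: with CONFIG_MODULE_SIG=y the module loader verifies signatures whether or not enforcement (CONFIG_MODULE_SIG_FORCE) is on, so appraising modules again in IMA is redundant in more configurations than the old test covered. For orientation, a sketch of how these tables are consumed, assuming the arch hook of that era (helper names from the surrounding file, exact body an assumption):

    /* called by the IMA core to pick a built-in policy */
    const char *const *arch_get_ima_policy(void)
    {
    	if (!is_ppc_secureboot_enabled())
    		return NULL;
    	return is_ppc_trustedboot_enabled() ?
    		secure_and_trusted_rules : secure_rules;
    }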
@@ -534,6 +534,8 @@ static bool __init parse_cache_info(struct device_node *np,
 	lsizep = of_get_property(np, propnames[3], NULL);
 	if (bsizep == NULL)
 		bsizep = lsizep;
+	if (lsizep == NULL)
+		lsizep = bsizep;
 	if (lsizep != NULL)
 		lsize = be32_to_cpu(*lsizep);
 	if (bsizep != NULL)
...
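The two added lines make the fallback symmetric: some device trees carry only a cache block size, others only a line size, and of_get_property() returns NULL for whichever is absent. After this hunk each value defaults to the other before either is read. The same pattern as a standalone sketch with hypothetical names:

    #include <stdio.h>

    /* two optional values default to each other when only one is present
     * (NULL meaning "property absent")
     */
    static void fill_defaults(const int **block, const int **line)
    {
    	if (*block == NULL)
    		*block = *line;
    	if (*line == NULL)
    		*line = *block;
    }

    int main(void)
    {
    	int b = 128;
    	const int *block = &b, *line = NULL;

    	fill_defaults(&block, &line);
    	printf("block=%d line=%d\n", *block, *line);	/* both 128 */
    	return 0;
    }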
@@ -35,6 +35,8 @@ notrace long system_call_exception(long r3, long r4, long r5,
 	BUG_ON(!FULL_REGS(regs));
 	BUG_ON(regs->softe != IRQS_ENABLED);
 
+	kuap_check_amr();
+
 	account_cpu_user_entry();
 
 #ifdef CONFIG_PPC_SPLPAR
@@ -47,8 +49,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
 	}
 #endif
 
-	kuap_check_amr();
-
 	/*
 	 * This is not required for the syscall exit path, but makes the
 	 * stack frame look nicer. If this was initialised in the first stack
@@ -117,6 +117,8 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 	unsigned long ti_flags;
 	unsigned long ret = 0;
 
+	kuap_check_amr();
+
 	regs->result = r3;
 
 	/* Check whether the syscall is issued inside a restartable sequence */
@@ -189,7 +191,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 
 	/* This pattern matches prep_irq_for_idle */
 	__hard_EE_RI_disable();
-	if (unlikely(lazy_irq_pending())) {
+	if (unlikely(lazy_irq_pending_nocheck())) {
 		__hard_RI_enable();
 		trace_hardirqs_off();
 		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -204,8 +206,6 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
 	local_paca->tm_scratch = regs->msr;
 #endif
 
-	kuap_check_amr();
-
 	account_cpu_user_exit();
 
 	return ret;
@@ -228,6 +228,8 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	BUG_ON(!FULL_REGS(regs));
 	BUG_ON(regs->softe != IRQS_ENABLED);
 
+	kuap_check_amr();
+
 	local_irq_save(flags);
 
 again:
@@ -264,7 +266,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 		trace_hardirqs_on();
 		__hard_EE_RI_disable();
-		if (unlikely(lazy_irq_pending())) {
+		if (unlikely(lazy_irq_pending_nocheck())) {
 			__hard_RI_enable();
 			trace_hardirqs_off();
 			local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -292,8 +294,6 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
 	local_paca->tm_scratch = regs->msr;
 #endif
 
-	kuap_check_amr();
-
 	account_cpu_user_exit();
 
 	return ret;
@@ -313,6 +313,8 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 	BUG_ON(regs->msr & MSR_PR);
 	BUG_ON(!FULL_REGS(regs));
 
+	kuap_check_amr();
+
 	if (unlikely(*ti_flagsp & _TIF_EMULATE_STACK_STORE)) {
 		clear_bits(_TIF_EMULATE_STACK_STORE, ti_flagsp);
 		ret = 1;
@@ -334,7 +336,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
 		trace_hardirqs_on();
 		__hard_EE_RI_disable();
-		if (unlikely(lazy_irq_pending())) {
+		if (unlikely(lazy_irq_pending_nocheck())) {
 			__hard_RI_enable();
 			irq_soft_mask_set(IRQS_ALL_DISABLED);
 			trace_hardirqs_off();
...
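Two coordinated moves run through this file. First, kuap_check_amr() migrates from the tail of each path to its head, so an AMR left unlocked by the interrupted context is caught on entry rather than after the fact. Second, the re-check for lazily-pending IRQs after __hard_EE_RI_disable() switches to lazy_irq_pending_nocheck(), matching the hw_irq.h hunk above, because MSR[RI] is already cleared at that point. For reference, a sketch of what kuap_check_amr() amounts to on Radix, assuming the kup-radix.h shape of this era (an assumption, not shown in this diff):

    /* debug-only assertion that the AMR is in its fully-blocked state */
    static inline void kuap_check_amr(void)
    {
    	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) &&
    	    mmu_has_feature(MMU_FTR_RADIX_KUAP))
    		WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
    }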
@@ -218,11 +218,11 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
 	blr
 
 	/*
-	 * invalid clock
+	 * syscall fallback
 	 */
 99:
-	li	r3, EINVAL
-	crset	so
+	li	r0,__NR_clock_getres
+	sc
 	blr
 	.cfi_endproc
 V_FUNCTION_END(__kernel_clock_getres)
...
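Rather than rejecting unknown clocks in userspace with EINVAL, the stub now loads the syscall number into r0 and issues `sc`, so the kernel remains the single authority on which clock IDs are valid and on the error value returned. The same idea from the caller's side, as a hypothetical C sketch:

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* what a libc-like wrapper effectively provides: a vDSO fast path for
     * known clocks, the full syscall for everything else
     */
    static int my_clock_getres(clockid_t clk, struct timespec *ts)
    {
    	/* (the vDSO fast path would sit here) */
    	return syscall(SYS_clock_getres, clk, ts);	/* fallback */
    }

    int main(void)
    {
    	struct timespec ts;

    	if (my_clock_getres(CLOCK_MONOTONIC, &ts) == 0)
    		printf("res: %ld ns\n", ts.tv_nsec);
    	return 0;
    }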
@@ -35,7 +35,7 @@ mmu_hash_lock:
 /*
  * Load a PTE into the hash table, if possible.
  * The address is in r4, and r3 contains an access flag:
- * _PAGE_RW (0x002) if a write.
+ * _PAGE_RW (0x400) if a write.
  * r9 contains the SRR1 value, from which we use the MSR_PR bit.
  * SPRG_THREAD contains the physical address of the current task's thread.
  *
@@ -69,7 +69,7 @@ _GLOBAL(hash_page)
 	blt+	112f			/* assume user more likely */
 	lis	r5, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
 	addi	r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
-	rlwimi	r3,r9,32-14,31,31	/* MSR_PR -> _PAGE_USER */
+	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
 112:
 #ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
@@ -94,7 +94,7 @@ _GLOBAL(hash_page)
 #else
 	rlwimi	r8,r4,23,20,28		/* compute pte address */
 #endif
-	rlwinm	r0,r3,6,24,24		/* _PAGE_RW access -> _PAGE_DIRTY */
+	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
 	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 
 /*
@@ -310,9 +310,11 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
 _GLOBAL(create_hpte)
 	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
+	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
 	rlwinm	r0,r5,32-6,30,30	/* _PAGE_DIRTY -> PP msb */
-	and	r8,r5,r0		/* writable if _RW & _DIRTY */
-	rlwimi	r5,r5,1,30,30		/* _PAGE_USER -> PP msb */
+	and	r8,r8,r0		/* writable if _RW & _DIRTY */
+	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
+	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
 	ori	r8,r8,0xe04		/* clear out reserved bits */
 	andc	r8,r5,r8		/* PP = user? (rw&dirty? 1: 3): 0 */
 BEGIN_FTR_SECTION
@@ -564,7 +566,7 @@ _GLOBAL(flush_hash_pages)
 33:	lwarx	r8,0,r5			/* fetch the pte flags word */
 	andi.	r0,r8,_PAGE_HASHPTE
 	beq	8f			/* done if HASHPTE is already clear */
-	rlwinm	r8,r8,0,~_PAGE_HASHPTE	/* clear HASHPTE bit */
+	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
 	stwcx.	r8,0,r5			/* update the pte */
 	bne-	33b
...
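One more constant worth decoding: with _PAGE_HASHPTE back at 0x002 (IBM bit 30), "rlwinm r8,r8,0,31,29" is a rotate by zero with a wrap-around mask covering bits 31 through 29, i.e. every bit except bit 30, so it clears exactly _PAGE_HASHPTE. A standalone check of the mask arithmetic (same numbering convention as the sketch near the top):

    #include <assert.h>

    int main(void)
    {
    	unsigned int page_hashpte = 0x002;		/* IBM bit 30 */
    	unsigned int mask_31_to_29 = ~(1u << (31 - 30));	/* wraps past bit 0 */

    	assert(mask_31_to_29 == ~page_hashpte);
    	return 0;
    }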
@@ -397,7 +397,7 @@ config PPC_KUAP
 config PPC_KUAP_DEBUG
 	bool "Extra debugging for Kernel Userspace Access Protection"
-	depends on PPC_KUAP && (PPC_RADIX_MMU || PPC_32)
+	depends on PPC_KUAP && (PPC_RADIX_MMU || PPC32)
 	help
 	  Add extra debugging for Kernel Userspace Access Protection (KUAP)
 	  If you're unsure, say N.
...
@@ -722,22 +722,22 @@ do {									\
 do {									\
 	if (__builtin_constant_p(bh) && (bh) == 0)			\
 		__asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2"	\
-		: "=r" ((USItype)(sh)),					\
-		  "=&r" ((USItype)(sl))					\
+		: "=r" (sh),						\
+		  "=&r" (sl)						\
		: "%r" ((USItype)(ah)),					\
		  "%r" ((USItype)(al)),					\
		  "rI" ((USItype)(bl)));				\
 	else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0)	\
 		__asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2"	\
-		: "=r" ((USItype)(sh)),					\
-		  "=&r" ((USItype)(sl))					\
+		: "=r" (sh),						\
+		  "=&r" (sl)						\
		: "%r" ((USItype)(ah)),					\
		  "%r" ((USItype)(al)),					\
		  "rI" ((USItype)(bl)));				\
 	else								\
 		__asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
-		: "=r" ((USItype)(sh)),					\
-		  "=&r" ((USItype)(sl))					\
+		: "=r" (sh),						\
+		  "=&r" (sl)						\
		: "%r" ((USItype)(ah)),					\
		  "r" ((USItype)(bh)),					\
		  "%r" ((USItype)(al)),					\
@@ -747,36 +747,36 @@ do {									\
 do {									\
 	if (__builtin_constant_p(ah) && (ah) == 0)			\
 		__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
-		: "=r" ((USItype)(sh)),					\
-		  "=&r" ((USItype)(sl))					\
+		: "=r" (sh),						\
+		  "=&r" (sl)						\
		: "r" ((USItype)(bh)),					\
		  "rI" ((USItype)(al)),					\
		  "r" ((USItype)(bl)));					\
 	else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0)	\
 		__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
-		: "=r" ((USItype)(sh)),					\
-		  "=&r" ((USItype)(sl))					\
+		: "=r" (sh),						\
+		  "=&r" (sl)						\
		: "r" ((USItype)(bh)),					\
		  "rI" ((USItype)(al)),					\
		  "r" ((USItype)(bl)));					\
 	else if (__builtin_constant_p(bh) && (bh) == 0)			\
 		__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
-		: "=r" ((USItype)(sh)),					\
-		  "=&r" ((USItype)(sl))					\
+		: "=r" (sh),						\
+		  "=&r" (sl)						\
		: "r" ((USItype)(ah)),					\
		  "rI" ((USItype)(al)),					\
		  "r" ((USItype)(bl)));					\
 	else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0)	\
 		__asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
-		: "=r" ((USItype)(sh)),					\
-		  "=&r" ((USItype)(sl))					\
+		: "=r" (sh),						\
+		  "=&r" (sl)						\
		: "r" ((USItype)(ah)),					\
		  "rI" ((USItype)(al)),					\
		  "r" ((USItype)(bl)));					\
 	else								\
 		__asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
-		: "=r" ((USItype)(sh)),					\
-		  "=&r" ((USItype)(sl))					\
+		: "=r" (sh),						\
+		  "=&r" (sl)						\
		: "r" ((USItype)(ah)),					\
		  "r" ((USItype)(bh)),					\
		  "rI" ((USItype)(al)),					\
@@ -787,7 +787,7 @@ do {									\
 do {									\
 	USItype __m0 = (m0), __m1 = (m1);				\
 	__asm__ ("mulhwu %0,%1,%2"					\
-	: "=r" ((USItype) ph)						\
+	: "=r" (ph)							\
	: "%r" (__m0),							\
	  "r" (__m1));							\
 	(pl) = __m0 * __m1;						\
...
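All of these hunks make one mechanical change: the (USItype) casts are dropped from the asm output operands, and only there; the input operands keep theirs. A cast expression is not an lvalue, an asm output must be one, and newer compilers reject the cast form outright, while casts on inputs (rvalues) remain fine. A self-contained sketch of the corrected idiom, PowerPC-only and with a hypothetical helper name:

    typedef unsigned int USItype;

    /* 64-bit add from 32-bit halves, using the same addc/adde pair as the
     * add_ssaaaa macros: sl = al + bl (carry out), sh = ah + bh + carry
     */
    static void add_64(USItype ah, USItype al, USItype bh, USItype bl,
    		   USItype *sh, USItype *sl)
    {
    	USItype h, l;

    	__asm__ ("addc %1,%3,%5\n\tadde %0,%2,%4"
    		 : "=r" (h), "=&r" (l)	/* outputs: plain lvalues, no casts */
    		 : "r" (ah), "r" (al), "r" (bh), "r" (bl));
    	*sh = h;
    	*sl = l;
    }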