Commit f0098155 authored by Hector Martin, committed by Catalin Marinas

arm64: Always keep DAIF.[IF] in sync

Apple SoCs (A11 and newer) have some interrupt sources hardwired to the
FIQ line. We implement support for this by simply treating IRQs and FIQs
the same way in the interrupt vectors.

To support these systems, the FIQ mask bit needs to be kept in sync with
the IRQ mask bit, so both kinds of exceptions are masked together. No
other platforms should be delivering FIQ exceptions right now, and we
already unmask FIQ in normal process context, so this should not have an
effect on other systems - if spurious FIQs were arriving, they would
already panic the kernel.
Signed-off-by: Hector Martin <marcan@marcan.st>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Hector Martin <marcan@marcan.st>
Cc: James Morse <james.morse@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210315115629.57191-6-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 9eb563cd
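For context, the immediate passed to "msr daifset" / "msr daifclr" is a 4-bit mask with D=8, A=4, I=2, F=1; keeping DAIF.I and DAIF.F in sync is what turns every #2 below into #3, and drops the F bit from enable_da_f's #(8 | 4 | 1). A minimal standalone sketch of that arithmetic (the DAIF_IMM_* names are illustrative, not kernel identifiers):

/* Standalone sketch: DAIF immediate encoding (D=8, A=4, I=2, F=1). */
#include <stdio.h>

#define DAIF_IMM_D 8   /* debug exceptions      */
#define DAIF_IMM_A 4   /* SError (asynchronous) */
#define DAIF_IMM_I 2   /* IRQ                   */
#define DAIF_IMM_F 1   /* FIQ                   */

int main(void)
{
	/* Masking IRQ alone used "msr daifset, #2"; masking IRQ and FIQ
	 * together needs #3. */
	printf("irq only: #%d, irq+fiq: #%d\n",
	       DAIF_IMM_I, DAIF_IMM_I | DAIF_IMM_F);

	/* enable_da_f cleared D, A and F (leaving only IRQ masked);
	 * enable_da clears only D and A, leaving IRQ and FIQ masked together. */
	printf("enable_da_f: #%d, enable_da: #%d\n",
	       DAIF_IMM_D | DAIF_IMM_A | DAIF_IMM_F,
	       DAIF_IMM_D | DAIF_IMM_A);
	return 0;
}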
@@ -173,7 +173,7 @@ static inline void gic_pmr_mask_irqs(void)
 static inline void gic_arch_enable_irqs(void)
 {
-	asm volatile ("msr daifclr, #2" : : : "memory");
+	asm volatile ("msr daifclr, #3" : : : "memory");
 }
 #endif /* __ASSEMBLY__ */
@@ -40,9 +40,9 @@
 	msr daif, \flags
 	.endm
-	/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
-	.macro enable_da_f
-	msr daifclr, #(8 | 4 | 1)
+	/* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */
+	.macro enable_da
+	msr daifclr, #(8 | 4)
 	.endm
 /*
@@ -50,7 +50,7 @@
  */
 	.macro save_and_disable_irq, flags
 	mrs \flags, daif
-	msr daifset, #2
+	msr daifset, #3
 	.endm
 	.macro restore_irq, flags
@@ -13,8 +13,8 @@
 #include <asm/ptrace.h>
 #define DAIF_PROCCTX 0
-#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
-#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT)
+#define DAIF_PROCCTX_NOIRQ (PSR_I_BIT | PSR_F_BIT)
+#define DAIF_ERRCTX (PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
 #define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
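The relationship between these context masks is easier to see spelled out: each one is the previous one plus one more exception class, and IRQ/FIQ now travel together in every context. A standalone sketch (PSR_*_BIT values taken from the architectural PSTATE bit layout; not kernel code):

#include <stdio.h>

#define PSR_F_BIT 0x00000040
#define PSR_I_BIT 0x00000080
#define PSR_A_BIT 0x00000100
#define PSR_D_BIT 0x00000200

#define DAIF_PROCCTX       0
#define DAIF_PROCCTX_NOIRQ (PSR_I_BIT | PSR_F_BIT)
#define DAIF_ERRCTX        (PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
#define DAIF_MASK          (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)

int main(void)
{
	/* nothing masked -> IRQ+FIQ masked -> plus SError -> plus debug */
	printf("%d %d %d\n",
	       DAIF_PROCCTX_NOIRQ == (DAIF_PROCCTX | PSR_I_BIT | PSR_F_BIT),
	       DAIF_ERRCTX == (DAIF_PROCCTX_NOIRQ | PSR_A_BIT),
	       DAIF_MASK == (DAIF_ERRCTX | PSR_D_BIT));   /* prints "1 1 1" */
	return 0;
}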
@@ -47,7 +47,7 @@ static inline unsigned long local_daif_save_flags(void)
 	if (system_uses_irq_prio_masking()) {
 		/* If IRQs are masked with PMR, reflect it in the flags */
 		if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
-			flags |= PSR_I_BIT;
+			flags |= PSR_I_BIT | PSR_F_BIT;
 	}
 	return flags;
@@ -69,7 +69,7 @@ static inline void local_daif_restore(unsigned long flags)
 	bool irq_disabled = flags & PSR_I_BIT;
 	WARN_ON(system_has_prio_mask_debugging() &&
-		!(read_sysreg(daif) & PSR_I_BIT));
+		(read_sysreg(daif) & (PSR_I_BIT | PSR_F_BIT)) != (PSR_I_BIT | PSR_F_BIT));
 	if (!irq_disabled) {
 		trace_hardirqs_on();
@@ -86,7 +86,7 @@ static inline void local_daif_restore(unsigned long flags)
 		 * If interrupts are disabled but we can take
 		 * asynchronous errors, we can take NMIs
 		 */
-		flags &= ~PSR_I_BIT;
+		flags &= ~(PSR_I_BIT | PSR_F_BIT);
 		pmr = GIC_PRIO_IRQOFF;
 	} else {
 		pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
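The branch above encodes the pseudo-NMI rule: when priority masking is in use, plain IRQ disabling is expressed via PMR so NMI-priority interrupts stay deliverable, and only when SError is also masked does everything fall back to DAIF. A rough standalone sketch of that decision (the GIC_PRIO_* values here are illustrative simplifications, not taken from this diff):

#include <stdio.h>

#define PSR_F_BIT 0x40
#define PSR_I_BIT 0x80
#define PSR_A_BIT 0x100

/* Illustrative priority values: lower number == higher priority. */
#define GIC_PRIO_IRQON     0xe0   /* normal IRQs allowed             */
#define GIC_PRIO_IRQOFF    0x60   /* only NMI-priority IRQs allowed  */
#define GIC_PRIO_PSR_I_SET 0x10   /* marker: masking is done in DAIF */

static unsigned int pmr_for_flags(unsigned long flags)
{
	if (!(flags & PSR_I_BIT))
		return GIC_PRIO_IRQON;                   /* IRQs enabled        */
	if (!(flags & PSR_A_BIT))
		return GIC_PRIO_IRQOFF;                  /* IRQs off, NMIs on   */
	return GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;      /* all masked via DAIF */
}

int main(void)
{
	printf("%#x %#x %#x\n",
	       pmr_for_flags(0),
	       pmr_for_flags(PSR_I_BIT | PSR_F_BIT),
	       pmr_for_flags(PSR_A_BIT | PSR_I_BIT | PSR_F_BIT));
	return 0;
}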
@@ -12,15 +12,13 @@
 /*
  * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
- * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
+ * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
  * order:
  * Masking debug exceptions causes all other exceptions to be masked too/
- * Masking SError masks irq, but not debug exceptions. Masking irqs has no
- * side effects for other flags. Keeping to this order makes it easier for
- * entry.S to know which exceptions should be unmasked.
- *
- * FIQ is never expected, but we mask it when we disable debug exceptions, and
- * unmask it at all other times.
+ * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
+ * always masked and unmasked together, and have no side effects for other
+ * flags. Keeping to this order makes it easier for entry.S to know which
+ * exceptions should be unmasked.
  */
 /*
@@ -35,7 +33,7 @@ static inline void arch_local_irq_enable(void)
 	}
 	asm volatile(ALTERNATIVE(
-		"msr daifclr, #2 // arch_local_irq_enable",
+		"msr daifclr, #3 // arch_local_irq_enable",
 		__msr_s(SYS_ICC_PMR_EL1, "%0"),
 		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
@@ -54,7 +52,7 @@ static inline void arch_local_irq_disable(void)
 	}
 	asm volatile(ALTERNATIVE(
-		"msr daifset, #2 // arch_local_irq_disable",
+		"msr daifset, #3 // arch_local_irq_disable",
 		__msr_s(SYS_ICC_PMR_EL1, "%0"),
 		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
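The masking-order comment rewritten above states two invariants: masking a higher class (debug, then SError) implies masking everything below it, and IRQ/FIQ are now always masked and unmasked as a pair. A standalone sketch of those rules as a checker (illustrative only; the helper name is not a kernel function):

#include <stdbool.h>
#include <stdio.h>

#define PSR_F_BIT 0x40
#define PSR_I_BIT 0x80
#define PSR_A_BIT 0x100
#define PSR_D_BIT 0x200

static bool daif_order_ok(unsigned long flags)
{
	bool d = flags & PSR_D_BIT, a = flags & PSR_A_BIT;
	bool i = flags & PSR_I_BIT, f = flags & PSR_F_BIT;

	if (i != f)            /* IRQ and FIQ must now move together    */
		return false;
	if (d && !(a && i))    /* masking debug masks everything else   */
		return false;
	if (a && !i)           /* masking SError masks IRQ/FIQ as well  */
		return false;
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       daif_order_ok(0),                                 /* process ctx: ok  */
	       daif_order_ok(PSR_A_BIT | PSR_I_BIT | PSR_F_BIT), /* error ctx: ok    */
	       daif_order_ok(PSR_I_BIT));                        /* I without F: bad */
	return 0;
}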
@@ -533,7 +533,7 @@ alternative_endif
 .macro el1_interrupt_handler, handler:req
 	gic_prio_irq_setup pmr=x20, tmp=x1
-	enable_da_f
+	enable_da
 	mov x0, sp
 	bl enter_el1_irq_or_nmi
@@ -544,8 +544,10 @@ alternative_endif
 	ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
 alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 	/*
-	 * DA_F were cleared at start of handling. If anything is set in DAIF,
-	 * we come back from an NMI, so skip preemption
+	 * DA were cleared at start of handling, and IF are cleared by
+	 * the GIC irqchip driver using gic_arch_enable_irqs() for
+	 * normal IRQs. If anything is set, it means we come back from
+	 * an NMI instead of a normal IRQ, so skip preemption
 	 */
 	mrs x0, daif
 	orr x24, x24, x0
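The updated comment relies on a small trick: in the pseudo-NMI case a normal IRQ reaches this point with all of DAIF clear, while an NMI still has bits set, so OR-ing DAIF into the preempt count makes the later "count == 0" preemption check fail for NMIs without an extra branch. A standalone sketch of the effect (illustrative, not kernel code):

#include <stdio.h>

/* 0x3c0 == D|A|I|F all set in the SPSR/DAIF bit positions (bits 6-9). */
static int may_preempt(unsigned long preempt_count, unsigned long daif)
{
	return (preempt_count | daif) == 0;
}

int main(void)
{
	printf("normal IRQ, count 0: %d\n", may_preempt(0, 0));     /* 1: preempt */
	printf("NMI,        count 0: %d\n", may_preempt(0, 0x3c0)); /* 0: skip    */
	printf("normal IRQ, count 2: %d\n", may_preempt(2, 0));     /* 0: skip    */
	return 0;
}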
@@ -562,7 +564,7 @@ alternative_else_nop_endif
 .macro el0_interrupt_handler, handler:req
 	gic_prio_irq_setup pmr=x20, tmp=x0
 	user_exit_irqoff
-	enable_da_f
+	enable_da
 	tbz x22, #55, 1f
 	bl do_el0_irq_bp_hardening
@@ -763,7 +765,7 @@ el0_error_naked:
 	mov x0, sp
 	mov x1, x25
 	bl do_serror
-	enable_da_f
+	enable_da
 	b ret_to_user
 SYM_CODE_END(el0_error)
@@ -84,7 +84,7 @@ static void noinstr __cpu_do_idle_irqprio(void)
 	unsigned long daif_bits;
 	daif_bits = read_sysreg(daif);
-	write_sysreg(daif_bits | PSR_I_BIT, daif);
+	write_sysreg(daif_bits | PSR_I_BIT | PSR_F_BIT, daif);
 	/*
 	 * Unmask PMR before going idle to make sure interrupts can
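The idle path shown above depends on the same split: DAIF.I and DAIF.F are set so nothing is actually taken, while PMR is opened so a pending interrupt can still wake the WFI. A rough standalone sketch of that save/mask/idle/restore ordering, using fake register variables instead of the real system registers (illustrative only):

#include <stdio.h>

#define PSR_F_BIT 0x40
#define PSR_I_BIT 0x80
#define GIC_PRIO_IRQON 0xe0           /* illustrative "fully open" PMR value */

static unsigned long fake_daif;       /* stand-ins for the daif and PMR      */
static unsigned long fake_pmr = 0x60; /* system registers                    */

static void idle_irqprio_sketch(void)
{
	unsigned long daif_bits = fake_daif;           /* save current masks   */
	unsigned long pmr = fake_pmr;

	fake_daif = daif_bits | PSR_I_BIT | PSR_F_BIT; /* mask IRQ+FIQ at CPU  */
	fake_pmr = GIC_PRIO_IRQON;                     /* open the GIC mask    */

	printf("wfi with daif=%#lx pmr=%#lx\n", fake_daif, fake_pmr);

	fake_pmr = pmr;                                /* restore on wakeup    */
	fake_daif = daif_bits;
}

int main(void)
{
	idle_irqprio_sketch();
	return 0;
}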
@@ -188,6 +188,7 @@ static void init_gic_priority_masking(void)
 	cpuflags = read_sysreg(daif);
 	WARN_ON(!(cpuflags & PSR_I_BIT));
+	WARN_ON(!(cpuflags & PSR_F_BIT));
 	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 }
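The added WARN_ON means the boot-time handover to PMR now asserts that both IRQ and FIQ are already masked in DAIF before priority masking takes over, matching the new invariant. A tiny standalone sketch of the check (illustrative, not the kernel helper):

#include <assert.h>

#define PSR_F_BIT 0x40
#define PSR_I_BIT 0x80

static void check_daif_before_pmr(unsigned long cpuflags)
{
	assert(cpuflags & PSR_I_BIT);   /* IRQ masked, as before       */
	assert(cpuflags & PSR_F_BIT);   /* new: FIQ must be masked too */
}

int main(void)
{
	check_daif_before_pmr(PSR_I_BIT | PSR_F_BIT);
	return 0;
}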