Commit 02fb638b authored by Linus Torvalds

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - remove a misuse of kernel-doc comment

 - use "Call trace:" for backtraces like other architectures

 - implement copy_from_kernel_nofault_allowed() to fix a LKDTM test

 - add a "cut here" line for prefetch aborts

 - remove unnecessary Kconfig entry for FRAME_POINTER

 - remove iWMMXt support for PJ4/PJ4B cores

 - use bitfield helpers in ptrace to improve readability

 - check if folio is reserved before flushing

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: 9359/1: flush: check if the folio is reserved for no-mapping addresses
  ARM: 9354/1: ptrace: Use bitfield helpers
  ARM: 9352/1: iwmmxt: Remove support for PJ4/PJ4B cores
  ARM: 9353/1: remove unneeded entry for CONFIG_FRAME_POINTER
  ARM: 9351/1: fault: Add "cut here" line for prefetch aborts
  ARM: 9350/1: fault: Implement copy_from_kernel_nofault_allowed()
  ARM: 9349/1: unwind: Add missing "Call trace:" line
  ARM: 9334/1: mm: init: remove misuse of kernel-doc comment
parents b7187139 b42b3ae1
@@ -505,8 +505,8 @@ source "arch/arm/mm/Kconfig"
 
 config IWMMXT
 	bool "Enable iWMMXt support"
-	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
-	default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 || CPU_PJ4B
+	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK
+	default y if PXA27x || PXA3xx || ARCH_MMP
 	help
 	  Enable support for iWMMXt context switching at run time if
 	  running on a CPU that supports it.
...
@@ -90,9 +90,6 @@ config BACKTRACE_VERBOSE
 	  In most cases, say N here, unless you are intending to debug the
 	  kernel and have access to the kernel binary image.
 
-config FRAME_POINTER
-	bool
-
 config DEBUG_USER
 	bool "Verbose user fault messages"
 	help
...
@@ -10,6 +10,7 @@
 #include <uapi/asm/ptrace.h>
 
 #ifndef __ASSEMBLY__
+#include <linux/bitfield.h>
 #include <linux/types.h>
 
 struct pt_regs {
@@ -35,8 +36,8 @@ struct svc_pt_regs {
 
 #ifndef CONFIG_CPU_V7M
 #define isa_mode(regs) \
-	((((regs)->ARM_cpsr & PSR_J_BIT) >> (__ffs(PSR_J_BIT) - 1)) | \
-	 (((regs)->ARM_cpsr & PSR_T_BIT) >> (__ffs(PSR_T_BIT))))
+	(FIELD_GET(PSR_J_BIT, (regs)->ARM_cpsr) << 1 | \
+	 FIELD_GET(PSR_T_BIT, (regs)->ARM_cpsr))
 #else
 #define isa_mode(regs) 1 /* Thumb */
 #endif
...
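The isa_mode() rewrite above relies on FIELD_GET() from <linux/bitfield.h>, which extracts a field from a register value using only its mask, so the explicit __ffs()-based shifts disappear. The following is a minimal user-space sketch of that idiom, not the kernel macro itself: the FIELD_GET stand-in and the test value are assumptions for illustration, while the PSR_T_BIT/PSR_J_BIT values follow the ARM CPSR layout.

/* Illustration only: a simplified FIELD_GET lookalike applied to isa_mode(). */
#include <stdio.h>
#include <stdint.h>

#define PSR_T_BIT  0x00000020u   /* Thumb state bit in the CPSR */
#define PSR_J_BIT  0x01000000u   /* Jazelle state bit in the CPSR */

/* Divide by the mask's lowest set bit to shift the masked field down to bit 0. */
#define FIELD_GET(mask, reg)  (((reg) & (mask)) / ((mask) & (0u - (mask))))

int main(void)
{
	uint32_t cpsr = PSR_T_BIT;   /* pretend the CPU is executing Thumb code */
	unsigned int isa = (FIELD_GET(PSR_J_BIT, cpsr) << 1) |
			    FIELD_GET(PSR_T_BIT, cpsr);

	printf("isa_mode = %u\n", isa);   /* prints 1; 0 = ARM, 1 = Thumb, 2 = Jazelle */
	return 0;
}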
@@ -76,8 +76,6 @@ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
-obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o
-obj-$(CONFIG_CPU_PJ4B)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o perf_callchain.o
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_xscale.o perf_event_v6.o \
...
@@ -18,18 +18,6 @@
 #include <asm/assembler.h>
 #include "iwmmxt.h"
 
-#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
-#define PJ4(code...)	code
-#define XSC(code...)
-#elif defined(CONFIG_CPU_MOHAWK) || \
-	defined(CONFIG_CPU_XSC3) || \
-	defined(CONFIG_CPU_XSCALE)
-#define PJ4(code...)
-#define XSC(code...)	code
-#else
-#error "Unsupported iWMMXt architecture"
-#endif
-
 #define MMX_WR0		(0x00)
 #define MMX_WR1		(0x08)
 #define MMX_WR2		(0x10)
@@ -81,17 +69,13 @@ ENDPROC(iwmmxt_undef_handler)
 ENTRY(iwmmxt_task_enable)
 	inc_preempt_count r10, r3
 
-	XSC(mrc	p15, 0, r2, c15, c1, 0)
-	PJ4(mrc	p15, 0, r2, c1, c0, 2)
+	mrc	p15, 0, r2, c15, c1, 0
 	@ CP0 and CP1 accessible?
-	XSC(tst	r2, #0x3)
-	PJ4(tst	r2, #0xf)
+	tst	r2, #0x3
 	bne	4f				@ if so no business here
 	@ enable access to CP0 and CP1
-	XSC(orr	r2, r2, #0x3)
-	XSC(mcr	p15, 0, r2, c15, c1, 0)
-	PJ4(orr	r2, r2, #0xf)
-	PJ4(mcr	p15, 0, r2, c1, c0, 2)
+	orr	r2, r2, #0x3
+	mcr	p15, 0, r2, c15, c1, 0
 
 	ldr	r3, =concan_owner
 	ldr	r2, [r0, #S_PC]			@ current task pc value
@@ -218,12 +202,9 @@ ENTRY(iwmmxt_task_disable)
 	bne	1f				@ no: quit
 
 	@ enable access to CP0 and CP1
-	XSC(mrc	p15, 0, r4, c15, c1, 0)
-	XSC(orr	r4, r4, #0x3)
-	XSC(mcr	p15, 0, r4, c15, c1, 0)
-	PJ4(mrc	p15, 0, r4, c1, c0, 2)
-	PJ4(orr	r4, r4, #0xf)
-	PJ4(mcr	p15, 0, r4, c1, c0, 2)
+	mrc	p15, 0, r4, c15, c1, 0
+	orr	r4, r4, #0x3
+	mcr	p15, 0, r4, c15, c1, 0
 
 	mov	r0, #0				@ nothing to load
 	str	r0, [r3]			@ no more current owner
@@ -232,10 +213,8 @@ ENTRY(iwmmxt_task_disable)
 	bl	concan_save
 
 	@ disable access to CP0 and CP1
-	XSC(bic	r4, r4, #0x3)
-	XSC(mcr	p15, 0, r4, c15, c1, 0)
-	PJ4(bic	r4, r4, #0xf)
-	PJ4(mcr	p15, 0, r4, c1, c0, 2)
+	bic	r4, r4, #0x3
+	mcr	p15, 0, r4, c15, c1, 0
 
 	mrc	p15, 0, r2, c2, c0, 0
 	mov	r2, r2				@ cpwait
@@ -330,11 +309,9 @@ ENDPROC(iwmmxt_task_restore)
  */
ENTRY(iwmmxt_task_switch)
 
-	XSC(mrc	p15, 0, r1, c15, c1, 0)
-	PJ4(mrc	p15, 0, r1, c1, c0, 2)
+	mrc	p15, 0, r1, c15, c1, 0
 	@ CP0 and CP1 accessible?
-	XSC(tst	r1, #0x3)
-	PJ4(tst	r1, #0xf)
+	tst	r1, #0x3
 	bne	1f				@ yes: block them for next task
 
 	ldr	r2, =concan_owner
@@ -344,10 +321,8 @@ ENTRY(iwmmxt_task_switch)
 	retne	lr				@ no: leave Concan disabled
 
 1:	@ flip Concan access
-	XSC(eor	r1, r1, #0x3)
-	XSC(mcr	p15, 0, r1, c15, c1, 0)
-	PJ4(eor	r1, r1, #0xf)
-	PJ4(mcr	p15, 0, r1, c1, c0, 2)
+	eor	r1, r1, #0x3
+	mcr	p15, 0, r1, c15, c1, 0
 
 	mrc	p15, 0, r1, c2, c0, 0
 	sub	pc, lr, r1, lsr #32		@ cpwait and return
...
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/kernel/pj4-cp0.c
*
* PJ4 iWMMXt coprocessor context switching and handling
*
* Copyright (c) 2010 Marvell International Inc.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/thread_notify.h>
#include <asm/cputype.h>
static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
{
struct thread_info *thread = t;
switch (cmd) {
case THREAD_NOTIFY_FLUSH:
/*
* flush_thread() zeroes thread->fpstate, so no need
* to do anything here.
*
* FALLTHROUGH: Ensure we don't try to overwrite our newly
* initialised state information on the first fault.
*/
case THREAD_NOTIFY_EXIT:
iwmmxt_task_release(thread);
break;
case THREAD_NOTIFY_SWITCH:
iwmmxt_task_switch(thread);
break;
}
return NOTIFY_DONE;
}
static struct notifier_block __maybe_unused iwmmxt_notifier_block = {
.notifier_call = iwmmxt_do,
};
static u32 __init pj4_cp_access_read(void)
{
u32 value;
__asm__ __volatile__ (
"mrc p15, 0, %0, c1, c0, 2\n\t"
: "=r" (value));
return value;
}
static void __init pj4_cp_access_write(u32 value)
{
u32 temp;
__asm__ __volatile__ (
"mcr p15, 0, %1, c1, c0, 2\n\t"
#ifdef CONFIG_THUMB2_KERNEL
"isb\n\t"
#else
"mrc p15, 0, %0, c1, c0, 2\n\t"
"mov %0, %0\n\t"
"sub pc, pc, #4\n\t"
#endif
: "=r" (temp) : "r" (value));
}
static int __init pj4_get_iwmmxt_version(void)
{
u32 cp_access, wcid;
cp_access = pj4_cp_access_read();
pj4_cp_access_write(cp_access | 0xf);
/* check if coprocessor 0 and 1 are available */
if ((pj4_cp_access_read() & 0xf) != 0xf) {
pj4_cp_access_write(cp_access);
return -ENODEV;
}
/* read iWMMXt coprocessor id register p1, c0 */
__asm__ __volatile__ ("mrc p1, 0, %0, c0, c0, 0\n" : "=r" (wcid));
pj4_cp_access_write(cp_access);
/* iWMMXt v1 */
if ((wcid & 0xffffff00) == 0x56051000)
return 1;
/* iWMMXt v2 */
if ((wcid & 0xffffff00) == 0x56052000)
return 2;
return -EINVAL;
}
/*
* Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
* switch code handle iWMMXt context switching.
*/
static int __init pj4_cp0_init(void)
{
u32 __maybe_unused cp_access;
int vers;
if (!cpu_is_pj4())
return 0;
vers = pj4_get_iwmmxt_version();
if (vers < 0)
return 0;
#ifndef CONFIG_IWMMXT
pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n");
#else
cp_access = pj4_cp_access_read() & ~0xf;
pj4_cp_access_write(cp_access);
pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers);
elf_hwcap |= HWCAP_IWMMXT;
thread_register_notifier(&iwmmxt_notifier_block);
register_iwmmxt_undef_handler();
#endif
return 0;
}
late_initcall(pj4_cp0_init);
@@ -220,7 +220,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 	unsigned int fp, mode;
 	int ok = 1;
 
-	printk("%sBacktrace: ", loglvl);
+	printk("%sCall trace: ", loglvl);
 
 	if (!tsk)
 		tsk = current;
...
@@ -524,6 +524,8 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 {
 	struct stackframe frame;
 
+	printk("%sCall trace: ", loglvl);
+
 	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
 
 	if (!tsk)
...
@@ -25,6 +25,13 @@
 
 #include "fault.h"
 
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+{
+	unsigned long addr = (unsigned long)unsafe_src;
+
+	return addr >= TASK_SIZE && ULONG_MAX - addr >= size;
+}
+
 #ifdef CONFIG_MMU
 
 /*
@@ -588,6 +595,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
 		return;
 
+	pr_alert("8<--- cut here ---\n");
 	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
 		 inf->name, ifsr, addr);
...
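The new copy_from_kernel_nofault_allowed() helper rejects any source range that either starts below TASK_SIZE (a user-space address) or would wrap past the top of the address space. Below is a small stand-alone sketch of the same check; the TASK_SIZE value and the nofault_allowed() wrapper name are assumptions for illustration, not the kernel's actual definitions.

#include <stdio.h>
#include <limits.h>
#include <stddef.h>

/* Assumed 3G/1G user/kernel split for illustration; the real TASK_SIZE is per-config. */
#define TASK_SIZE 0xC0000000UL

/* Same test as above: the range must lie in kernel space and must not wrap. */
static int nofault_allowed(unsigned long addr, size_t size)
{
	return addr >= TASK_SIZE && ULONG_MAX - addr >= size;
}

int main(void)
{
	printf("%d\n", nofault_allowed(0xC0100000UL, 64));   /* 1: kernel address, fits */
	printf("%d\n", nofault_allowed(0x00010000UL, 64));   /* 0: user-space address */
	printf("%d\n", nofault_allowed(ULONG_MAX - 8, 64));  /* 0: range would wrap around */
	return 0;
}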
@@ -296,6 +296,9 @@ void __sync_icache_dcache(pte_t pteval)
 		return;
 
 	folio = page_folio(pfn_to_page(pfn));
+	if (folio_test_reserved(folio))
+		return;
+
 	if (cache_is_vipt_aliasing())
 		mapping = folio_flush_mapping(folio);
 	else
...
@@ -418,7 +418,7 @@ static void set_section_perms(struct section_perm *perms, int n, bool set,
 }
 
-/**
+/*
  * update_sections_early intended to be called only through stop_machine
  * framework and executed by only one CPU while all other CPUs will spin and
  * wait, so no locking is required in this function.
...