Commit 3e3a138a authored by Linus Torvalds

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - amba bus irq rework

 - add kfence support

 - support for Cortex-M33 and Cortex-M55 CPUs

 - kbuild updates for decompressor

 - let core code manage thread_info::cpu

 - avoid unpredictable NOP encoding in decompressor

 - reduce information printed in call traces

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
  ARM: reduce the information printed in call traces
  ARM: 9168/1: Add support for Cortex-M55 processor
  ARM: 9167/1: Add support for Cortex-M33 processor
  ARM: 9166/1: Support KFENCE for ARM
  ARM: 9165/1: mm: Provide is_write_fault()
  ARM: 9164/1: mm: Provide set_memory_valid()
  ARM: 9163/1: amba: Move of_amba_device_decode_irq() into amba_probe()
  ARM: 9162/1: amba: Kill sysfs attribute file of irq
  ARM: 9161/1: mm: mark private VM_FAULT_X defines as vm_fault_t
  ARM: 9159/1: decompressor: Avoid UNPREDICTABLE NOP encoding
  ARM: 9158/1: leave it to core code to manage thread_info::cpu
  ARM: 9154/1: decompressor: do not copy source files while building
parents c1eb8f6c b0343ab3
......@@ -8,7 +8,7 @@
-----------------------
| alpha: | TODO |
| arc: | TODO |
| arm: | TODO |
| arm: | ok |
| arm64: | ok |
| csky: | TODO |
| h8300: | TODO |
......
......@@ -69,6 +69,7 @@ config ARM
select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
select HAVE_ARCH_MMAP_RND_BITS if MMU
......
......@@ -81,6 +81,17 @@ endchoice
config ARM_UNWIND
bool
config BACKTRACE_VERBOSE
bool "Verbose backtrace"
depends on EXPERT
help
When the kernel produces a warning or oops, the kernel prints a
trace of the call chain. This option controls whether we include
the numeric addresses or only include the symbolic information.
In most cases, say N here, unless you are intending to debug the
kernel and have access to the kernel binary image.
config FRAME_POINTER
bool
......
# SPDX-License-Identifier: GPL-2.0-only
ashldi3.S
bswapsdi2.S
font.c
lib1funcs.S
hyp-stub.S
piggy_data
vmlinux
vmlinux.lds
......@@ -13,7 +13,6 @@ ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y)
OBJS += debug.o
AFLAGS_head.o += -DDEBUG
endif
FONTC = $(srctree)/lib/fonts/font_acorn_8x8.c
# string library code (-Os is enforced to keep it much smaller)
OBJS += string.o
......@@ -99,11 +98,8 @@ $(foreach o, $(libfdt_objs) atags_to_fdt.o fdt_check_mem_start.o, \
$(eval CFLAGS_$(o) := -I $(srctree)/scripts/dtc/libfdt -fno-stack-protector))
targets := vmlinux vmlinux.lds piggy_data piggy.o \
lib1funcs.o ashldi3.o bswapsdi2.o \
head.o $(OBJS)
clean-files += lib1funcs.S ashldi3.S bswapsdi2.S hyp-stub.S
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \
......@@ -134,23 +130,7 @@ endif
# Next argument is a linker script
LDFLAGS_vmlinux += -T
# For __aeabi_uidivmod
lib1funcs = $(obj)/lib1funcs.o
$(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S
$(call cmd,shipped)
# For __aeabi_llsl
ashldi3 = $(obj)/ashldi3.o
$(obj)/ashldi3.S: $(srctree)/arch/$(SRCARCH)/lib/ashldi3.S
$(call cmd,shipped)
# For __bswapsi2, __bswapdi2
bswapsdi2 = $(obj)/bswapsdi2.o
$(obj)/bswapsdi2.S: $(srctree)/arch/$(SRCARCH)/lib/bswapsdi2.S
$(call cmd,shipped)
OBJS += lib1funcs.o ashldi3.o bswapsdi2.o
# We need to prevent any GOTOFF relocs being used with references
# to symbols in the .bss section since we cannot relocate them
......@@ -175,8 +155,8 @@ fi
efi-obj-$(CONFIG_EFI_STUB) := $(objtree)/drivers/firmware/efi/libstub/lib.a
$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.o \
$(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) \
$(bswapsdi2) $(efi-obj-y) FORCE
$(addprefix $(obj)/, $(OBJS)) \
$(efi-obj-y) FORCE
@$(check_for_multiple_zreladdr)
$(call if_changed,ld)
@$(check_for_bad_syms)
......@@ -187,11 +167,4 @@ $(obj)/piggy_data: $(obj)/../Image FORCE
$(obj)/piggy.o: $(obj)/piggy_data
CFLAGS_font.o := -Dstatic=
$(obj)/font.c: $(FONTC)
$(call cmd,shipped)
AFLAGS_hyp-stub.o := -Wa,-march=armv7-a
$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
$(call cmd,shipped)
/* SPDX-License-Identifier: GPL-2.0-only */
/* For __aeabi_llsl */
#include "../../lib/ashldi3.S"
/* SPDX-License-Identifier: GPL-2.0-only */
/* For __bswapsi2, __bswapdi2 */
#include "../../lib/bswapsdi2.S"
......@@ -9,16 +9,22 @@
#include <linux/sizes.h>
.macro __nop
#ifdef CONFIG_EFI_STUB
@ This is almost but not quite a NOP, since it does clobber the
@ condition flags. But it is the best we can do for EFI, since
@ PE/COFF expects the magic string "MZ" at offset 0, while the
@ ARM/Linux boot protocol expects an executable instruction
@ there.
.inst MZ_MAGIC | (0x1310 << 16) @ tstne r0, #0x4d000
#else
AR_CLASS( mov r0, r0 )
M_CLASS( nop.w )
.endm
.macro __initial_nops
#ifdef CONFIG_EFI_STUB
@ This is a two-instruction NOP, which happens to bear the
@ PE/COFF signature "MZ" in the first two bytes, so the kernel
@ is accepted as an EFI binary. Booting via the UEFI stub
@ will not execute those instructions, but the ARM/Linux
@ boot protocol does, so we need some NOPs here.
.inst MZ_MAGIC | (0xe225 << 16) @ eor r5, r5, 0x4d000
eor r5, r5, 0x4d000 @ undo previous insn
#else
__nop
__nop
#endif
.endm
......
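A minimal host-side C sketch (illustrative only, not part of this series) of why the EFI variant of __initial_nops above works: MZ_MAGIC is 0x5a4d, so the word emitted by the first .inst is 0xe2255a4d. Stored little-endian, its first two bytes are 0x4d 0x5a, the "MZ" signature PE/COFF expects at offset 0, while the same word still decodes as the valid ARM instruction eor r5, r5, #0x4d000, which the following eor undoes.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* MZ_MAGIC | (0xe225 << 16), as emitted by the .inst above */
	uint32_t insn = 0x5a4du | (0xe225u << 16);
	unsigned char bytes[4];
	int i;

	/* image bytes are stored little-endian */
	for (i = 0; i < 4; i++)
		bytes[i] = (insn >> (8 * i)) & 0xff;

	/* prints: 0xe2255a4d -> 'MZ' */
	printf("0x%08x -> '%c%c'\n", insn, bytes[0], bytes[1]);
	return 0;
}

Because this EFI form occupies two instruction slots rather than one, the head.S hunk further down pairs __initial_nops with a .rept of 5 where there used to be seven plain __nop's, keeping the patch area the same size.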
// SPDX-License-Identifier: GPL-2.0-only
#include "../../../../lib/fonts/font_acorn_8x8.c"
......@@ -203,7 +203,8 @@ start:
* were patching the initial instructions of the kernel, i.e
* had started to exploit this "patch area".
*/
.rept 7
__initial_nops
.rept 5
__nop
.endr
#ifndef CONFIG_THUMB2_KERNEL
......
/* SPDX-License-Identifier: GPL-2.0-only */
#include "../../kernel/hyp-stub.S"
/* SPDX-License-Identifier: GPL-2.0-only */
/* For __aeabi_uidivmod */
#include "../../lib/lib1funcs.S"
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_KFENCE_H
#define __ASM_ARM_KFENCE_H
#include <linux/kfence.h>
#include <asm/pgalloc.h>
#include <asm/set_memory.h>
static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
{
int i;
unsigned long pfn = PFN_DOWN(__pa(addr));
pte_t *pte = pte_alloc_one_kernel(&init_mm);
if (!pte)
return -ENOMEM;
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
pmd_populate_kernel(&init_mm, pmd, pte);
flush_tlb_kernel_range(addr, addr + PMD_SIZE);
return 0;
}
static inline bool arch_kfence_init_pool(void)
{
unsigned long addr;
pmd_t *pmd;
for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
addr += PAGE_SIZE) {
pmd = pmd_off_k(addr);
if (pmd_leaf(*pmd)) {
if (split_pmd_page(pmd, addr & PMD_MASK))
return false;
}
}
return true;
}
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
set_memory_valid(addr, 1, !protect);
return true;
}
#endif /* __ASM_ARM_KFENCE_H */
......@@ -11,6 +11,7 @@ int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_valid(unsigned long addr, int numpages, int enable);
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
......
......@@ -23,23 +23,9 @@
*/
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
static inline void set_ti_cpu(struct task_struct *p)
{
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
* The core code no longer maintains the thread_info::cpu field once
* CONFIG_THREAD_INFO_IN_TASK is in effect, but we rely on it for
* raw_smp_processor_id(), which cannot access struct task_struct*
* directly for reasons of circular #inclusion hell.
*/
task_thread_info(p)->cpu = task_cpu(p);
#endif
}
#define switch_to(prev,next,last) \
do { \
__complete_pending_tlbi(); \
set_ti_cpu(next); \
if (IS_ENABLED(CONFIG_CURRENT_POINTER_IN_TPIDRURO)) \
__this_cpu_write(__entry_task, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
......
......@@ -154,9 +154,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
secondary_data.task = idle;
if (IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK))
task_thread_info(idle)->cpu = cpu;
sync_cache_w(&secondary_data);
/*
......
......@@ -67,12 +67,14 @@ void dump_backtrace_entry(unsigned long where, unsigned long from,
{
unsigned long end = frame + 4 + sizeof(struct pt_regs);
#ifdef CONFIG_KALLSYMS
#ifndef CONFIG_KALLSYMS
printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
loglvl, where, from);
#elif defined CONFIG_BACKTRACE_VERBOSE
printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
loglvl, where, (void *)where, from, (void *)from);
#else
printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
loglvl, where, from);
printk("%s %ps from %pS\n", loglvl, (void *)where, (void *)from);
#endif
if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
......
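For reference, a rough sketch of how a single backtrace entry is rendered by each of the three branches above (addresses and symbol names here are placeholders, not real output):

	!CONFIG_KALLSYMS:          Function entered at [<c0123456>] from [<c0234567>]
	CONFIG_BACKTRACE_VERBOSE:  [<c0123456>] (callee_fn) from [<c0234567>] (caller_fn+0x18/0x40)
	default (symbols only):     callee_fn from caller_fn+0x18/0x40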
......@@ -17,6 +17,7 @@
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/kfence.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
......@@ -99,6 +100,11 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
{ }
#endif /* CONFIG_MMU */
static inline bool is_write_fault(unsigned int fsr)
{
return (fsr & FSR_WRITE) && !(fsr & FSR_CM);
}
static void die_kernel_fault(const char *msg, struct mm_struct *mm,
unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
......@@ -131,10 +137,14 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
/*
* No handler, we'll have to terminate things with extreme prejudice.
*/
if (addr < PAGE_SIZE)
if (addr < PAGE_SIZE) {
msg = "NULL pointer dereference";
else
} else {
if (kfence_handle_page_fault(addr, is_write_fault(fsr), regs))
return;
msg = "paging request";
}
die_kernel_fault(msg, mm, addr, fsr, regs);
}
......@@ -191,8 +201,8 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
}
#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP 0x010000
#define VM_FAULT_BADACCESS 0x020000
#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000)
#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000)
static inline bool is_permission_fault(unsigned int fsr)
{
......@@ -261,7 +271,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) {
if (is_write_fault(fsr)) {
flags |= FAULT_FLAG_WRITE;
vm_flags = VM_WRITE;
}
......
......@@ -32,14 +32,31 @@ static bool in_range(unsigned long start, unsigned long size,
size <= range_end - start;
}
/*
* This function assumes that the range is mapped with PAGE_SIZE pages.
*/
static int __change_memory_common(unsigned long start, unsigned long size,
pgprot_t set_mask, pgprot_t clear_mask)
{
struct page_change_data data;
int ret;
data.set_mask = set_mask;
data.clear_mask = clear_mask;
ret = apply_to_page_range(&init_mm, start, size, change_page_range,
&data);
flush_tlb_kernel_range(start, start + size);
return ret;
}
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr & PAGE_MASK;
unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
unsigned long size = end - start;
int ret;
struct page_change_data data;
WARN_ON_ONCE(start != addr);
......@@ -50,14 +67,7 @@ static int change_memory_common(unsigned long addr, int numpages,
!in_range(start, size, VMALLOC_START, VMALLOC_END))
return -EINVAL;
data.set_mask = set_mask;
data.clear_mask = clear_mask;
ret = apply_to_page_range(&init_mm, start, size, change_page_range,
&data);
flush_tlb_kernel_range(start, end);
return ret;
return __change_memory_common(start, size, set_mask, clear_mask);
}
int set_memory_ro(unsigned long addr, int numpages)
......@@ -87,3 +97,15 @@ int set_memory_x(unsigned long addr, int numpages)
__pgprot(0),
__pgprot(L_PTE_XN));
}
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
if (enable)
return __change_memory_common(addr, PAGE_SIZE * numpages,
__pgprot(L_PTE_VALID),
__pgprot(0));
else
return __change_memory_common(addr, PAGE_SIZE * numpages,
__pgprot(0),
__pgprot(L_PTE_VALID));
}
......@@ -193,6 +193,26 @@ ENDPROC(__v7m_setup)
.long \cache_fns
.endm
/*
* Match ARM Cortex-M55 processor.
*/
.type __v7m_cm55_proc_info, #object
__v7m_cm55_proc_info:
.long 0x410fd220 /* ARM Cortex-M55 0xD22 */
.long 0xff0ffff0 /* Mask off revision, patch release */
__v7m_proc __v7m_cm55_proc_info, __v7m_cm7_setup, hwcaps = HWCAP_EDSP, cache_fns = v7m_cache_fns, proc_fns = cm7_processor_functions
.size __v7m_cm55_proc_info, . - __v7m_cm55_proc_info
/*
* Match ARM Cortex-M33 processor.
*/
.type __v7m_cm33_proc_info, #object
__v7m_cm33_proc_info:
.long 0x410fd210 /* ARM Cortex-M33 0xD21 */
.long 0xff0ffff0 /* Mask off revision, patch release */
__v7m_proc __v7m_cm33_proc_info, __v7m_setup, hwcaps = HWCAP_EDSP
.size __v7m_cm33_proc_info, . - __v7m_cm33_proc_info
/*
* Match ARM Cortex-M7 processor.
*/
......
......@@ -21,8 +21,6 @@
#include <linux/reset.h>
#include <linux/of_irq.h>
#include <asm/irq.h>
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
/* called on periphid match and class 0x9 coresight device. */
......@@ -136,8 +134,6 @@ static ssize_t name##_show(struct device *_dev, \
static DEVICE_ATTR_RO(name)
amba_attr_func(id, "%08x\n", dev->periphid);
amba_attr_func(irq0, "%u\n", dev->irq[0]);
amba_attr_func(irq1, "%u\n", dev->irq[1]);
amba_attr_func(resource, "\t%016llx\t%016llx\t%016lx\n",
(unsigned long long)dev->res.start, (unsigned long long)dev->res.end,
dev->res.flags);
......@@ -175,6 +171,28 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
return retval;
}
static int of_amba_device_decode_irq(struct amba_device *dev)
{
struct device_node *node = dev->dev.of_node;
int i, irq = 0;
if (IS_ENABLED(CONFIG_OF_IRQ) && node) {
/* Decode the IRQs and address ranges */
for (i = 0; i < AMBA_NR_IRQS; i++) {
irq = of_irq_get(node, i);
if (irq < 0) {
if (irq == -EPROBE_DEFER)
return irq;
irq = 0;
}
dev->irq[i] = irq;
}
}
return 0;
}
/*
* These are the device model conversion veneers; they convert the
* device model structures to our more specific structures.
......@@ -187,6 +205,10 @@ static int amba_probe(struct device *dev)
int ret;
do {
ret = of_amba_device_decode_irq(pcdev);
if (ret)
break;
ret = of_clk_set_defaults(dev->of_node, false);
if (ret < 0)
break;
......@@ -372,38 +394,12 @@ static void amba_device_release(struct device *dev)
kfree(d);
}
static int of_amba_device_decode_irq(struct amba_device *dev)
{
struct device_node *node = dev->dev.of_node;
int i, irq = 0;
if (IS_ENABLED(CONFIG_OF_IRQ) && node) {
/* Decode the IRQs and address ranges */
for (i = 0; i < AMBA_NR_IRQS; i++) {
irq = of_irq_get(node, i);
if (irq < 0) {
if (irq == -EPROBE_DEFER)
return irq;
irq = 0;
}
dev->irq[i] = irq;
}
}
return 0;
}
static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
{
u32 size;
void __iomem *tmp;
int i, ret;
ret = of_amba_device_decode_irq(dev);
if (ret)
goto err_out;
ret = request_resource(parent, &dev->res);
if (ret)
goto err_out;
......@@ -488,20 +484,9 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
skip_probe:
ret = device_add(&dev->dev);
if (ret)
goto err_release;
if (dev->irq[0])
ret = device_create_file(&dev->dev, &dev_attr_irq0);
if (ret == 0 && dev->irq[1])
ret = device_create_file(&dev->dev, &dev_attr_irq1);
if (ret == 0)
return ret;
device_unregister(&dev->dev);
err_release:
release_resource(&dev->res);
if (ret)
release_resource(&dev->res);
err_out:
return ret;
......
......@@ -24,7 +24,7 @@ set -e
# with O=, make sure to remove the stale files in the output tree. Otherwise,
# the build system wrongly compiles the stale ones.
if [ -n "${building_out_of_srctree}" ]; then
for f in fdt_rw.c fdt_ro.c fdt_wip.c fdt.c
for f in fdt_rw.c fdt_ro.c fdt_wip.c fdt.c ashldi3.S bswapsdi2.S font.c lib1funcs.S hyp-stub.S
do
rm -f arch/arm/boot/compressed/${f}
done
......