Commit 36304006 authored by Linus Torvalds

Merge tag 'loongarch-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch updates from Huacai Chen:

 - Fix objtool handling of do_syscall() and enable objtool for Clang

 - Enable generic CPU vulnerabilities support

 - Enable ACPI BGRT handling

 - Rework CPU feature probe from CPUCFG/IOCSR

 - Add ARCH_HAS_SET_MEMORY support

 - Add ARCH_HAS_SET_DIRECT_MAP support

 - Improve hardware page table walker

 - Simplify _percpu_read() and _percpu_write()

 - Add advanced extended IRQ model documentation

 - Some bug fixes and other small changes

* tag 'loongarch-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  Docs/LoongArch: Add advanced extended IRQ model description
  LoongArch: Remove posix_types.h include from sigcontext.h
  LoongArch: Fix memleak in pci_acpi_scan_root()
  LoongArch: Simplify _percpu_read() and _percpu_write()
  LoongArch: Improve hardware page table walker
  LoongArch: Add ARCH_HAS_SET_DIRECT_MAP support
  LoongArch: Add ARCH_HAS_SET_MEMORY support
  LoongArch: Rework CPU feature probe from CPUCFG/IOCSR
  LoongArch: Enable ACPI BGRT handling
  LoongArch: Enable generic CPU vulnerabilities support
  LoongArch: Remove STACK_FRAME_NON_STANDARD(do_syscall)
  LoongArch: Set AS_HAS_THIN_ADD_SUB as y if AS_IS_LLVM
  LoongArch: Enable objtool for Clang
  objtool: Handle frame pointer related instructions
parents ec384984 f339bd3b
@@ -85,6 +85,38 @@ to CPUINTC directly::
| Devices |
+---------+
Advanced Extended IRQ model
===========================
In this model, IPI (Inter-Processor Interrupt) and CPU local timer interrupts go
to CPUINTC directly, CPU UART interrupts go to LIOINTC, and PCH-MSI interrupts go
to AVECINTC and then to CPUINTC directly, while all other device interrupts go to
PCH-PIC/PCH-LPC, are gathered by EIOINTC, and then go to CPUINTC directly::
 +-----+     +-----------------------+     +-------+
 | IPI | --> |        CPUINTC        | <-- | Timer |
 +-----+     +-----------------------+     +-------+
             ^            ^           ^
             |            |           |
       +---------+ +----------+ +---------+     +-------+
       | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs |
       +---------+ +----------+ +---------+     +-------+
            ^            ^
            |            |
       +---------+  +---------+
       | PCH-PIC |  | PCH-MSI |
       +---------+  +---------+
         ^      ^           ^
         |      |           |
 +---------+ +---------+ +---------+
 | Devices | | PCH-LPC | | Devices |
 +---------+ +---------+ +---------+
                  ^
                  |
             +---------+
             | Devices |
             +---------+
ACPI-related definitions
========================
@@ -87,6 +87,38 @@ PCH-LPC/PCH-MSI, gathered by EIOINTC, and then delivered directly to CPUINTC::
| Devices |
+---------+
Advanced Extended IRQ model
===========================
In this model, IPI (Inter-Processor Interrupt) and CPU local timer interrupts are
sent directly to CPUINTC, CPU UART interrupts are sent to LIOINTC, and PCH-MSI
interrupts are sent to AVECINTC and then forwarded directly to CPUINTC, while all
other device interrupts are sent to the PCH-PIC/PCH-LPC they are attached to,
gathered by EIOINTC, and then delivered directly to CPUINTC::
 +-----+     +-----------------------+     +-------+
 | IPI | --> |        CPUINTC        | <-- | Timer |
 +-----+     +-----------------------+     +-------+
             ^            ^           ^
             |            |           |
       +---------+ +----------+ +---------+     +-------+
       | EIOINTC | | AVECINTC | | LIOINTC | <-- | UARTs |
       +---------+ +----------+ +---------+     +-------+
            ^            ^
            |            |
       +---------+  +---------+
       | PCH-PIC |  | PCH-MSI |
       +---------+  +---------+
         ^      ^           ^
         |      |           |
 +---------+ +---------+ +---------+
 | Devices | | PCH-LPC | | Devices |
 +---------+ +---------+ +---------+
                  ^
                  |
             +---------+
             | Devices |
             +---------+
ACPI-related definitions
========================
@@ -25,6 +25,8 @@ config LOONGARCH
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_DEVMAP
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
@@ -82,6 +84,7 @@ config LOONGARCH
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_DEVICES
select GENERIC_CPU_VULNERABILITIES
select GENERIC_ENTRY
select GENERIC_GETTIMEOFDAY
select GENERIC_IOREMAP if !ARCH_IOREMAP
@@ -147,7 +150,7 @@ config LOONGARCH
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG
select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -267,7 +270,7 @@ config AS_HAS_FCSR_CLASS
def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
config AS_HAS_THIN_ADD_SUB
def_bool $(cc-option,-Wa$(comma)-mthin-add-sub)
def_bool $(cc-option,-Wa$(comma)-mthin-add-sub) || AS_IS_LLVM
config AS_HAS_LSX_EXTENSION
def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
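
(-mthin-add-sub asks GAS to emit single PC-relative relocations for label
differences instead of R_LARCH_ADD32/R_LARCH_SUB32 pairs, which objtool requires
to follow the code; LLVM's integrated assembler emits the PC-relative form by
default, hence the unconditional || AS_IS_LLVM.)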
@@ -15,6 +15,7 @@
#define __LL "ll.w "
#define __SC "sc.w "
#define __AMADD "amadd.w "
#define __AMOR "amor.w "
#define __AMAND_DB "amand_db.w "
#define __AMOR_DB "amor_db.w "
#define __AMXOR_DB "amxor_db.w "
@@ -22,6 +23,7 @@
#define __LL "ll.d "
#define __SC "sc.d "
#define __AMADD "amadd.d "
#define __AMOR "amor.d "
#define __AMAND_DB "amand_db.d "
#define __AMOR_DB "amor_db.d "
#define __AMXOR_DB "amxor_db.d "
@@ -51,6 +51,7 @@
#define cpu_has_lbt_mips cpu_opt(LOONGARCH_CPU_LBT_MIPS)
#define cpu_has_lbt (cpu_has_lbt_x86|cpu_has_lbt_arm|cpu_has_lbt_mips)
#define cpu_has_csr cpu_opt(LOONGARCH_CPU_CSR)
#define cpu_has_iocsr cpu_opt(LOONGARCH_CPU_IOCSR)
#define cpu_has_tlb cpu_opt(LOONGARCH_CPU_TLB)
#define cpu_has_watch cpu_opt(LOONGARCH_CPU_WATCH)
#define cpu_has_vint cpu_opt(LOONGARCH_CPU_VINT)
@@ -65,6 +66,7 @@
#define cpu_has_guestid cpu_opt(LOONGARCH_CPU_GUESTID)
#define cpu_has_hypervisor cpu_opt(LOONGARCH_CPU_HYPERVISOR)
#define cpu_has_ptw cpu_opt(LOONGARCH_CPU_PTW)
#define cpu_has_lspw cpu_opt(LOONGARCH_CPU_LSPW)
#define cpu_has_avecint cpu_opt(LOONGARCH_CPU_AVECINT)
#endif /* __ASM_CPU_FEATURES_H */
@@ -87,19 +87,21 @@ enum cpu_type_enum {
#define CPU_FEATURE_LBT_MIPS 12 /* CPU has MIPS Binary Translation */
#define CPU_FEATURE_TLB 13 /* CPU has TLB */
#define CPU_FEATURE_CSR 14 /* CPU has CSR */
#define CPU_FEATURE_WATCH 15 /* CPU has watchpoint registers */
#define CPU_FEATURE_VINT 16 /* CPU has vectored interrupts */
#define CPU_FEATURE_CSRIPI 17 /* CPU has CSR-IPI */
#define CPU_FEATURE_EXTIOI 18 /* CPU has EXT-IOI */
#define CPU_FEATURE_PREFETCH 19 /* CPU has prefetch instructions */
#define CPU_FEATURE_PMP 20 /* CPU has performance counter */
#define CPU_FEATURE_SCALEFREQ 21 /* CPU supports cpufreq scaling */
#define CPU_FEATURE_FLATMODE 22 /* CPU has flat mode */
#define CPU_FEATURE_EIODECODE 23 /* CPU has EXTIOI interrupt pin decode mode */
#define CPU_FEATURE_GUESTID 24 /* CPU has GuestID feature */
#define CPU_FEATURE_HYPERVISOR 25 /* CPU has hypervisor (running in VM) */
#define CPU_FEATURE_PTW 26 /* CPU has hardware page table walker */
#define CPU_FEATURE_AVECINT 27 /* CPU has avec interrupt */
#define CPU_FEATURE_IOCSR 15 /* CPU has IOCSR */
#define CPU_FEATURE_WATCH 16 /* CPU has watchpoint registers */
#define CPU_FEATURE_VINT 17 /* CPU has vectored interrupts */
#define CPU_FEATURE_CSRIPI 18 /* CPU has CSR-IPI */
#define CPU_FEATURE_EXTIOI 19 /* CPU has EXT-IOI */
#define CPU_FEATURE_PREFETCH 20 /* CPU has prefetch instructions */
#define CPU_FEATURE_PMP 21 /* CPU has performance counter */
#define CPU_FEATURE_SCALEFREQ 22 /* CPU supports cpufreq scaling */
#define CPU_FEATURE_FLATMODE 23 /* CPU has flat mode */
#define CPU_FEATURE_EIODECODE 24 /* CPU has EXTIOI interrupt pin decode mode */
#define CPU_FEATURE_GUESTID 25 /* CPU has GuestID feature */
#define CPU_FEATURE_HYPERVISOR 26 /* CPU has hypervisor (running in VM) */
#define CPU_FEATURE_PTW 27 /* CPU has hardware page table walker */
#define CPU_FEATURE_LSPW 28 /* CPU has LSPW (lddir/ldpte instructions) */
#define CPU_FEATURE_AVECINT 29 /* CPU has AVEC interrupt */
#define LOONGARCH_CPU_CPUCFG BIT_ULL(CPU_FEATURE_CPUCFG)
#define LOONGARCH_CPU_LAM BIT_ULL(CPU_FEATURE_LAM)
@@ -115,6 +117,7 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_LBT_ARM BIT_ULL(CPU_FEATURE_LBT_ARM)
#define LOONGARCH_CPU_LBT_MIPS BIT_ULL(CPU_FEATURE_LBT_MIPS)
#define LOONGARCH_CPU_TLB BIT_ULL(CPU_FEATURE_TLB)
#define LOONGARCH_CPU_IOCSR BIT_ULL(CPU_FEATURE_IOCSR)
#define LOONGARCH_CPU_CSR BIT_ULL(CPU_FEATURE_CSR)
#define LOONGARCH_CPU_WATCH BIT_ULL(CPU_FEATURE_WATCH)
#define LOONGARCH_CPU_VINT BIT_ULL(CPU_FEATURE_VINT)
@@ -128,6 +131,7 @@ enum cpu_type_enum {
#define LOONGARCH_CPU_GUESTID BIT_ULL(CPU_FEATURE_GUESTID)
#define LOONGARCH_CPU_HYPERVISOR BIT_ULL(CPU_FEATURE_HYPERVISOR)
#define LOONGARCH_CPU_PTW BIT_ULL(CPU_FEATURE_PTW)
#define LOONGARCH_CPU_LSPW BIT_ULL(CPU_FEATURE_LSPW)
#define LOONGARCH_CPU_AVECINT BIT_ULL(CPU_FEATURE_AVECINT)
#endif /* _ASM_CPU_H */
@@ -62,6 +62,7 @@
#define LOONGARCH_CPUCFG1 0x1
#define CPUCFG1_ISGR32 BIT(0)
#define CPUCFG1_ISGR64 BIT(1)
#define CPUCFG1_ISA GENMASK(1, 0)
#define CPUCFG1_PAGING BIT(2)
#define CPUCFG1_IOCSR BIT(3)
#define CPUCFG1_PABITS GENMASK(11, 4)
@@ -49,12 +49,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
/* Normal, classic get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush)
{
u64 asid = asid_cache(cpu);
if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
local_flush_tlb_user(); /* start new asid cycle */
*need_flush = true; /* start new asid cycle */
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
@@ -74,21 +74,34 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
return 0;
}
static inline void atomic_update_pgd_asid(unsigned long asid, unsigned long pgdl)
{
__asm__ __volatile__(
"csrwr %[pgdl_val], %[pgdl_reg] \n\t"
"csrwr %[asid_val], %[asid_reg] \n\t"
: [asid_val] "+r" (asid), [pgdl_val] "+r" (pgdl)
: [asid_reg] "i" (LOONGARCH_CSR_ASID), [pgdl_reg] "i" (LOONGARCH_CSR_PGDL)
: "memory"
);
}
static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
bool need_flush = false;
unsigned int cpu = smp_processor_id();
/* Check if our ASID is of an older version and thus invalid */
if (!asid_valid(next, cpu))
get_new_mmu_context(next, cpu);
write_csr_asid(cpu_asid(cpu, next));
get_new_mmu_context(next, cpu, &need_flush);
if (next != &init_mm)
csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL);
atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd);
else
csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)invalid_pg_dir);
if (need_flush)
local_flush_tlb_user(); /* Flush tlb after update ASID */
/*
* Mark current->active_mm as not "active" anymore.
@@ -135,9 +148,15 @@ drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);
if (asid == cpu_asid(cpu, mm)) {
bool need_flush = false;
if (!current->mm || (current->mm == mm)) {
get_new_mmu_context(mm, cpu);
get_new_mmu_context(mm, cpu, &need_flush);
write_csr_asid(cpu_asid(cpu, mm));
if (need_flush)
local_flush_tlb_user(); /* Flush tlb after update ASID */
goto out;
}
}
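
The net effect: starting a new ASID cycle no longer flushes the TLB before the
new ASID is live; the flush is deferred until CSR.ASID and CSR.PGDL have been
updated back-to-back in one asm block. Condensed, the new ordering in
switch_mm_irqs_off() is:

    get_new_mmu_context(next, cpu, &need_flush);   /* may request a flush */
    atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd);
    if (need_flush)
            local_flush_tlb_user();   /* flush only after the new ASID is visible */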
@@ -68,75 +68,6 @@ PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
static __always_inline unsigned long __percpu_read(void __percpu *ptr, int size)
{
unsigned long ret;
switch (size) {
case 1:
__asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n"
: [ret] "=&r"(ret)
: [ptr] "r"(ptr)
: "memory");
break;
case 2:
__asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n"
: [ret] "=&r"(ret)
: [ptr] "r"(ptr)
: "memory");
break;
case 4:
__asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n"
: [ret] "=&r"(ret)
: [ptr] "r"(ptr)
: "memory");
break;
case 8:
__asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n"
: [ret] "=&r"(ret)
: [ptr] "r"(ptr)
: "memory");
break;
default:
ret = 0;
BUILD_BUG();
}
return ret;
}
static __always_inline void __percpu_write(void __percpu *ptr, unsigned long val, int size)
{
switch (size) {
case 1:
__asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n"
:
: [val] "r" (val), [ptr] "r" (ptr)
: "memory");
break;
case 2:
__asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n"
:
: [val] "r" (val), [ptr] "r" (ptr)
: "memory");
break;
case 4:
__asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n"
:
: [val] "r" (val), [ptr] "r" (ptr)
: "memory");
break;
case 8:
__asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n"
:
: [val] "r" (val), [ptr] "r" (ptr)
: "memory");
break;
default:
BUILD_BUG();
}
}
static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val, int size)
{
switch (size) {
@@ -157,6 +88,33 @@ static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
return 0;
}
#define __pcpu_op_1(op) op ".b "
#define __pcpu_op_2(op) op ".h "
#define __pcpu_op_4(op) op ".w "
#define __pcpu_op_8(op) op ".d "
#define _percpu_read(size, _pcp) \
({ \
typeof(_pcp) __pcp_ret; \
\
__asm__ __volatile__( \
__pcpu_op_##size("ldx") "%[ret], $r21, %[ptr] \n" \
: [ret] "=&r"(__pcp_ret) \
: [ptr] "r"(&(_pcp)) \
: "memory"); \
\
__pcp_ret; \
})
#define _percpu_write(size, _pcp, _val) \
do { \
__asm__ __volatile__( \
__pcpu_op_##size("stx") "%[val], $r21, %[ptr] \n" \
: \
: [val] "r"(_val), [ptr] "r"(&(_pcp)) \
: "memory"); \
} while (0)
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
@@ -167,18 +125,6 @@ static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
__ret; \
})
#define _percpu_read(pcp) \
({ \
typeof(pcp) __retval; \
__retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \
__retval; \
})
#define _percpu_write(pcp, val) \
do { \
__percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \
} while (0) \
#define _pcp_protect(operation, pcp, val) \
({ \
typeof(pcp) __retval; \
@@ -215,15 +161,15 @@ do { \
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
#define this_cpu_read_1(pcp) _percpu_read(pcp)
#define this_cpu_read_2(pcp) _percpu_read(pcp)
#define this_cpu_read_4(pcp) _percpu_read(pcp)
#define this_cpu_read_8(pcp) _percpu_read(pcp)
#define this_cpu_read_1(pcp) _percpu_read(1, pcp)
#define this_cpu_read_2(pcp) _percpu_read(2, pcp)
#define this_cpu_read_4(pcp) _percpu_read(4, pcp)
#define this_cpu_read_8(pcp) _percpu_read(8, pcp)
#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_1(pcp, val) _percpu_write(1, pcp, val)
#define this_cpu_write_2(pcp, val) _percpu_write(2, pcp, val)
#define this_cpu_write_4(pcp, val) _percpu_write(4, pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(8, pcp, val)
#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
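
With the old __percpu_read()/__percpu_write() switch statements gone, each
size-suffixed accessor now expands to a single ldx/stx against $r21, the
register LoongArch reserves for the percpu base. For example, per the macros
above, this_cpu_read_4(pcp) becomes roughly:

    __asm__ __volatile__(
            "ldx.w %[ret], $r21, %[ptr] \n"
            : [ret] "=&r"(__pcp_ret)
            : [ptr] "r"(&(pcp))
            : "memory");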
@@ -331,29 +331,23 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
if (pte_none(ptep_get(buddy))) {
#ifdef CONFIG_SMP
/*
* For SMP, multiple CPUs can race, so we need to do
* this atomically.
*/
unsigned long page_global = _PAGE_GLOBAL;
unsigned long tmp;
__asm__ __volatile__ (
"1:" __LL "%[tmp], %[buddy] \n"
" bnez %[tmp], 2f \n"
" or %[tmp], %[tmp], %[global] \n"
__SC "%[tmp], %[buddy] \n"
" beqz %[tmp], 1b \n"
" nop \n"
"2: \n"
__WEAK_LLSC_MB
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global));
/*
* For SMP, multiple CPUs can race, so we need
* to do this atomically.
*/
__asm__ __volatile__(
__AMOR "$zero, %[global], %[buddy] \n"
: [buddy] "+ZB" (buddy->pte)
: [global] "r" (_PAGE_GLOBAL)
: "memory");
DBAR(0b11000); /* o_wrw = 0b11000 */
#else /* !CONFIG_SMP */
if (pte_none(ptep_get(buddy)))
WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL));
#endif /* CONFIG_SMP */
}
}
}
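
The LL/SC retry loop is replaced by a single amor (atomic OR), which can run
unconditionally: OR-ing _PAGE_GLOBAL into a buddy PTE that is already populated
is a no-op, since such a buddy must already be global. A rough C-level
equivalent of the new instruction (a sketch only; the patch uses amor.d
directly, followed by the DBAR barrier):

    /* Atomically set _PAGE_GLOBAL on the buddy entry. */
    atomic64_or(_PAGE_GLOBAL, (atomic64_t *)&buddy->pte);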
(new file: arch/loongarch/include/asm/set_memory.h)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
#ifndef _ASM_LOONGARCH_SET_MEMORY_H
#define _ASM_LOONGARCH_SET_MEMORY_H
/*
* Functions to change memory attributes.
*/
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
bool kernel_page_present(struct page *page);
int set_direct_map_default_noflush(struct page *page);
int set_direct_map_invalid_noflush(struct page *page);
#endif /* _ASM_LOONGARCH_SET_MEMORY_H */
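
These are the generic set_memory_*()/set_direct_map_*() prototypes used on other
architectures as well; the LoongArch implementation is the new pageattr.c below,
where addresses outside the kernel mapping are treated as no-ops. Typical usage
when write-protecting a kernel page:

    set_memory_ro(addr, 1);    /* clears _PAGE_WRITE | _PAGE_DIRTY */
    /* ... */
    set_memory_rw(addr, 1);    /* makes the page writable again */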
@@ -17,5 +17,6 @@
#define HWCAP_LOONGARCH_LBT_ARM (1 << 11)
#define HWCAP_LOONGARCH_LBT_MIPS (1 << 12)
#define HWCAP_LOONGARCH_PTW (1 << 13)
#define HWCAP_LOONGARCH_LSPW (1 << 14)
#endif /* _UAPI_ASM_HWCAP_H */
@@ -9,7 +9,6 @@
#define _UAPI_ASM_SIGCONTEXT_H
#include <linux/types.h>
#include <linux/posix_types.h>
/* FP context was used */
#define SC_USED_FP (1 << 0)
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/efi-bgrt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
@@ -212,6 +213,9 @@ void __init acpi_boot_table_init(void)
/* Do not enable ACPI SPCR console by default */
acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
if (IS_ENABLED(CONFIG_ACPI_BGRT))
acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
return;
fdt_earlycon:
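
Once parsed, the BGRT boot logo is exposed through the standard sysfs interface
under /sys/firmware/acpi/bgrt/ (the image plus status/type/offset attributes),
letting userspace keep the firmware logo on screen for a flicker-free boot.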
@@ -91,12 +91,30 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
unsigned int config;
unsigned long asid_mask;
c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR | LOONGARCH_CPU_VINT;
elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
config = read_cpucfg(LOONGARCH_CPUCFG1);
switch (config & CPUCFG1_ISA) {
case 0:
set_isa(c, LOONGARCH_CPU_ISA_LA32R);
break;
case 1:
set_isa(c, LOONGARCH_CPU_ISA_LA32S);
break;
case 2:
set_isa(c, LOONGARCH_CPU_ISA_LA64);
break;
default:
pr_warn("Warning: unknown ISA level\n");
}
if (config & CPUCFG1_PAGING)
c->options |= LOONGARCH_CPU_TLB;
if (config & CPUCFG1_IOCSR)
c->options |= LOONGARCH_CPU_IOCSR;
if (config & CPUCFG1_UAL) {
c->options |= LOONGARCH_CPU_UAL;
elf_hwcap |= HWCAP_LOONGARCH_UAL;
@@ -139,6 +157,10 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
c->options |= LOONGARCH_CPU_PTW;
elf_hwcap |= HWCAP_LOONGARCH_PTW;
}
if (config & CPUCFG2_LSPW) {
c->options |= LOONGARCH_CPU_LSPW;
elf_hwcap |= HWCAP_LOONGARCH_LSPW;
}
if (config & CPUCFG2_LVZP) {
c->options |= LOONGARCH_CPU_LVZ;
elf_hwcap |= HWCAP_LOONGARCH_LVZ;
@@ -162,22 +184,6 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
if (config & CPUCFG6_PMP)
c->options |= LOONGARCH_CPU_PMP;
config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
if (config & IOCSRF_CSRIPI)
c->options |= LOONGARCH_CPU_CSRIPI;
if (config & IOCSRF_EXTIOI)
c->options |= LOONGARCH_CPU_EXTIOI;
if (config & IOCSRF_FREQSCALE)
c->options |= LOONGARCH_CPU_SCALEFREQ;
if (config & IOCSRF_FLATMODE)
c->options |= LOONGARCH_CPU_FLATMODE;
if (config & IOCSRF_EIODECODE)
c->options |= LOONGARCH_CPU_EIODECODE;
if (config & IOCSRF_AVEC)
c->options |= LOONGARCH_CPU_AVECINT;
if (config & IOCSRF_VM)
c->options |= LOONGARCH_CPU_HYPERVISOR;
config = csr_read32(LOONGARCH_CSR_ASID);
config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT;
asid_mask = GENMASK(config - 1, 0);
@@ -210,6 +216,9 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
default:
pr_warn("Warning: unknown TLB type\n");
}
if (get_num_brps() + get_num_wrps())
c->options |= LOONGARCH_CPU_WATCH;
}
#define MAX_NAME_LEN 32
@@ -220,52 +229,67 @@ static char cpu_full_name[MAX_NAME_LEN] = " - ";
static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu)
{
uint32_t config;
uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
const char *core_name = "Unknown";
if (!__cpu_full_name[cpu])
__cpu_full_name[cpu] = cpu_full_name;
*vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
*cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
switch (c->processor_id & PRID_SERIES_MASK) {
case PRID_SERIES_LA132:
switch (BIT(fls(c->isa_level) - 1)) {
case LOONGARCH_CPU_ISA_LA32R:
case LOONGARCH_CPU_ISA_LA32S:
c->cputype = CPU_LOONGSON32;
set_isa(c, LOONGARCH_CPU_ISA_LA32S);
__cpu_family[cpu] = "Loongson-32bit";
pr_info("32-bit Loongson Processor probed (LA132 Core)\n");
break;
case PRID_SERIES_LA264:
case LOONGARCH_CPU_ISA_LA64:
c->cputype = CPU_LOONGSON64;
set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
pr_info("64-bit Loongson Processor probed (LA264 Core)\n");
break;
}
switch (c->processor_id & PRID_SERIES_MASK) {
case PRID_SERIES_LA132:
core_name = "LA132";
break;
case PRID_SERIES_LA264:
core_name = "LA264";
break;
case PRID_SERIES_LA364:
c->cputype = CPU_LOONGSON64;
set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
pr_info("64-bit Loongson Processor probed (LA364 Core)\n");
core_name = "LA364";
break;
case PRID_SERIES_LA464:
c->cputype = CPU_LOONGSON64;
set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
pr_info("64-bit Loongson Processor probed (LA464 Core)\n");
core_name = "LA464";
break;
case PRID_SERIES_LA664:
c->cputype = CPU_LOONGSON64;
set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
pr_info("64-bit Loongson Processor probed (LA664 Core)\n");
core_name = "LA664";
break;
default: /* Default to 64 bit */
c->cputype = CPU_LOONGSON64;
set_isa(c, LOONGARCH_CPU_ISA_LA64);
__cpu_family[cpu] = "Loongson-64bit";
pr_info("64-bit Loongson Processor probed (Unknown Core)\n");
}
pr_info("%s Processor probed (%s Core)\n", __cpu_family[cpu], core_name);
if (!cpu_has_iocsr)
return;
if (!__cpu_full_name[cpu])
__cpu_full_name[cpu] = cpu_full_name;
*vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
*cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
if (config & IOCSRF_CSRIPI)
c->options |= LOONGARCH_CPU_CSRIPI;
if (config & IOCSRF_EXTIOI)
c->options |= LOONGARCH_CPU_EXTIOI;
if (config & IOCSRF_FREQSCALE)
c->options |= LOONGARCH_CPU_SCALEFREQ;
if (config & IOCSRF_FLATMODE)
c->options |= LOONGARCH_CPU_FLATMODE;
if (config & IOCSRF_EIODECODE)
c->options |= LOONGARCH_CPU_EIODECODE;
if (config & IOCSRF_AVEC)
c->options |= LOONGARCH_CPU_AVECINT;
if (config & IOCSRF_VM)
c->options |= LOONGARCH_CPU_HYPERVISOR;
}
#ifdef CONFIG_64BIT
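
Summary of the probe rework: the ISA level, TLB and IOCSR presence are now read
from CPUCFG1 rather than assumed, the watch feature is derived from the actual
breakpoint/watchpoint register counts, and all IOCSR-side probing (the vendor
and CPU name strings as well as the IOCSRF_* feature bits) is skipped entirely
on cores that lack the IOCSR space.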
@@ -31,6 +31,7 @@ int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned long n = (unsigned long) v - 1;
unsigned int isa = cpu_data[n].isa_level;
unsigned int version = cpu_data[n].processor_id & 0xff;
unsigned int fp_version = cpu_data[n].fpu_vers;
struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
@@ -64,9 +65,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_pabits + 1, cpu_vabits + 1);
seq_printf(m, "ISA\t\t\t:");
if (cpu_has_loongarch32)
seq_printf(m, " loongarch32");
if (cpu_has_loongarch64)
if (isa & LOONGARCH_CPU_ISA_LA32R)
seq_printf(m, " loongarch32r");
if (isa & LOONGARCH_CPU_ISA_LA32S)
seq_printf(m, " loongarch32s");
if (isa & LOONGARCH_CPU_ISA_LA64)
seq_printf(m, " loongarch64");
seq_printf(m, "\n");
@@ -81,6 +84,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_complex) seq_printf(m, " complex");
if (cpu_has_crypto) seq_printf(m, " crypto");
if (cpu_has_ptw) seq_printf(m, " ptw");
if (cpu_has_lspw) seq_printf(m, " lspw");
if (cpu_has_lvz) seq_printf(m, " lvz");
if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86");
if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm");
@@ -79,7 +79,3 @@ void noinstr __no_stack_protector do_syscall(struct pt_regs *regs)
syscall_exit_to_user_mode(regs);
}
#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
STACK_FRAME_NON_STANDARD(do_syscall);
#endif
@@ -4,7 +4,8 @@
#
obj-y += init.o cache.o tlb.o tlbex.o extable.o \
fault.o ioremap.o maccess.o mmap.o pgtable.o page.o
fault.o ioremap.o maccess.o mmap.o pgtable.o \
page.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_KASAN) += kasan_init.o
@@ -31,11 +31,52 @@
int show_unhandled_signals = 1;
static int __kprobes spurious_fault(unsigned long write, unsigned long address)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
if (!(address & __UA_LIMIT))
return 0;
pgd = pgd_offset_k(address);
if (!pgd_present(pgdp_get(pgd)))
return 0;
p4d = p4d_offset(pgd, address);
if (!p4d_present(p4dp_get(p4d)))
return 0;
pud = pud_offset(p4d, address);
if (!pud_present(pudp_get(pud)))
return 0;
pmd = pmd_offset(pud, address);
if (!pmd_present(pmdp_get(pmd)))
return 0;
if (pmd_leaf(*pmd)) {
return write ? pmd_write(pmdp_get(pmd)) : 1;
} else {
pte = pte_offset_kernel(pmd, address);
if (!pte_present(ptep_get(pte)))
return 0;
return write ? pte_write(ptep_get(pte)) : 1;
}
}
static void __kprobes no_context(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
const int field = sizeof(unsigned long) * 2;
if (spurious_fault(write, address))
return;
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
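
spurious_fault() covers the window opened by the new set_memory support: if one
CPU relaxes permissions (say via set_memory_rw()) while another CPU still holds
a stale read-only TLB entry, the second CPU's write traps here; the software
walk sees pte_write() is now true, returns nonzero, and the access is simply
retried instead of oopsing.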
(new file: arch/loongarch/mm/pageattr.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2024 Loongson Technology Corporation Limited
*/
#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
struct pageattr_masks {
pgprot_t set_mask;
pgprot_t clear_mask;
};
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
unsigned long new_val = val;
struct pageattr_masks *masks = walk->private;
new_val &= ~(pgprot_val(masks->clear_mask));
new_val |= (pgprot_val(masks->set_mask));
return new_val;
}
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pgd_t val = pgdp_get(pgd);
if (pgd_leaf(val)) {
val = __pgd(set_pageattr_masks(pgd_val(val), walk));
set_pgd(pgd, val);
}
return 0;
}
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
p4d_t val = p4dp_get(p4d);
if (p4d_leaf(val)) {
val = __p4d(set_pageattr_masks(p4d_val(val), walk));
set_p4d(p4d, val);
}
return 0;
}
static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pud_t val = pudp_get(pud);
if (pud_leaf(val)) {
val = __pud(set_pageattr_masks(pud_val(val), walk));
set_pud(pud, val);
}
return 0;
}
static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pmd_t val = pmdp_get(pmd);
if (pmd_leaf(val)) {
val = __pmd(set_pageattr_masks(pmd_val(val), walk));
set_pmd(pmd, val);
}
return 0;
}
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
pte_t val = ptep_get(pte);
val = __pte(set_pageattr_masks(pte_val(val), walk));
set_pte(pte, val);
return 0;
}
static int pageattr_pte_hole(unsigned long addr, unsigned long next,
int depth, struct mm_walk *walk)
{
return 0;
}
static const struct mm_walk_ops pageattr_ops = {
.pgd_entry = pageattr_pgd_entry,
.p4d_entry = pageattr_p4d_entry,
.pud_entry = pageattr_pud_entry,
.pmd_entry = pageattr_pmd_entry,
.pte_entry = pageattr_pte_entry,
.pte_hole = pageattr_pte_hole,
.walk_lock = PGWALK_RDLOCK,
};
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask)
{
int ret;
unsigned long start = addr;
unsigned long end = start + PAGE_SIZE * numpages;
struct pageattr_masks masks = {
.set_mask = set_mask,
.clear_mask = clear_mask
};
if (!numpages)
return 0;
mmap_write_lock(&init_mm);
ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks);
mmap_write_unlock(&init_mm);
flush_tlb_kernel_range(start, end);
return ret;
}
int set_memory_x(unsigned long addr, int numpages)
{
if (addr < vm_map_base)
return 0;
return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_NO_EXEC));
}
int set_memory_nx(unsigned long addr, int numpages)
{
if (addr < vm_map_base)
return 0;
return __set_memory(addr, numpages, __pgprot(_PAGE_NO_EXEC), __pgprot(0));
}
int set_memory_ro(unsigned long addr, int numpages)
{
if (addr < vm_map_base)
return 0;
return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_WRITE | _PAGE_DIRTY));
}
int set_memory_rw(unsigned long addr, int numpages)
{
if (addr < vm_map_base)
return 0;
return __set_memory(addr, numpages, __pgprot(_PAGE_WRITE | _PAGE_DIRTY), __pgprot(0));
}
bool kernel_page_present(struct page *page)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long addr = (unsigned long)page_address(page);
if (addr < vm_map_base)
return true;
pgd = pgd_offset_k(addr);
if (pgd_none(pgdp_get(pgd)))
return false;
if (pgd_leaf(pgdp_get(pgd)))
return true;
p4d = p4d_offset(pgd, addr);
if (p4d_none(p4dp_get(p4d)))
return false;
if (p4d_leaf(p4dp_get(p4d)))
return true;
pud = pud_offset(p4d, addr);
if (pud_none(pudp_get(pud)))
return false;
if (pud_leaf(pudp_get(pud)))
return true;
pmd = pmd_offset(pud, addr);
if (pmd_none(pmdp_get(pmd)))
return false;
if (pmd_leaf(pmdp_get(pmd)))
return true;
pte = pte_offset_kernel(pmd, addr);
return pte_present(ptep_get(pte));
}
int set_direct_map_default_noflush(struct page *page)
{
unsigned long addr = (unsigned long)page_address(page);
if (addr < vm_map_base)
return 0;
return __set_memory(addr, 1, PAGE_KERNEL, __pgprot(0));
}
int set_direct_map_invalid_noflush(struct page *page)
{
unsigned long addr = (unsigned long)page_address(page);
if (addr < vm_map_base)
return 0;
return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID));
}
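
set_direct_map_invalid_noflush() clears _PAGE_PRESENT | _PAGE_VALID so that the
page's linear-map alias faults on access, and set_direct_map_default_noflush()
restores full PAGE_KERNEL permissions. As the _noflush suffix indicates, the
caller owns the TLB flush, so the usual pattern (generic API, sketched) is:

    unsigned long addr = (unsigned long)page_address(page);

    set_direct_map_invalid_noflush(page);           /* drop the linear alias */
    flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
    /* ... the page is unreachable via the direct map ... */
    set_direct_map_default_noflush(page);           /* restore PAGE_KERNEL */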
@@ -225,6 +225,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
if (bus) {
memcpy(bus->sysdata, info->cfg, sizeof(struct pci_config_window));
kfree(info);
kfree(root_ops);
} else {
struct pci_bus *child;
@@ -451,7 +451,7 @@ config ACPI_HED
config ACPI_BGRT
bool "Boottime Graphics Resource Table support"
depends on EFI && (X86 || ARM64)
depends on EFI && (X86 || ARM64 || LOONGARCH)
help
This driver adds support for exposing the ACPI Boottime Graphics
Resource Table, which allows the operating system to obtain
@@ -122,7 +122,7 @@ static bool decode_insn_reg2i12_fomat(union loongarch_instruction inst,
switch (inst.reg2i12_format.opcode) {
case addid_op:
if ((inst.reg2i12_format.rd == CFI_SP) || (inst.reg2i12_format.rj == CFI_SP)) {
/* addi.d sp,sp,si12 or addi.d fp,sp,si12 */
/* addi.d sp,sp,si12 or addi.d fp,sp,si12 or addi.d sp,fp,si12 */
insn->immediate = sign_extend64(inst.reg2i12_format.immediate, 11);
ADD_OP(op) {
op->src.type = OP_SRC_ADD;
@@ -132,6 +132,15 @@
op->dest.reg = inst.reg2i12_format.rd;
}
}
if ((inst.reg2i12_format.rd == CFI_SP) && (inst.reg2i12_format.rj == CFI_FP)) {
/* addi.d sp,fp,si12 */
struct symbol *func = find_func_containing(insn->sec, insn->offset);
if (!func)
return false;
func->frame_pointer = true;
}
break;
case ldd_op:
if (inst.reg2i12_format.rj == CFI_SP) {
@@ -3043,10 +3043,27 @@ static int update_cfi_state(struct instruction *insn,
break;
}
if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
if (op->dest.reg == CFI_BP && op->src.reg == CFI_SP &&
insn->sym->frame_pointer) {
/* addi.d fp,sp,imm on LoongArch */
if (cfa->base == CFI_SP && cfa->offset == op->src.offset) {
cfa->base = CFI_BP;
cfa->offset = 0;
}
break;
}
/* lea disp(%rbp), %rsp */
cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
/* addi.d sp,fp,imm on LoongArch */
if (cfa->base == CFI_BP && cfa->offset == 0) {
if (insn->sym->frame_pointer) {
cfa->base = CFI_SP;
cfa->offset = -op->src.offset;
}
} else {
/* lea disp(%rbp), %rsp */
cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
}
break;
}
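
Together with the new symbol::frame_pointer flag (set when objtool decodes an
addi.d $sp, $fp, imm inside a function, per the decoder hunk above), this lets
objtool track the CFA across a frame-pointer prologue/epilogue of the following
shape (offsets illustrative):

    addi.d  $sp, $sp, -32    # allocate frame, CFA = sp + 32
    st.d    $fp, $sp, 16
    addi.d  $fp, $sp, 32     # CFA becomes fp + 0
    # ...
    addi.d  $sp, $fp, -32    # CFA switches back to sp + 32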
@@ -68,6 +68,7 @@ struct symbol {
u8 warned : 1;
u8 embedded_insn : 1;
u8 local_label : 1;
u8 frame_pointer : 1;
struct list_head pv_target;
struct reloc *relocs;
};