Commit 6f4eea90 authored by Vincent Chen, committed by Palmer Dabbelt

riscv: Introduce alternative mechanism to apply errata solution

Introduce the "alternative" mechanism from ARM64 and x86 to apply the CPU
vendors' errata fixes at runtime. The main purpose of this patch is to
provide a framework, so the implementation is deliberately basic for now;
some scenarios cannot use this scheme yet, such as patching code in a
module, relocating the patched code, and heterogeneous CPU topologies.

Users can apply an erratum fix to existing code flow with the macro
ALTERNATIVE. In the macro, users specify the manufacturer information
(vendorid, archid, and impid) for the erratum, so the kernel knows which
CPU cores the erratum applies to. During boot, the kernel selects the
errata required by the running CPU core and patches only those, so one
vendor's errata do not affect another's at runtime. This patching occurs
only during the boot phase, so the overhead of the "alternative" mechanism
is paid just once.
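
As an illustration only, here is a minimal usage sketch. The erratum ID,
Kconfig symbol, and replacement instruction below are hypothetical; only
the ALTERNATIVE() interface itself comes from this patch.

	#include <asm/alternative.h>
	#include <asm/vendorid_list.h>

	#define ERRATA_XYZ_EXAMPLE	0	/* hypothetical errata ID */

	static inline void example_fixed_up_op(void)
	{
		/*
		 * At boot, "fence rw, rw" is replaced with "fence.i" (both
		 * are 4-byte instructions, as the size checks require) when
		 * the vendor's patch function accepts this core.
		 */
		asm volatile(ALTERNATIVE("fence rw, rw", "fence.i",
					 SIFIVE_VENDOR_ID, ERRATA_XYZ_EXAMPLE,
					 CONFIG_ERRATA_XYZ) ::: "memory");
	}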

This "alternative" mechanism is enabled by default to ensure that all
required errata will be applied. However, users can disable this feature by
the Kconfig "CONFIG_RISCV_ERRATA_ALTERNATIVE".
Signed-off-by: Vincent Chen <vincent.chen@sifive.com>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent 183787c6
...@@ -207,6 +207,7 @@ config LOCKDEP_SUPPORT
	def_bool y
source "arch/riscv/Kconfig.socs"
source "arch/riscv/Kconfig.erratas"
menu "Platform type"
...
menu "CPU errata selection"
config RISCV_ERRATA_ALTERNATIVE
bool "RISC-V alternative scheme"
default y
help
This Kconfig allows the kernel to automatically patch the
errata required by the execution platform at run time. The
code patching is performed once in the boot stages. It means
that the overhead from this mechanism is just taken once.
endmenu
...@@ -87,6 +87,7 @@ KBUILD_IMAGE := $(boot)/Image.gz
head-y := arch/riscv/kernel/head.o
core-y += arch/riscv/
core-$(CONFIG_RISCV_ERRATA_ALTERNATIVE) += arch/riscv/errata/
libs-y += arch/riscv/lib/
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
...
obj-y += alternative.o
// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the ARM64 and x86 version
 *
 * Copyright (C) 2021 Sifive.
 */

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/vendorid_list.h>
#include <asm/sbi.h>
#include <asm/csr.h>

static struct cpu_manufacturer_info_t {
	unsigned long vendor_id;
	unsigned long arch_id;
	unsigned long imp_id;
} cpu_mfr_info;

static void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
				 unsigned long archid, unsigned long impid);

static inline void __init riscv_fill_cpu_mfr_info(void)
{
#ifdef CONFIG_RISCV_M_MODE
	cpu_mfr_info.vendor_id = csr_read(CSR_MVENDORID);
	cpu_mfr_info.arch_id = csr_read(CSR_MARCHID);
	cpu_mfr_info.imp_id = csr_read(CSR_MIMPID);
#else
	cpu_mfr_info.vendor_id = sbi_get_mvendorid();
	cpu_mfr_info.arch_id = sbi_get_marchid();
	cpu_mfr_info.imp_id = sbi_get_mimpid();
#endif
}

static void __init init_alternative(void)
{
	riscv_fill_cpu_mfr_info();

	switch (cpu_mfr_info.vendor_id) {
	default:
		vendor_patch_func = NULL;
	}
}

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
void __init apply_boot_alternatives(void)
{
	/* If called on non-boot cpu things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	init_alternative();

	if (!vendor_patch_func)
		return;

	vendor_patch_func((struct alt_entry *)__alt_start,
			  (struct alt_entry *)__alt_end,
			  cpu_mfr_info.arch_id, cpu_mfr_info.imp_id);
}
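
In this patch no vendor is hooked up yet, so vendor_patch_func always ends
up NULL and apply_boot_alternatives() returns without patching. As a sketch
of what a vendor callback selected in init_alternative() might look like
(the function name is hypothetical; patch_text_nosync() is the existing
RISC-V text-patching helper from <asm/patch.h>):

	#include <asm/patch.h>

	static void __init example_vendor_patch_func(struct alt_entry *begin,
						     struct alt_entry *end,
						     unsigned long archid,
						     unsigned long impid)
	{
		struct alt_entry *alt;

		for (alt = begin; alt < end; alt++) {
			if (alt->vendor_id != SIFIVE_VENDOR_ID)
				continue;
			/*
			 * A real callback would also vet archid/impid and
			 * alt->errata_id before patching.
			 */
			patch_text_nosync(alt->old_ptr, alt->alt_ptr,
					  alt->alt_len);
		}
	}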
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ALTERNATIVE_MACROS_H
#define __ASM_ALTERNATIVE_MACROS_H

#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE

#ifdef __ASSEMBLY__

.macro ALT_ENTRY oldptr newptr vendor_id errata_id new_len
	RISCV_PTR \oldptr
	RISCV_PTR \newptr
	REG_ASM \vendor_id
	REG_ASM \new_len
	.word	\errata_id
.endm

.macro ALT_NEW_CONTENT vendor_id, errata_id, enable = 1, new_c : vararg
	.if \enable
	.pushsection .alternative, "a"
	ALT_ENTRY 886b, 888f, \vendor_id, \errata_id, 889f - 888f
	.popsection
	.subsection 1
888 :
	\new_c
889 :
	.previous
	/*
	 * The two .org directives below only assemble if the old and new
	 * content have exactly the same size: the first fails if the new
	 * content is larger, the second if it is smaller.
	 */
	.org	. - (889b - 888b) + (887b - 886b)
	.org	. - (887b - 886b) + (889b - 888b)
	.endif
.endm

.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable
886 :
	\old_c
887 :
	ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c
.endm

#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
	__ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)
#else /* !__ASSEMBLY__ */

#include <asm/asm.h>
#include <linux/stringify.h>

#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \
	RISCV_PTR " " oldptr "\n" \
	RISCV_PTR " " newptr "\n" \
	REG_ASM " " vendor_id "\n" \
	REG_ASM " " newlen "\n" \
	".word " errata_id "\n"

#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
	".if " __stringify(enable) " == 1\n" \
	".pushsection .alternative, \"a\"\n" \
	ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
	".popsection\n" \
	".subsection 1\n" \
	"888 :\n" \
	new_c "\n" \
	"889 :\n" \
	".previous\n" \
	".org . - (887b - 886b) + (889b - 888b)\n" \
	".org . - (889b - 888b) + (887b - 886b)\n" \
	".endif\n"

#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \
	"886 :\n" \
	old_c "\n" \
	"887 :\n" \
	ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)

#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
	__ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))

#endif /* __ASSEMBLY__ */
#else /* !CONFIG_RISCV_ERRATA_ALTERNATIVE */

#ifdef __ASSEMBLY__

.macro __ALTERNATIVE_CFG old_c
	\old_c
.endm

#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
	__ALTERNATIVE_CFG old_c

#else /* !__ASSEMBLY__ */

#define __ALTERNATIVE_CFG(old_c) \
	old_c "\n"

#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
	__ALTERNATIVE_CFG(old_c)

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_RISCV_ERRATA_ALTERNATIVE */
/*
 * Usage:
 *   ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k)
 * in assembly code. Otherwise:
 *   asm(ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k));
 *
 * old_content: The old content which may be replaced with new content.
 * new_content: The new content.
 * vendor_id:   The CPU vendor ID.
 * errata_id:   The errata ID.
 * CONFIG_k:    The Kconfig option of this erratum. When the option is
 *              disabled, the old content is always executed.
 */
#define ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) \
	_ALTERNATIVE_CFG(old_content, new_content, vendor_id, errata_id, CONFIG_k)
/*
 * If a vendor wants to replace old_content, but another vendor has already
 * used ALTERNATIVE() to patch its customized content at the same location,
 * the vendor can create a new macro ALTERNATIVE_2() based on the following
 * sample code and then replace ALTERNATIVE() with ALTERNATIVE_2() to append
 * its customized content.
 *
 * .macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
 *                                   new_c_2, vendor_id_2, errata_id_2, enable_2
 * 886 :
 *	\old_c
 * 887 :
 *	ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1
 *	ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
 * .endm
 *
 * #define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
 *                                   new_c_2, vendor_id_2, errata_id_2, CONFIG_k_2) \
 *	__ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, IS_ENABLED(CONFIG_k_1), \
 *                                  new_c_2, vendor_id_2, errata_id_2, IS_ENABLED(CONFIG_k_2)
 *
 * #define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
 *                                    new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) \
 *	_ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
 *                                      new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2)
 */
#endif /* __ASM_ALTERNATIVE_MACROS_H */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Sifive.
 */

#ifndef __ASM_ALTERNATIVE_H
#define __ASM_ALTERNATIVE_H

#define ERRATA_STRING_LENGTH_MAX 32

#include <asm/alternative-macros.h>

#ifndef __ASSEMBLY__

#include <linux/init.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/hwcap.h>

void __init apply_boot_alternatives(void);

struct alt_entry {
	void *old_ptr;		 /* address of original instruction or data */
	void *alt_ptr;		 /* address of replacement instruction or data */
	unsigned long vendor_id; /* CPU vendor ID */
	unsigned long alt_len;	 /* size of the replacement */
	unsigned int errata_id;	 /* errata ID */
} __packed;

struct errata_checkfunc_id {
	unsigned long vendor_id;
	bool (*func)(struct alt_entry *alt);
};

#endif
#endif
...@@ -23,6 +23,7 @@
#define REG_L		__REG_SEL(ld, lw)
#define REG_S		__REG_SEL(sd, sw)
#define REG_SC		__REG_SEL(sc.d, sc.w)
#define REG_ASM		__REG_SEL(.dword, .word)
#define SZREG		__REG_SEL(8, 4)
#define LGREG		__REG_SEL(3, 2)
...
...@@ -115,6 +115,9 @@
#define CSR_MIP			0x344
#define CSR_PMPCFG0		0x3a0
#define CSR_PMPADDR0		0x3b0
#define CSR_MVENDORID		0xf11
#define CSR_MARCHID		0xf12
#define CSR_MIMPID		0xf13
#define CSR_MHARTID		0xf14

#ifdef CONFIG_RISCV_M_MODE
...
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Sifive.
 */

#ifndef ASM_ERRATA_LIST_H
#define ASM_ERRATA_LIST_H

#ifdef CONFIG_ERRATA_SIFIVE
#define ERRATA_SIFIVE_NUMBER 0
#endif

#endif
...@@ -11,5 +11,6 @@ extern char _start[];
extern char _start_kernel[];
extern char __init_data_begin[], __init_data_end[];
extern char __init_text_begin[], __init_text_end[];
extern char __alt_start[], __alt_end[];

#endif /* __ASM_SECTIONS_H */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 SiFive
 */

#ifndef ASM_VENDOR_LIST_H
#define ASM_VENDOR_LIST_H

#define SIFIVE_VENDOR_ID	0x489

#endif
...@@ -32,6 +32,7 @@
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/smp.h>
#include <asm/alternative.h>

#include "head.h"

...@@ -40,6 +41,9 @@ static DECLARE_COMPLETION(cpu_running);
void __init smp_prepare_boot_cpu(void)
{
	init_cpu_topology();
#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
	apply_boot_alternatives();
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
...
...@@ -90,6 +90,13 @@ SECTIONS
	}

	__init_data_end = .;

	. = ALIGN(8);
	.alternative : {
		__alt_start = .;
		*(.alternative)
		__alt_end = .;
	}

	__init_end = .;

	/* Start of data section */
...