Commit 527cd207 authored by Linus Torvalds

Merge tag 'riscv-for-linus-4.17-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux

Pull RISC-V updates from Palmer Dabbelt:
 "This contains the new features we'd like to incorporate into the
  RISC-V port for 4.17. We might have a bit more stuff land later in the
  merge window, but I wanted to get this out earlier just so everyone
  can see where we currently stand.

  A short summary of the changes is:

   - We've added support for dynamic ftrace on RISC-V targets.

   - There have been a handful of cleanups to our atomic and locking
     routines. They now more closely match the released RISC-V memory
     model draft.

   - Our module loading support has been cleaned up and is now enabled
     by default, despite some limitations still existing.

   - A patch to define COMMANDLINE_FORCE instead of COMMANDLINE_OVERRIDE
     so the generic device tree code picks up handling all our command
     line stuff.

  There's more information in the merge commits for each patch set"

* tag 'riscv-for-linus-4.17-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux: (21 commits)
  RISC-V: Rename CONFIG_CMDLINE_OVERRIDE to CONFIG_CMDLINE_FORCE
  RISC-V: Add definition of relocation types
  RISC-V: Enable module support in defconfig
  RISC-V: Support SUB32 relocation type in kernel module
  RISC-V: Support ADD32 relocation type in kernel module
  RISC-V: Support ALIGN relocation type in kernel module
  RISC-V: Support RVC_BRANCH/JUMP relocation type in kernel module
  RISC-V: Support HI20/LO12_I/LO12_S relocation type in kernel module
  RISC-V: Support CALL relocation type in kernel module
  RISC-V: Support GOT_HI20/CALL_PLT relocation type in kernel module
  RISC-V: Add section of GOT.PLT for kernel module
  RISC-V: Add sections of PLT and GOT for kernel module
  riscv/atomic: Strengthen implementations with fences
  riscv/spinlock: Strengthen implementations with fences
  riscv/barrier: Define __smp_{store_release,load_acquire}
  riscv/ftrace: Add HAVE_FUNCTION_GRAPH_RET_ADDR_PTR support
  riscv/ftrace: Add DYNAMIC_FTRACE_WITH_REGS support
  riscv/ftrace: Add ARCH_SUPPORTS_FTRACE_OPS support
  riscv/ftrace: Add dynamic function graph tracer support
  riscv/ftrace: Add dynamic function tracer support
  ...
parents 23221d99 f6a11d9f
@@ -115,6 +115,9 @@ config ARCH_RV64I
	select 64BIT
	select HAVE_FUNCTION_TRACER
	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_DYNAMIC_FTRACE
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS
endchoice
@@ -132,6 +135,10 @@ choice
	bool "medium any code model"
endchoice

+config MODULE_SECTIONS
+	bool
+	select HAVE_MOD_ARCH_SPECIFIC
+
choice
	prompt "Maximum Physical Memory"
	default MAXPHYSMEM_2GB if 32BIT
@@ -142,6 +149,7 @@ choice
	bool "2GiB"
config MAXPHYSMEM_128GB
	depends on 64BIT && CMODEL_MEDANY
+	select MODULE_SECTIONS if MODULES
	bool "128GiB"
endchoice
@@ -282,7 +290,7 @@ config CMDLINE_BOOL
	  in CONFIG_CMDLINE.
	  The built-in options will be concatenated to the default command
-	  line if CMDLINE_OVERRIDE is set to 'N'. Otherwise, the default
+	  line if CMDLINE_FORCE is set to 'N'. Otherwise, the default
	  command line will be ignored and replaced by the built-in string.
config CMDLINE
@@ -292,7 +300,7 @@ config CMDLINE
	help
	  Supply command-line options at build time by entering them here.
-config CMDLINE_OVERRIDE
+config CMDLINE_FORCE
	bool "Built-in command line overrides bootloader arguments"
	depends on CMDLINE_BOOL
	help
......
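In practice, the rename means a built-in command line is configured like this (a .config fragment; the option names come from the hunk above, the command-line string itself is only an illustration):

    CONFIG_CMDLINE_BOOL=y
    CONFIG_CMDLINE="console=ttyS0 root=/dev/vda ro"
    CONFIG_CMDLINE_FORCE=y

With CMDLINE_FORCE=y the built-in string replaces whatever the bootloader passes; left unset, the built-in options are appended instead. The name matters because the generic device-tree code (drivers/of/fdt.c) already tests CONFIG_CMDLINE_FORCE, so no RISC-V-specific handling is needed.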
@@ -11,6 +11,9 @@
LDFLAGS		:=
OBJCOPYFLAGS	:= -O binary
LDFLAGS_vmlinux :=
+ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
+	LDFLAGS_vmlinux := --no-relax
+endif
KBUILD_AFLAGS_MODULE += -fPIC
KBUILD_CFLAGS_MODULE += -fPIC
@@ -56,6 +59,11 @@ endif
ifeq ($(CONFIG_CMODEL_MEDANY),y)
	KBUILD_CFLAGS += -mcmodel=medany
endif
+ifeq ($(CONFIG_MODULE_SECTIONS),y)
+	KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/riscv/kernel/module.lds
+endif
+
+KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
# GCC versions that support the "-mstrict-align" option default to allowing
# unaligned accesses.  While unaligned accesses are explicitly allowed in the
......
@@ -73,3 +73,5 @@ CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
# CONFIG_RCU_TRACE is not set
CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
@@ -38,6 +38,21 @@
#define __smp_rmb()	RISCV_FENCE(r,r)
#define __smp_wmb()	RISCV_FENCE(w,w)

+#define __smp_store_release(p, v)				\
+do {								\
+	compiletime_assert_atomic_type(*p);			\
+	RISCV_FENCE(rw,w);					\
+	WRITE_ONCE(*p, v);					\
+} while (0)
+
+#define __smp_load_acquire(p)					\
+({								\
+	typeof(*p) ___p1 = READ_ONCE(*p);			\
+	compiletime_assert_atomic_type(*p);			\
+	RISCV_FENCE(r,rw);					\
+	___p1;							\
+})
+
/*
 * This is a very specific barrier: it's currently only used in two places in
 * the kernel, both in the scheduler.  See include/linux/spinlock.h for the two
......
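These two macros back the generic smp_store_release()/smp_load_acquire() helpers. A minimal sketch of the message-passing pattern they make safe (generic kernel-style code, not part of this diff; the variable and function names are invented for illustration):

    static int payload;
    static int ready;

    /* Producer: publish the data, then set the flag with release
     * semantics; the "fence rw,w" keeps the payload store first. */
    static void publish(int v)
    {
            payload = v;
            smp_store_release(&ready, 1);
    }

    /* Consumer: acquire-load the flag; the "fence r,rw" after the
     * load keeps the payload read from being reordered before it. */
    static int consume(void)
    {
            return smp_load_acquire(&ready) ? payload : -1;
    }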
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
#ifdef CONFIG_SMP
#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
#else
#define RISCV_ACQUIRE_BARRIER
#define RISCV_RELEASE_BARRIER
#endif
#endif /* _ASM_RISCV_FENCE_H */
@@ -8,3 +8,59 @@
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER)
#define HAVE_FUNCTION_GRAPH_FP_TEST
#endif
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__
void _mcount(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
}
struct dyn_arch_ftrace {
};
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * A general call in RISC-V is a pair of instructions:
* 1) auipc: setting high-20 pc-related bits to ra register
* 2) jalr: setting low-12 offset to ra, jump to ra, and set ra to
* return address (original pc + 4)
*
* Dynamic ftrace generates probes to call sites, so we must deal with
* both auipc and jalr at the same time.
*/
#define MCOUNT_ADDR ((unsigned long)_mcount)
#define JALR_SIGN_MASK (0x00000800)
#define JALR_OFFSET_MASK (0x00000fff)
#define AUIPC_OFFSET_MASK (0xfffff000)
#define AUIPC_PAD (0x00001000)
#define JALR_SHIFT 20
#define JALR_BASIC (0x000080e7)
#define AUIPC_BASIC (0x00000097)
#define NOP4 (0x00000013)
#define make_call(caller, callee, call) \
do { \
call[0] = to_auipc_insn((unsigned int)((unsigned long)callee - \
(unsigned long)caller)); \
call[1] = to_jalr_insn((unsigned int)((unsigned long)callee - \
(unsigned long)caller)); \
} while (0)
#define to_jalr_insn(offset) \
(((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_BASIC)
#define to_auipc_insn(offset) \
((offset & JALR_SIGN_MASK) ? \
(((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_BASIC) : \
((offset & AUIPC_OFFSET_MASK) | AUIPC_BASIC))
/*
* Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
*/
#define MCOUNT_INSN_SIZE 8
#endif
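To see what to_auipc_insn()/to_jalr_insn() are compensating for, note that jalr's 12-bit immediate is sign-extended. When bit 11 of the offset is set, the auipc immediate must be padded by one page (AUIPC_PAD), or the pair comes up 0x1000 short. A worked example of the arithmetic (host-side illustration, not kernel code):

    unsigned int offset = 0x1ffc;                      /* bit 11 (0x800) is set */
    unsigned int hi = (offset & 0xfffff000) + 0x1000;  /* 0x2000 */
    int lo = ((int)(offset << 20)) >> 20;              /* sign-extend low 12 bits: -4 */
    /* auipc contributes hi, jalr adds lo: 0x2000 + (-4) == 0x1ffc */

Without AUIPC_PAD, auipc would contribute only 0x1000 and the jalr's -4 would land the call 0x1000 before the intended target.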
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2017 Andes Technology Corporation */
#ifndef _ASM_RISCV_MODULE_H
#define _ASM_RISCV_MODULE_H
#include <asm-generic/module.h>
#define MODULE_ARCH_VERMAGIC "riscv"
u64 module_emit_got_entry(struct module *mod, u64 val);
u64 module_emit_plt_entry(struct module *mod, u64 val);
#ifdef CONFIG_MODULE_SECTIONS
struct mod_section {
struct elf64_shdr *shdr;
int num_entries;
int max_entries;
};
struct mod_arch_specific {
struct mod_section got;
struct mod_section plt;
struct mod_section got_plt;
};
struct got_entry {
u64 symbol_addr; /* the real variable address */
};
static inline struct got_entry emit_got_entry(u64 val)
{
return (struct got_entry) {val};
}
static inline struct got_entry *get_got_entry(u64 val,
const struct mod_section *sec)
{
struct got_entry *got = (struct got_entry *)sec->shdr->sh_addr;
int i;
for (i = 0; i < sec->num_entries; i++) {
if (got[i].symbol_addr == val)
return &got[i];
}
return NULL;
}
struct plt_entry {
/*
* Trampoline code to real target address. The return address
 * should be the original (pc+4) before entering the plt entry.
*/
u32 insn_auipc; /* auipc t0, 0x0 */
u32 insn_ld; /* ld t1, 0x10(t0) */
u32 insn_jr; /* jr t1 */
};
#define OPC_AUIPC 0x0017
#define OPC_LD 0x3003
#define OPC_JALR 0x0067
#define REG_T0 0x5
#define REG_T1 0x6
static inline struct plt_entry emit_plt_entry(u64 val, u64 plt, u64 got_plt)
{
/*
* U-Type encoding:
* +------------+----------+----------+
* | imm[31:12] | rd[11:7] | opc[6:0] |
* +------------+----------+----------+
*
* I-Type encoding:
* +------------+------------+--------+----------+----------+
* | imm[31:20] | rs1[19:15] | funct3 | rd[11:7] | opc[6:0] |
* +------------+------------+--------+----------+----------+
*
*/
u64 offset = got_plt - plt;
u32 hi20 = (offset + 0x800) & 0xfffff000;
u32 lo12 = (offset - hi20);
return (struct plt_entry) {
OPC_AUIPC | (REG_T0 << 7) | hi20,
OPC_LD | (lo12 << 20) | (REG_T0 << 15) | (REG_T1 << 7),
OPC_JALR | (REG_T1 << 15)
};
}
static inline int get_got_plt_idx(u64 val, const struct mod_section *sec)
{
struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr;
int i;
for (i = 0; i < sec->num_entries; i++) {
if (got_plt[i].symbol_addr == val)
return i;
}
return -1;
}
static inline struct plt_entry *get_plt_entry(u64 val,
const struct mod_section *sec_plt,
const struct mod_section *sec_got_plt)
{
struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
int got_plt_idx = get_got_plt_idx(val, sec_got_plt);
if (got_plt_idx >= 0)
return plt + got_plt_idx;
else
return NULL;
}
#endif /* CONFIG_MODULE_SECTIONS */
#endif /* _ASM_RISCV_MODULE_H */
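The "+ 0x800" rounding in emit_plt_entry() reappears in every HI20/LO12 split in this series, so a worked example may help. The low half is a sign-extended 12-bit field, and the high 20 bits must absorb the carry whenever the low bits read as negative (illustrative arithmetic, not kernel code):

    int offset = 0x12fff;
    int hi20 = (offset + 0x800) & 0xfffff000;  /* 0x13000, not 0x12000 */
    int lo12 = offset - hi20;                  /* -1, fits in 12 signed bits */
    /* hi20 + sign_extend(lo12) == 0x13000 - 1 == 0x12fff */

Masking alone would give hi20 = 0x12000 and lo12 = 0xfff, which sign-extends to -1 and misses the target by a page.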
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <asm/current.h>
+#include <asm/fence.h>

/*
 * Simple spin lock operations.  These provide no fairness guarantees.
@@ -28,10 +29,7 @@
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
-	__asm__ __volatile__ (
-		"amoswap.w.rl x0, x0, %0"
-		: "=A" (lock->lock)
-		:: "memory");
+	smp_store_release(&lock->lock, 0);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -39,7 +37,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
	int tmp = 1, busy;

	__asm__ __volatile__ (
-		"amoswap.w.aq %0, %2, %1"
+		"	amoswap.w %0, %2, %1\n"
+		RISCV_ACQUIRE_BARRIER
		: "=r" (busy), "+A" (lock->lock)
		: "r" (tmp)
		: "memory");
@@ -68,8 +67,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1b\n"
		"	addi	%1, %1, 1\n"
-		"	sc.w.aq	%1, %1, %0\n"
+		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
+		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}
@@ -82,8 +82,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1b\n"
		"	li	%1, -1\n"
-		"	sc.w.aq	%1, %1, %0\n"
+		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
+		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}
@@ -96,8 +97,9 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1f\n"
		"	addi	%1, %1, 1\n"
-		"	sc.w.aq	%1, %1, %0\n"
+		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
+		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");
@@ -113,8 +115,9 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1f\n"
		"	li	%1, -1\n"
-		"	sc.w.aq	%1, %1, %0\n"
+		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
+		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");
@@ -125,7 +128,8 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
-		"amoadd.w.rl x0, %1, %0"
+		RISCV_RELEASE_BARRIER
+		"	amoadd.w	x0, %1, %0\n"
		: "+A" (lock->lock)
		: "r" (-1)
		: "memory");
@@ -133,10 +137,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
-	__asm__ __volatile__ (
-		"amoswap.w.rl x0, x0, %0"
-		: "=A" (lock->lock)
-		:: "memory");
+	smp_store_release(&lock->lock, 0);
}

#endif /* _ASM_RISCV_SPINLOCK_H */
@@ -79,5 +79,12 @@ typedef union __riscv_fp_state elf_fpregset_t;
#define R_RISCV_TPREL_I		49
#define R_RISCV_TPREL_S		50
#define R_RISCV_RELAX		51
+#define R_RISCV_SUB6		52
+#define R_RISCV_SET6		53
+#define R_RISCV_SET8		54
+#define R_RISCV_SET16		55
+#define R_RISCV_SET32		56
+#define R_RISCV_32_PCREL	57

#endif /* _UAPI_ASM_ELF_H */
@@ -34,7 +34,9 @@ CFLAGS_setup.o := -mcmodel=medany
obj-$(CONFIG_SMP)		+= smpboot.o
obj-$(CONFIG_SMP)		+= smp.o
obj-$(CONFIG_MODULES)		+= module.o
-obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_MODULE_SECTIONS)	+= module-sections.o
+obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o ftrace.o
+obj-$(CONFIG_DYNAMIC_FTRACE)	+= mcount-dyn.o

clean:
@@ -6,9 +6,126 @@
 */

#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
#ifdef CONFIG_DYNAMIC_FTRACE
static int ftrace_check_current_call(unsigned long hook_pos,
unsigned int *expected)
{
unsigned int replaced[2];
unsigned int nops[2] = {NOP4, NOP4};
/* we expect nops at the hook position */
if (!expected)
expected = nops;
/*
* Read the text we want to modify;
* return must be -EFAULT on read error
*/
if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
return -EFAULT;
/*
* Make sure it is what we expect it to be;
* return must be -EINVAL on failed comparison
*/
if (memcmp(expected, replaced, sizeof(replaced))) {
pr_err("%p: expected (%08x %08x) but get (%08x %08x)",
(void *)hook_pos, expected[0], expected[1], replaced[0],
replaced[1]);
return -EINVAL;
}
return 0;
}
static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
bool enable)
{
unsigned int call[2];
unsigned int nops[2] = {NOP4, NOP4};
int ret = 0;
make_call(hook_pos, target, call);
/* replace the auipc-jalr pair at once */
ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
MCOUNT_INSN_SIZE);
/* return must be -EPERM on write error */
if (ret)
return -EPERM;
smp_mb();
flush_icache_range((void *)hook_pos, (void *)hook_pos + MCOUNT_INSN_SIZE);
return 0;
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
int ret = ftrace_check_current_call(rec->ip, NULL);
if (ret)
return ret;
return __ftrace_modify_call(rec->ip, addr, true);
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
unsigned int call[2];
int ret;
make_call(rec->ip, addr, call);
ret = ftrace_check_current_call(rec->ip, call);
if (ret)
return ret;
return __ftrace_modify_call(rec->ip, addr, false);
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
(unsigned long)func, true);
if (!ret) {
ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
(unsigned long)func, true);
}
return ret;
}
int __init ftrace_dyn_arch_init(void)
{
return 0;
}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
unsigned int call[2];
int ret;
make_call(rec->ip, old_addr, call);
ret = ftrace_check_current_call(rec->ip, call);
if (ret)
return ret;
return __ftrace_modify_call(rec->ip, addr, true);
}
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
- * Most of this file is copied from arm64.
+ * Most of this function is copied from arm64.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
@@ -34,8 +151,62 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
		return;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
-				       frame_pointer, NULL);
+				       frame_pointer, parent);
	if (err == -EBUSY)
		return;
	*parent = return_hooker;
}
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned int call[2];
static int init_graph = 1;
int ret;
make_call(&ftrace_graph_call, &ftrace_stub, call);
/*
 * When enabling the graph tracer for the first time, ftrace_graph_call
 * should contain a call to ftrace_stub.  Once it has been disabled,
 * the 8 bytes at that position become NOPs.
*/
if (init_graph) {
ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
call);
init_graph = 0;
} else {
ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
NULL);
}
if (ret)
return ret;
return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
(unsigned long)&prepare_ftrace_return, true);
}
int ftrace_disable_ftrace_graph_caller(void)
{
unsigned int call[2];
int ret;
make_call(&ftrace_graph_call, &prepare_ftrace_return, call);
/*
* This is to make sure that ftrace_enable_ftrace_graph_caller
* did the right thing.
*/
ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
call);
if (ret)
return ret;
return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
(unsigned long)&prepare_ftrace_return, false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
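Putting the pieces together: every patchable call site is an 8-byte window that __ftrace_modify_call() flips between two states with a single probe_kernel_write(). A sketch of the two states, using the constants from asm/ftrace.h above (here "target" stands for whichever ftrace entry point is being installed; the %hi20/%lo12 notation is informal):

    /* disabled: two NOP4 instructions (0x00000013 == addi x0, x0, 0) */
            nop
            nop
    /* enabled: the make_call() pair, with ra as the scratch/link register */
            auipc   ra, %hi20(target - pc)
            jalr    ra, %lo12(target - pc)(ra)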
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2017 Andes Technology Corporation */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm-generic/export.h>
#include <asm/ftrace.h>
.text
.macro SAVE_ABI_STATE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
addi sp, sp, -48
sd s0, 32(sp)
sd ra, 40(sp)
addi s0, sp, 48
sd t0, 24(sp)
sd t1, 16(sp)
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
sd t2, 8(sp)
#endif
#else
addi sp, sp, -16
sd s0, 0(sp)
sd ra, 8(sp)
addi s0, sp, 16
#endif
.endm
.macro RESTORE_ABI_STATE
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ld s0, 32(sp)
ld ra, 40(sp)
addi sp, sp, 48
#else
ld ra, 8(sp)
ld s0, 0(sp)
addi sp, sp, 16
#endif
.endm
.macro RESTORE_GRAPH_ARGS
ld a0, 24(sp)
ld a1, 16(sp)
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld a2, 8(sp)
#endif
.endm
ENTRY(ftrace_graph_caller)
addi sp, sp, -16
sd s0, 0(sp)
sd ra, 8(sp)
addi s0, sp, 16
ftrace_graph_call:
.global ftrace_graph_call
/*
* Calling ftrace_enable/disable_ftrace_graph_caller would overwrite the
* call below. Check ftrace_modify_all_code for details.
*/
call ftrace_stub
ld ra, 8(sp)
ld s0, 0(sp)
addi sp, sp, 16
ret
ENDPROC(ftrace_graph_caller)
ENTRY(ftrace_caller)
/*
* a0: the address in the caller when calling ftrace_caller
* a1: the caller's return address
* a2: the address of global variable function_trace_op
*/
ld a1, -8(s0)
addi a0, ra, -MCOUNT_INSN_SIZE
la t5, function_trace_op
ld a2, 0(t5)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* the graph tracer (specifically, prepare_ftrace_return) needs these
* arguments but for now the function tracer occupies the regs, so we
* save them in temporary regs to recover later.
*/
addi t0, s0, -8
mv t1, a0
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld t2, -16(s0)
#endif
#endif
SAVE_ABI_STATE
ftrace_call:
.global ftrace_call
/*
* For the dynamic ftrace to work, here we should reserve at least
* 8 bytes for a functional auipc-jalr pair. The following call
* serves this purpose.
*
* Calling ftrace_update_ftrace_func would overwrite the nops below.
* Check ftrace_modify_all_code for details.
*/
call ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
RESTORE_GRAPH_ARGS
call ftrace_graph_caller
#endif
RESTORE_ABI_STATE
ret
ENDPROC(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
.macro SAVE_ALL
addi sp, sp, -(PT_SIZE_ON_STACK+16)
sd s0, (PT_SIZE_ON_STACK)(sp)
sd ra, (PT_SIZE_ON_STACK+8)(sp)
addi s0, sp, (PT_SIZE_ON_STACK+16)
sd x1, PT_RA(sp)
sd x2, PT_SP(sp)
sd x3, PT_GP(sp)
sd x4, PT_TP(sp)
sd x5, PT_T0(sp)
sd x6, PT_T1(sp)
sd x7, PT_T2(sp)
sd x8, PT_S0(sp)
sd x9, PT_S1(sp)
sd x10, PT_A0(sp)
sd x11, PT_A1(sp)
sd x12, PT_A2(sp)
sd x13, PT_A3(sp)
sd x14, PT_A4(sp)
sd x15, PT_A5(sp)
sd x16, PT_A6(sp)
sd x17, PT_A7(sp)
sd x18, PT_S2(sp)
sd x19, PT_S3(sp)
sd x20, PT_S4(sp)
sd x21, PT_S5(sp)
sd x22, PT_S6(sp)
sd x23, PT_S7(sp)
sd x24, PT_S8(sp)
sd x25, PT_S9(sp)
sd x26, PT_S10(sp)
sd x27, PT_S11(sp)
sd x28, PT_T3(sp)
sd x29, PT_T4(sp)
sd x30, PT_T5(sp)
sd x31, PT_T6(sp)
.endm
.macro RESTORE_ALL
ld x1, PT_RA(sp)
ld x2, PT_SP(sp)
ld x3, PT_GP(sp)
ld x4, PT_TP(sp)
ld x5, PT_T0(sp)
ld x6, PT_T1(sp)
ld x7, PT_T2(sp)
ld x8, PT_S0(sp)
ld x9, PT_S1(sp)
ld x10, PT_A0(sp)
ld x11, PT_A1(sp)
ld x12, PT_A2(sp)
ld x13, PT_A3(sp)
ld x14, PT_A4(sp)
ld x15, PT_A5(sp)
ld x16, PT_A6(sp)
ld x17, PT_A7(sp)
ld x18, PT_S2(sp)
ld x19, PT_S3(sp)
ld x20, PT_S4(sp)
ld x21, PT_S5(sp)
ld x22, PT_S6(sp)
ld x23, PT_S7(sp)
ld x24, PT_S8(sp)
ld x25, PT_S9(sp)
ld x26, PT_S10(sp)
ld x27, PT_S11(sp)
ld x28, PT_T3(sp)
ld x29, PT_T4(sp)
ld x30, PT_T5(sp)
ld x31, PT_T6(sp)
ld s0, (PT_SIZE_ON_STACK)(sp)
ld ra, (PT_SIZE_ON_STACK+8)(sp)
addi sp, sp, (PT_SIZE_ON_STACK+16)
.endm
.macro RESTORE_GRAPH_REG_ARGS
ld a0, PT_T0(sp)
ld a1, PT_T1(sp)
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld a2, PT_T2(sp)
#endif
.endm
/*
* Most of the contents are the same as ftrace_caller.
*/
ENTRY(ftrace_regs_caller)
/*
* a3: the address of all registers in the stack
*/
ld a1, -8(s0)
addi a0, ra, -MCOUNT_INSN_SIZE
la t5, function_trace_op
ld a2, 0(t5)
addi a3, sp, -(PT_SIZE_ON_STACK+16)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
addi t0, s0, -8
mv t1, a0
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
ld t2, -16(s0)
#endif
#endif
SAVE_ALL
ftrace_regs_call:
.global ftrace_regs_call
call ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
RESTORE_GRAPH_REG_ARGS
call ftrace_graph_caller
#endif
RESTORE_ALL
ret
ENDPROC(ftrace_regs_caller)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
@@ -32,13 +32,13 @@
	addi	s0, sp, 32
.endm

-.macro STORE_ABI_STATE
+.macro RESTORE_ABI_STATE
	ld	ra, 8(sp)
	ld	s0, 0(sp)
	addi	sp, sp, 16
.endm

-.macro STORE_RET_ABI_STATE
+.macro RESTORE_RET_ABI_STATE
	ld	ra, 24(sp)
	ld	s0, 16(sp)
	ld	a0, 8(sp)
@@ -46,6 +46,10 @@
.endm

ENTRY(ftrace_stub)
+#ifdef CONFIG_DYNAMIC_FTRACE
+	.global _mcount
+	.set	_mcount, ftrace_stub
+#endif
	ret
ENDPROC(ftrace_stub)
@@ -66,15 +70,15 @@ ENTRY(return_to_handler)
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	mv	a0, t6
#endif
-	la	t0, ftrace_return_to_handler
-	jalr	t0
+	call	ftrace_return_to_handler
	mv	a1, a0
-	STORE_RET_ABI_STATE
+	RESTORE_RET_ABI_STATE
	jalr	a1
ENDPROC(return_to_handler)
EXPORT_SYMBOL(return_to_handler)
#endif

+#ifndef CONFIG_DYNAMIC_FTRACE
ENTRY(_mcount)
	la	t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -104,9 +108,8 @@ do_ftrace_graph_caller:
	ld	a2, -16(s0)
#endif
	SAVE_ABI_STATE
-	la	t0, prepare_ftrace_return
-	jalr	t0
-	STORE_ABI_STATE
+	call	prepare_ftrace_return
+	RESTORE_ABI_STATE
	ret
#endif
@@ -120,7 +123,8 @@ do_trace:
	SAVE_ABI_STATE
	jalr	t5
-	STORE_ABI_STATE
+	RESTORE_ABI_STATE
	ret
ENDPROC(_mcount)
EXPORT_SYMBOL(_mcount)
+#endif
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* Copyright (C) 2018 Andes Technology Corporation <zong@andestech.com>
*/
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
u64 module_emit_got_entry(struct module *mod, u64 val)
{
struct mod_section *got_sec = &mod->arch.got;
int i = got_sec->num_entries;
struct got_entry *got = get_got_entry(val, got_sec);
if (got)
return (u64)got;
/* There is no duplicate entry, create a new one */
got = (struct got_entry *)got_sec->shdr->sh_addr;
got[i] = emit_got_entry(val);
got_sec->num_entries++;
BUG_ON(got_sec->num_entries > got_sec->max_entries);
return (u64)&got[i];
}
u64 module_emit_plt_entry(struct module *mod, u64 val)
{
struct mod_section *got_plt_sec = &mod->arch.got_plt;
struct got_entry *got_plt;
struct mod_section *plt_sec = &mod->arch.plt;
struct plt_entry *plt = get_plt_entry(val, plt_sec, got_plt_sec);
int i = plt_sec->num_entries;
if (plt)
return (u64)plt;
/* There is no duplicate entry, create a new one */
got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr;
got_plt[i] = emit_got_entry(val);
plt = (struct plt_entry *)plt_sec->shdr->sh_addr;
plt[i] = emit_plt_entry(val, (u64)&plt[i], (u64)&got_plt[i]);
plt_sec->num_entries++;
got_plt_sec->num_entries++;
BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
return (u64)&plt[i];
}
static int is_rela_equal(const Elf64_Rela *x, const Elf64_Rela *y)
{
return x->r_info == y->r_info && x->r_addend == y->r_addend;
}
static bool duplicate_rela(const Elf64_Rela *rela, int idx)
{
int i;
for (i = 0; i < idx; i++) {
if (is_rela_equal(&rela[i], &rela[idx]))
return true;
}
return false;
}
static void count_max_entries(Elf64_Rela *relas, int num,
unsigned int *plts, unsigned int *gots)
{
unsigned int type, i;
for (i = 0; i < num; i++) {
type = ELF64_R_TYPE(relas[i].r_info);
if (type == R_RISCV_CALL_PLT) {
if (!duplicate_rela(relas, i))
(*plts)++;
} else if (type == R_RISCV_GOT_HI20) {
if (!duplicate_rela(relas, i))
(*gots)++;
}
}
}
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
unsigned int num_plts = 0;
unsigned int num_gots = 0;
int i;
/*
* Find the empty .got and .plt sections.
*/
for (i = 0; i < ehdr->e_shnum; i++) {
if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
mod->arch.plt.shdr = sechdrs + i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".got"))
mod->arch.got.shdr = sechdrs + i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".got.plt"))
mod->arch.got_plt.shdr = sechdrs + i;
}
if (!mod->arch.plt.shdr) {
pr_err("%s: module PLT section(s) missing\n", mod->name);
return -ENOEXEC;
}
if (!mod->arch.got.shdr) {
pr_err("%s: module GOT section(s) missing\n", mod->name);
return -ENOEXEC;
}
if (!mod->arch.got_plt.shdr) {
pr_err("%s: module GOT.PLT section(s) missing\n", mod->name);
return -ENOEXEC;
}
	/* Calculate the maximum number of entries */
for (i = 0; i < ehdr->e_shnum; i++) {
Elf64_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
int num_rela = sechdrs[i].sh_size / sizeof(Elf64_Rela);
Elf64_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
if (sechdrs[i].sh_type != SHT_RELA)
continue;
/* ignore relocations that operate on non-exec sections */
if (!(dst_sec->sh_flags & SHF_EXECINSTR))
continue;
count_max_entries(relas, num_rela, &num_plts, &num_gots);
}
mod->arch.plt.shdr->sh_type = SHT_NOBITS;
mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES;
mod->arch.plt.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_entry);
mod->arch.plt.num_entries = 0;
mod->arch.plt.max_entries = num_plts;
mod->arch.got.shdr->sh_type = SHT_NOBITS;
mod->arch.got.shdr->sh_flags = SHF_ALLOC;
mod->arch.got.shdr->sh_addralign = L1_CACHE_BYTES;
mod->arch.got.shdr->sh_size = (num_gots + 1) * sizeof(struct got_entry);
mod->arch.got.num_entries = 0;
mod->arch.got.max_entries = num_gots;
mod->arch.got_plt.shdr->sh_type = SHT_NOBITS;
mod->arch.got_plt.shdr->sh_flags = SHF_ALLOC;
mod->arch.got_plt.shdr->sh_addralign = L1_CACHE_BYTES;
mod->arch.got_plt.shdr->sh_size = (num_plts + 1) * sizeof(struct got_entry);
mod->arch.got_plt.num_entries = 0;
mod->arch.got_plt.max_entries = num_plts;
return 0;
}
@@ -49,6 +49,39 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
	return 0;
}
static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
Elf_Addr v)
{
s64 offset = (void *)v - (void *)location;
u16 imm8 = (offset & 0x100) << (12 - 8);
u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
u16 imm5 = (offset & 0x20) >> (5 - 2);
u16 imm4_3 = (offset & 0x18) << (12 - 5);
u16 imm2_1 = (offset & 0x6) << (12 - 10);
*(u16 *)location = (*(u16 *)location & 0xe383) |
imm8 | imm7_6 | imm5 | imm4_3 | imm2_1;
return 0;
}
static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
Elf_Addr v)
{
s64 offset = (void *)v - (void *)location;
u16 imm11 = (offset & 0x800) << (12 - 11);
u16 imm10 = (offset & 0x400) >> (10 - 8);
u16 imm9_8 = (offset & 0x300) << (12 - 11);
u16 imm7 = (offset & 0x80) >> (7 - 6);
u16 imm6 = (offset & 0x40) << (12 - 11);
u16 imm5 = (offset & 0x20) >> (5 - 2);
u16 imm4 = (offset & 0x10) << (12 - 5);
u16 imm3_1 = (offset & 0xe) << (12 - 10);
*(u16 *)location = (*(u16 *)location & 0xe003) |
imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1;
return 0;
}
static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
					 Elf_Addr v)
{
@@ -92,6 +125,67 @@ static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location,
	return 0;
}
static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
Elf_Addr v)
{
s32 hi20;
	if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
me->name, v, location);
return -EINVAL;
}
hi20 = ((s32)v + 0x800) & 0xfffff000;
*location = (*location & 0xfff) | hi20;
return 0;
}
static int apply_r_riscv_lo12_i_rela(struct module *me, u32 *location,
Elf_Addr v)
{
/* Skip medlow checking because of filtering by HI20 already */
s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
s32 lo12 = ((s32)v - hi20);
*location = (*location & 0xfffff) | ((lo12 & 0xfff) << 20);
return 0;
}
static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
Elf_Addr v)
{
/* Skip medlow checking because of filtering by HI20 already */
s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
s32 lo12 = ((s32)v - hi20);
u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11);
u32 imm4_0 = (lo12 & 0x1f) << (11 - 4);
*location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
return 0;
}
static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
Elf_Addr v)
{
s64 offset = (void *)v - (void *)location;
s32 hi20;
/* Always emit the got entry */
if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
offset = module_emit_got_entry(me, v);
offset = (void *)offset - (void *)location;
} else {
pr_err(
"%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
me->name, v, location);
return -EINVAL;
}
hi20 = (offset + 0x800) & 0xfffff000;
*location = (*location & 0xfff) | hi20;
return 0;
}
static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
				       Elf_Addr v)
{
@@ -99,6 +193,33 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
	s32 fill_v = offset;
	u32 hi20, lo12;
if (offset != fill_v) {
/* Only emit the plt entry if offset over 32-bit range */
if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
offset = module_emit_plt_entry(me, v);
offset = (void *)offset - (void *)location;
} else {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
me->name, v, location);
return -EINVAL;
}
}
hi20 = (offset + 0x800) & 0xfffff000;
lo12 = (offset - hi20) & 0xfff;
*location = (*location & 0xfff) | hi20;
*(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
return 0;
}
static int apply_r_riscv_call_rela(struct module *me, u32 *location,
Elf_Addr v)
{
s64 offset = (void *)v - (void *)location;
s32 fill_v = offset;
u32 hi20, lo12;
	if (offset != fill_v) {
		pr_err(
			"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
@@ -119,16 +240,49 @@ static int apply_r_riscv_relax_rela(struct module *me, u32 *location,
	return 0;
}
static int apply_r_riscv_align_rela(struct module *me, u32 *location,
Elf_Addr v)
{
pr_err(
"%s: The unexpected relocation type 'R_RISCV_ALIGN' from PC = %p\n",
me->name, location);
return -EINVAL;
}
static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
Elf_Addr v)
{
*(u32 *)location += (*(u32 *)v);
return 0;
}
static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
Elf_Addr v)
{
*(u32 *)location -= (*(u32 *)v);
return 0;
}
static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
				     Elf_Addr v) = {
	[R_RISCV_64]			= apply_r_riscv_64_rela,
	[R_RISCV_BRANCH]		= apply_r_riscv_branch_rela,
	[R_RISCV_JAL]			= apply_r_riscv_jal_rela,
+	[R_RISCV_RVC_BRANCH]		= apply_r_riscv_rcv_branch_rela,
+	[R_RISCV_RVC_JUMP]		= apply_r_riscv_rvc_jump_rela,
	[R_RISCV_PCREL_HI20]		= apply_r_riscv_pcrel_hi20_rela,
	[R_RISCV_PCREL_LO12_I]		= apply_r_riscv_pcrel_lo12_i_rela,
	[R_RISCV_PCREL_LO12_S]		= apply_r_riscv_pcrel_lo12_s_rela,
+	[R_RISCV_HI20]			= apply_r_riscv_hi20_rela,
+	[R_RISCV_LO12_I]		= apply_r_riscv_lo12_i_rela,
+	[R_RISCV_LO12_S]		= apply_r_riscv_lo12_s_rela,
+	[R_RISCV_GOT_HI20]		= apply_r_riscv_got_hi20_rela,
	[R_RISCV_CALL_PLT]		= apply_r_riscv_call_plt_rela,
+	[R_RISCV_CALL]			= apply_r_riscv_call_rela,
	[R_RISCV_RELAX]			= apply_r_riscv_relax_rela,
+	[R_RISCV_ALIGN]			= apply_r_riscv_align_rela,
+	[R_RISCV_ADD32]			= apply_r_riscv_add32_rela,
+	[R_RISCV_SUB32]			= apply_r_riscv_sub32_rela,
};
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
@@ -184,25 +338,38 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
				u64 hi20_loc =
					sechdrs[sechdrs[relsec].sh_info].sh_addr
					+ rel[j].r_offset;
+				u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);

-				/* Find the corresponding HI20 PC-relative relocation entry */
-				if (hi20_loc == sym->st_value) {
+				/* Find the corresponding HI20 relocation entry */
+				if (hi20_loc == sym->st_value
+				    && (hi20_type == R_RISCV_PCREL_HI20
+					|| hi20_type == R_RISCV_GOT_HI20)) {
+					s32 hi20, lo12;
					Elf_Sym *hi20_sym =
						(Elf_Sym *)sechdrs[symindex].sh_addr
						+ ELF_RISCV_R_SYM(rel[j].r_info);
					u64 hi20_sym_val =
						hi20_sym->st_value
						+ rel[j].r_addend;

					/* Calculate lo12 */
-					s64 offset = hi20_sym_val - hi20_loc;
-					s32 hi20 = (offset + 0x800) & 0xfffff000;
-					s32 lo12 = offset - hi20;
+					u64 offset = hi20_sym_val - hi20_loc;
+
+					if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
+					    && hi20_type == R_RISCV_GOT_HI20) {
+						offset = module_emit_got_entry(
+							 me, hi20_sym_val);
+						offset = offset - hi20_loc;
+					}
+
+					hi20 = (offset + 0x800) & 0xfffff000;
+					lo12 = offset - hi20;
					v = lo12;

					break;
				}
			}
			if (j == sechdrs[relsec].sh_size / sizeof(*rel)) {
				pr_err(
-					"%s: Can not find HI20 PC-relative relocation information\n",
+					"%s: Can not find HI20 relocation information\n",
					me->name);
				return -EINVAL;
			}
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2017 Andes Technology Corporation */
SECTIONS {
.plt (NOLOAD) : { BYTE(0) }
.got (NOLOAD) : { BYTE(0) }
.got.plt (NOLOAD) : { BYTE(0) }
}
@@ -18,6 +18,7 @@
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
+#include <linux/ftrace.h>

#ifdef CONFIG_FRAME_POINTER
@@ -63,7 +64,12 @@ static void notrace walk_stackframe(struct task_struct *task,
		frame = (struct stackframe *)fp - 1;
		sp = fp;
		fp = frame->fp;
+#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+		pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+					   (unsigned long *)(fp - 8));
+#else
		pc = frame->ra - 0x4;
+#endif
	}
}
......
@@ -368,6 +368,11 @@ if ($arch eq "x86_64") {
} elsif ($arch eq "microblaze") {
    # Microblaze calls '_mcount' instead of plain 'mcount'.
    $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$";
+} elsif ($arch eq "riscv") {
+    $function_regex = "^([0-9a-fA-F]+)\\s+<([^.0-9][0-9a-zA-Z_\\.]+)>:";
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
+    $type = ".quad";
+    $alignment = 2;
} else {
    die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
}
......