Commit dd502a81 authored by Linus Torvalds

Merge tag 'core-static_call-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull static call support from Ingo Molnar:
 "This introduces static_call(), which is the idea of static_branch()
  applied to indirect function calls. Remove a data load (indirection)
  by modifying the text.

  They give the flexibility of function pointers, but with better
  performance. (This is especially important for cases where retpolines
  would otherwise be used, as retpolines can be pretty slow.)

  API overview:

      DECLARE_STATIC_CALL(name, func);
      DEFINE_STATIC_CALL(name, func);
      DEFINE_STATIC_CALL_NULL(name, typename);

      static_call(name)(args...);
      static_call_cond(name)(args...);
      static_call_update(name, func);

  x86 is supported via text patching, otherwise basic indirect calls are
  used, with function pointers.

  There's a second variant using inline code patching, inspired by
  jump-labels, implemented on x86 as well.

  The new APIs are utilized in the x86 perf code, a heavy user of
  function pointers, where static calls speed up the PMU handler by
  4.2% (!).

   The generic implementation is not really exercised on other
  architectures, outside of the trivial test_static_call_init()
  self-test"

* tag 'core-static_call-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits)
  static_call: Fix return type of static_call_init
  tracepoint: Fix out of sync data passing by static caller
  tracepoint: Fix overly long tracepoint names
  x86/perf, static_call: Optimize x86_pmu methods
  tracepoint: Optimize using static_call()
  static_call: Allow early init
  static_call: Add some validation
  static_call: Handle tail-calls
  static_call: Add static_call_cond()
  x86/alternatives: Teach text_poke_bp() to emulate RET
  static_call: Add simple self-test for static calls
  x86/static_call: Add inline static call implementation for x86-64
  x86/static_call: Add out-of-line static call implementation
  static_call: Avoid kprobes on inline static_call()s
  static_call: Add inline static call infrastructure
  static_call: Add basic static call infrastructure
  compiler.h: Make __ADDRESSABLE() symbol truly unique
  jump_label,module: Fix module lifetime for __jump_label_mod_text_reserved()
  module: Properly propagate MODULE_STATE_COMING failure
  module: Fix up module_notifier return values
  ...
parents 34eb62d8 69e0ad37
arch/Kconfig
@@ -106,6 +106,12 @@ config STATIC_KEYS_SELFTEST
 	help
 	  Boot time self-test of the branch patching code.
 
+config STATIC_CALL_SELFTEST
+	bool "Static call selftest"
+	depends on HAVE_STATIC_CALL
+	help
+	  Boot time self-test of the call patching code.
+
 config OPTPROBES
 	def_bool y
 	depends on KPROBES && HAVE_OPTPROBES
@@ -975,6 +981,13 @@ config HAVE_SPARSE_SYSCALL_NR
 config ARCH_HAS_VDSO_DATA
 	bool
 
+config HAVE_STATIC_CALL
+	bool
+
+config HAVE_STATIC_CALL_INLINE
+	bool
+	depends on HAVE_STATIC_CALL
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
...
arch/x86/Kconfig
@@ -215,6 +215,8 @@ config X86
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_STACKPROTECTOR		if CC_HAS_SANE_STACKPROTECTOR
 	select HAVE_STACK_VALIDATION		if X86_64
+	select HAVE_STATIC_CALL
+	select HAVE_STATIC_CALL_INLINE		if HAVE_STACK_VALIDATION
 	select HAVE_RSEQ
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UNSTABLE_SCHED_CLOCK
@@ -230,6 +232,7 @@ config X86
 	select RTC_MC146818_LIB
 	select SPARSE_IRQ
 	select SRCU
+	select STACK_VALIDATION			if HAVE_STACK_VALIDATION && (HAVE_STATIC_CALL_INLINE || RETPOLINE)
 	select SYSCTL_EXCEPTION_TRACE
 	select THREAD_INFO_IN_TASK
 	select USER_STACKTRACE_SUPPORT
@@ -451,7 +454,6 @@ config GOLDFISH
 config RETPOLINE
 	bool "Avoid speculative indirect branches in kernel"
 	default y
-	select STACK_VALIDATION			if HAVE_STACK_VALIDATION
 	help
 	  Compile kernel with the retpoline compiler options to guard against
 	  kernel-to-user data leaks by avoiding speculative indirect
...
arch/x86/events/core.c
@@ -28,6 +28,7 @@
 #include <linux/bitops.h>
 #include <linux/device.h>
 #include <linux/nospec.h>
+#include <linux/static_call.h>
 
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
@@ -52,6 +53,34 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
 DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
 
+/*
+ * This here uses DEFINE_STATIC_CALL_NULL() to get a static_call defined
+ * from just a typename, as opposed to an actual function.
+ */
+DEFINE_STATIC_CALL_NULL(x86_pmu_handle_irq,  *x86_pmu.handle_irq);
+DEFINE_STATIC_CALL_NULL(x86_pmu_disable_all, *x86_pmu.disable_all);
+DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all,  *x86_pmu.enable_all);
+DEFINE_STATIC_CALL_NULL(x86_pmu_enable,      *x86_pmu.enable);
+DEFINE_STATIC_CALL_NULL(x86_pmu_disable,     *x86_pmu.disable);
+
+DEFINE_STATIC_CALL_NULL(x86_pmu_add,  *x86_pmu.add);
+DEFINE_STATIC_CALL_NULL(x86_pmu_del,  *x86_pmu.del);
+DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);
+
+DEFINE_STATIC_CALL_NULL(x86_pmu_schedule_events,       *x86_pmu.schedule_events);
+DEFINE_STATIC_CALL_NULL(x86_pmu_get_event_constraints, *x86_pmu.get_event_constraints);
+DEFINE_STATIC_CALL_NULL(x86_pmu_put_event_constraints, *x86_pmu.put_event_constraints);
+
+DEFINE_STATIC_CALL_NULL(x86_pmu_start_scheduling,  *x86_pmu.start_scheduling);
+DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
+DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling,   *x86_pmu.stop_scheduling);
+
+DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task,    *x86_pmu.sched_task);
+DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
+
+DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs,   *x86_pmu.drain_pebs);
+DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
+
 u64 __read_mostly hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -660,7 +689,7 @@ static void x86_pmu_disable(struct pmu *pmu)
 	cpuc->enabled = 0;
 	barrier();
 
-	x86_pmu.disable_all();
+	static_call(x86_pmu_disable_all)();
 }
 
 void x86_pmu_enable_all(int added)
@@ -907,8 +936,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
 		n0 -= cpuc->n_txn;
 
-	if (x86_pmu.start_scheduling)
-		x86_pmu.start_scheduling(cpuc);
+	static_call_cond(x86_pmu_start_scheduling)(cpuc);
 
 	for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
 		c = cpuc->event_constraint[i];
@@ -925,7 +953,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		 * change due to external factors (sibling state, allow_tfa).
 		 */
 		if (!c || (c->flags & PERF_X86_EVENT_DYNAMIC)) {
-			c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
+			c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]);
 			cpuc->event_constraint[i] = c;
 		}
@@ -1008,8 +1036,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 	if (!unsched && assign) {
 		for (i = 0; i < n; i++) {
 			e = cpuc->event_list[i];
-			if (x86_pmu.commit_scheduling)
-				x86_pmu.commit_scheduling(cpuc, i, assign[i]);
+			static_call_cond(x86_pmu_commit_scheduling)(cpuc, i, assign[i]);
 		}
 	} else {
 		for (i = n0; i < n; i++) {
@@ -1018,15 +1045,13 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 			/*
 			 * release events that failed scheduling
 			 */
-			if (x86_pmu.put_event_constraints)
-				x86_pmu.put_event_constraints(cpuc, e);
+			static_call_cond(x86_pmu_put_event_constraints)(cpuc, e);
 
 			cpuc->event_constraint[i] = NULL;
 		}
 	}
 
-	if (x86_pmu.stop_scheduling)
-		x86_pmu.stop_scheduling(cpuc);
+	static_call_cond(x86_pmu_stop_scheduling)(cpuc);
 
 	return unsched ? -EINVAL : 0;
 }
@@ -1226,7 +1251,7 @@ static void x86_pmu_enable(struct pmu *pmu)
 	cpuc->enabled = 1;
 	barrier();
 
-	x86_pmu.enable_all(added);
+	static_call(x86_pmu_enable_all)(added);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -1347,7 +1372,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)
 	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
 		goto done_collect;
 
-	ret = x86_pmu.schedule_events(cpuc, n, assign);
+	ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
 	if (ret)
 		goto out;
 	/*
@@ -1365,13 +1390,11 @@ static int x86_pmu_add(struct perf_event *event, int flags)
 	cpuc->n_added += n - n0;
 	cpuc->n_txn += n - n0;
 
-	if (x86_pmu.add) {
-		/*
-		 * This is before x86_pmu_enable() will call x86_pmu_start(),
-		 * so we enable LBRs before an event needs them etc..
-		 */
-		x86_pmu.add(event);
-	}
+	/*
+	 * This is before x86_pmu_enable() will call x86_pmu_start(),
+	 * so we enable LBRs before an event needs them etc..
+	 */
+	static_call_cond(x86_pmu_add)(event);
 
 	ret = 0;
 out:
@@ -1399,7 +1422,7 @@ static void x86_pmu_start(struct perf_event *event, int flags)
 	cpuc->events[idx] = event;
 	__set_bit(idx, cpuc->active_mask);
 	__set_bit(idx, cpuc->running);
-	x86_pmu.enable(event);
+	static_call(x86_pmu_enable)(event);
 	perf_event_update_userpage(event);
 }
@@ -1469,7 +1492,7 @@ void x86_pmu_stop(struct perf_event *event, int flags)
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (test_bit(hwc->idx, cpuc->active_mask)) {
-		x86_pmu.disable(event);
+		static_call(x86_pmu_disable)(event);
 		__clear_bit(hwc->idx, cpuc->active_mask);
 		cpuc->events[hwc->idx] = NULL;
 		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
@@ -1519,8 +1542,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
 	if (i >= cpuc->n_events - cpuc->n_added)
 		--cpuc->n_added;
 
-	if (x86_pmu.put_event_constraints)
-		x86_pmu.put_event_constraints(cpuc, event);
+	static_call_cond(x86_pmu_put_event_constraints)(cpuc, event);
 
 	/* Delete the array entry. */
 	while (++i < cpuc->n_events) {
@@ -1533,13 +1555,12 @@ static void x86_pmu_del(struct perf_event *event, int flags)
 	perf_event_update_userpage(event);
 
 do_del:
-	if (x86_pmu.del) {
-		/*
-		 * This is after x86_pmu_stop(); so we disable LBRs after any
-		 * event can need them etc..
-		 */
-		x86_pmu.del(event);
-	}
+
+	/*
+	 * This is after x86_pmu_stop(); so we disable LBRs after any
+	 * event can need them etc..
+	 */
+	static_call_cond(x86_pmu_del)(event);
 }
 
 int x86_pmu_handle_irq(struct pt_regs *regs)
@@ -1617,7 +1638,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 		return NMI_DONE;
 
 	start_clock = sched_clock();
-	ret = x86_pmu.handle_irq(regs);
+	ret = static_call(x86_pmu_handle_irq)(regs);
 	finish_clock = sched_clock();
 
 	perf_sample_event_took(finish_clock - start_clock);
@@ -1830,6 +1851,38 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
 static struct attribute_group x86_pmu_attr_group;
 static struct attribute_group x86_pmu_caps_group;
 
+static void x86_pmu_static_call_update(void)
+{
+	static_call_update(x86_pmu_handle_irq, x86_pmu.handle_irq);
+	static_call_update(x86_pmu_disable_all, x86_pmu.disable_all);
+	static_call_update(x86_pmu_enable_all, x86_pmu.enable_all);
+	static_call_update(x86_pmu_enable, x86_pmu.enable);
+	static_call_update(x86_pmu_disable, x86_pmu.disable);
+
+	static_call_update(x86_pmu_add, x86_pmu.add);
+	static_call_update(x86_pmu_del, x86_pmu.del);
+	static_call_update(x86_pmu_read, x86_pmu.read);
+
+	static_call_update(x86_pmu_schedule_events, x86_pmu.schedule_events);
+	static_call_update(x86_pmu_get_event_constraints, x86_pmu.get_event_constraints);
+	static_call_update(x86_pmu_put_event_constraints, x86_pmu.put_event_constraints);
+
+	static_call_update(x86_pmu_start_scheduling, x86_pmu.start_scheduling);
+	static_call_update(x86_pmu_commit_scheduling, x86_pmu.commit_scheduling);
+	static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling);
+
+	static_call_update(x86_pmu_sched_task, x86_pmu.sched_task);
+	static_call_update(x86_pmu_swap_task_ctx, x86_pmu.swap_task_ctx);
+
+	static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs);
+	static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases);
+}
+
+static void _x86_pmu_read(struct perf_event *event)
+{
+	x86_perf_event_update(event);
+}
+
 static int __init init_hw_perf_events(void)
 {
 	struct x86_pmu_quirk *quirk;
@@ -1898,6 +1951,11 @@ static int __init init_hw_perf_events(void)
 	pr_info("... fixed-purpose events:   %d\n", x86_pmu.num_counters_fixed);
 	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
+	if (!x86_pmu.read)
+		x86_pmu.read = _x86_pmu_read;
+
+	x86_pmu_static_call_update();
+
 	/*
 	 * Install callbacks. Core will call them for each online
 	 * cpu.
@@ -1934,11 +1992,9 @@ static int __init init_hw_perf_events(void)
 }
 early_initcall(init_hw_perf_events);
 
-static inline void x86_pmu_read(struct perf_event *event)
+static void x86_pmu_read(struct perf_event *event)
 {
-	if (x86_pmu.read)
-		return x86_pmu.read(event);
-	x86_perf_event_update(event);
+	static_call(x86_pmu_read)(event);
 }
 
 /*
@@ -2015,7 +2071,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
 	if (!x86_pmu_initialized())
 		return -EAGAIN;
 
-	ret = x86_pmu.schedule_events(cpuc, n, assign);
+	ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign);
 	if (ret)
 		return ret;
 
@@ -2308,15 +2364,13 @@ static const struct attribute_group *x86_pmu_attr_groups[] = {
 
 static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
 {
-	if (x86_pmu.sched_task)
-		x86_pmu.sched_task(ctx, sched_in);
+	static_call_cond(x86_pmu_sched_task)(ctx, sched_in);
 }
 
 static void x86_pmu_swap_task_ctx(struct perf_event_context *prev,
 				  struct perf_event_context *next)
 {
-	if (x86_pmu.swap_task_ctx)
-		x86_pmu.swap_task_ctx(prev, next);
+	static_call_cond(x86_pmu_swap_task_ctx)(prev, next);
 }
 
 void perf_check_microcode(void)
...
arch/x86/include/asm/static_call.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_STATIC_CALL_H
#define _ASM_STATIC_CALL_H
#include <asm/text-patching.h>
/*
* For CONFIG_HAVE_STATIC_CALL_INLINE, this is a temporary trampoline which
* uses the current value of the key->func pointer to do an indirect jump to
* the function. This trampoline is only used during boot, before the call
* sites get patched by static_call_update(). The name of this trampoline has
* a magical aspect: objtool uses it to find static call sites so it can create
* the .static_call_sites section.
*
* For CONFIG_HAVE_STATIC_CALL, this is a permanent trampoline which
* does a direct jump to the function. The direct jump gets patched by
* static_call_update().
*
 * Having the trampoline in a special section forces GCC to emit a JMP.d32 when
 * it does tail-call optimization on the call, since the relative displacement
 * across sections is not known at compile time.
*/
#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, insns) \
asm(".pushsection .static_call.text, \"ax\" \n" \
".align 4 \n" \
".globl " STATIC_CALL_TRAMP_STR(name) " \n" \
STATIC_CALL_TRAMP_STR(name) ": \n" \
insns " \n" \
".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \
".popsection \n")
#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)")
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop")
#endif /* _ASM_STATIC_CALL_H */
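
As a concrete illustration (names are placeholders; the __SCT__ prefix
comes from STATIC_CALL_TRAMP in static_call_types.h further down),
ARCH_DEFINE_STATIC_CALL_TRAMP(my_name, func_a) expands to an assembly
stub roughly equivalent to:

    .pushsection .static_call.text, "ax"
    .align 4
    .globl __SCT__my_name
    __SCT__my_name:
            jmp func_a      /* 0xe9 + 32-bit relative displacement */
    .type __SCT__my_name, @function
    .size __SCT__my_name, . - __SCT__my_name
    .popsection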
arch/x86/include/asm/text-patching.h
@@ -53,6 +53,9 @@ extern void text_poke_finish(void);
 #define INT3_INSN_SIZE		1
 #define INT3_INSN_OPCODE	0xCC
 
+#define RET_INSN_SIZE		1
+#define RET_INSN_OPCODE		0xC3
+
 #define CALL_INSN_SIZE		5
 #define CALL_INSN_OPCODE	0xE8
 
@@ -73,6 +76,7 @@ static __always_inline int text_opcode_size(u8 opcode)
 	switch(opcode) {
 	__CASE(INT3);
+	__CASE(RET);
 	__CASE(CALL);
 	__CASE(JMP32);
 	__CASE(JMP8);
@@ -140,12 +144,27 @@ void int3_emulate_push(struct pt_regs *regs, unsigned long val)
 	*(unsigned long *)regs->sp = val;
 }
 
+static __always_inline
+unsigned long int3_emulate_pop(struct pt_regs *regs)
+{
+	unsigned long val = *(unsigned long *)regs->sp;
+
+	regs->sp += sizeof(unsigned long);
+	return val;
+}
+
 static __always_inline
 void int3_emulate_call(struct pt_regs *regs, unsigned long func)
 {
 	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
 	int3_emulate_jmp(regs, func);
 }
+
+static __always_inline
+void int3_emulate_ret(struct pt_regs *regs)
+{
+	unsigned long ip = int3_emulate_pop(regs);
+
+	int3_emulate_jmp(regs, ip);
+}
 #endif /* !CONFIG_UML_X86 */
 
 #endif /* _ASM_X86_TEXT_PATCHING_H */
arch/x86/kernel/Makefile
@@ -68,6 +68,7 @@ obj-y			+= tsc.o tsc_msr.o io_delay.o rtc.o
 obj-y			+= pci-iommu_table.o
 obj-y			+= resource.o
 obj-y			+= irqflags.o
+obj-y			+= static_call.o
 
 obj-y			+= process.o
 obj-y			+= fpu/
...
arch/x86/kernel/alternative.c
@@ -1103,6 +1103,10 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
 		 */
 		goto out_put;
 
+	case RET_INSN_OPCODE:
+		int3_emulate_ret(regs);
+		break;
+
 	case CALL_INSN_OPCODE:
 		int3_emulate_call(regs, (long)ip + tp->rel32);
 		break;
@@ -1277,6 +1281,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
 	switch (tp->opcode) {
 	case INT3_INSN_OPCODE:
+	case RET_INSN_OPCODE:
 		break;
 
 	case CALL_INSN_OPCODE:
...
arch/x86/kernel/kprobes/opt.c
@@ -18,6 +18,7 @@
 #include <linux/ftrace.h>
 #include <linux/frame.h>
 #include <linux/pgtable.h>
+#include <linux/static_call.h>
 
 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>
@@ -210,7 +211,8 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
 	/* Check whether the address range is reserved */
 	if (ftrace_text_reserved(src, src + len - 1) ||
 	    alternatives_text_reserved(src, src + len - 1) ||
-	    jump_label_text_reserved(src, src + len - 1))
+	    jump_label_text_reserved(src, src + len - 1) ||
+	    static_call_text_reserved(src, src + len - 1))
 		return -EBUSY;
 
 	return len;
...
arch/x86/kernel/setup.c
@@ -19,6 +19,7 @@
 #include <linux/hugetlb.h>
 #include <linux/tboot.h>
 #include <linux/usb/xhci-dbgp.h>
+#include <linux/static_call.h>
 
 #include <uapi/linux/mount.h>
@@ -849,6 +850,7 @@ void __init setup_arch(char **cmdline_p)
 	early_cpu_init();
 	arch_init_ideal_nops();
 	jump_label_init();
+	static_call_init();
 	early_ioremap_init();
 
 	setup_olpc_ofw_pgd();
...
arch/x86/kernel/static_call.c (new file)
// SPDX-License-Identifier: GPL-2.0
#include <linux/static_call.h>
#include <linux/memory.h>
#include <linux/bug.h>
#include <asm/text-patching.h>
enum insn_type {
CALL = 0, /* site call */
NOP = 1, /* site cond-call */
JMP = 2, /* tramp / site tail-call */
RET = 3, /* tramp / site cond-tail-call */
};
static void __ref __static_call_transform(void *insn, enum insn_type type, void *func)
{
int size = CALL_INSN_SIZE;
const void *code;
switch (type) {
case CALL:
code = text_gen_insn(CALL_INSN_OPCODE, insn, func);
break;
case NOP:
code = ideal_nops[NOP_ATOMIC5];
break;
case JMP:
code = text_gen_insn(JMP32_INSN_OPCODE, insn, func);
break;
case RET:
code = text_gen_insn(RET_INSN_OPCODE, insn, func);
size = RET_INSN_SIZE;
break;
}
if (memcmp(insn, code, size) == 0)
return;
if (unlikely(system_state == SYSTEM_BOOTING))
return text_poke_early(insn, code, size);
text_poke_bp(insn, code, size, NULL);
}
static void __static_call_validate(void *insn, bool tail)
{
u8 opcode = *(u8 *)insn;
if (tail) {
if (opcode == JMP32_INSN_OPCODE ||
opcode == RET_INSN_OPCODE)
return;
} else {
if (opcode == CALL_INSN_OPCODE ||
!memcmp(insn, ideal_nops[NOP_ATOMIC5], 5))
return;
}
/*
* If we ever trigger this, our text is corrupt, we'll probably not live long.
*/
WARN_ONCE(1, "unexpected static_call insn opcode 0x%x at %pS\n", opcode, insn);
}
static inline enum insn_type __sc_insn(bool null, bool tail)
{
/*
* Encode the following table without branches:
*
* tail null insn
* -----+-------+------
* 0 | 0 | CALL
* 0 | 1 | NOP
* 1 | 0 | JMP
* 1 | 1 | RET
*/
return 2*tail + null;
}
void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
{
mutex_lock(&text_mutex);
if (tramp) {
__static_call_validate(tramp, true);
__static_call_transform(tramp, __sc_insn(!func, true), func);
}
if (IS_ENABLED(CONFIG_HAVE_STATIC_CALL_INLINE) && site) {
__static_call_validate(site, tail);
__static_call_transform(site, __sc_insn(!func, tail), func);
}
mutex_unlock(&text_mutex);
}
EXPORT_SYMBOL_GPL(arch_static_call_transform);
arch/x86/kernel/vmlinux.lds.S
@@ -136,6 +136,7 @@ SECTIONS
 		ENTRY_TEXT
 		ALIGN_ENTRY_TEXT_END
 		SOFTIRQENTRY_TEXT
+		STATIC_CALL_TEXT
 		*(.fixup)
 		*(.gnu.warning)
...
drivers/oprofile/buffer_sync.c
@@ -116,7 +116,7 @@ module_load_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 #ifdef CONFIG_MODULES
 	if (val != MODULE_STATE_COMING)
-		return 0;
+		return NOTIFY_DONE;
 
 	/* FIXME: should we process all CPU buffers ? */
 	mutex_lock(&buffer_mutex);
@@ -124,7 +124,7 @@ module_load_notify(struct notifier_block *self, unsigned long val, void *data)
 	add_event_entry(MODULE_LOADED_CODE);
 	mutex_unlock(&buffer_mutex);
 #endif
-	return 0;
+	return NOTIFY_OK;
 }
...
include/asm-generic/vmlinux.lds.h
@@ -389,6 +389,12 @@
 		KEEP(*(__jump_table))					\
 		__stop___jump_table = .;
 
+#define STATIC_CALL_DATA						\
+		. = ALIGN(8);						\
+		__start_static_call_sites = .;				\
+		KEEP(*(.static_call_sites))				\
+		__stop_static_call_sites = .;
+
 /*
  * Allow architectures to handle ro_after_init data on their
  * own by defining an empty RO_AFTER_INIT_DATA.
@@ -399,6 +405,7 @@
 	__start_ro_after_init = .;					\
 	*(.data..ro_after_init)						\
 	JUMP_TABLE_DATA							\
+	STATIC_CALL_DATA						\
 	__end_ro_after_init = .;
 #endif
 
@@ -639,6 +646,12 @@
 		*(.softirqentry.text)					\
 		__softirqentry_text_end = .;
 
+#define STATIC_CALL_TEXT						\
+		ALIGN_FUNCTION();					\
+		__static_call_text_start = .;				\
+		*(.static_call.text)					\
+		__static_call_text_end = .;
+
 /* Section used for early init (in .S files) */
 #define HEAD_TEXT  KEEP(*(.head.text))
...
include/linux/compiler.h
@@ -207,7 +207,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
  */
 #define __ADDRESSABLE(sym) \
 	static void * __section(.discard.addressable) __used \
-		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
+		__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
 
 /**
  * offset_to_ptr - convert a relative memory offset to an absolute pointer
...
include/linux/module.h
@@ -25,6 +25,7 @@
 #include <linux/error-injection.h>
 #include <linux/tracepoint-defs.h>
 #include <linux/srcu.h>
+#include <linux/static_call_types.h>
 
 #include <linux/percpu.h>
 #include <asm/module.h>
@@ -498,6 +499,10 @@ struct module {
 	unsigned long *kprobe_blacklist;
 	unsigned int num_kprobe_blacklist;
 #endif
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+	int num_static_call_sites;
+	struct static_call_site *static_call_sites;
+#endif
 
 #ifdef CONFIG_LIVEPATCH
 	bool klp; /* Is this a livepatch module? */
...
include/linux/notifier.h
@@ -161,20 +161,19 @@ extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
 
 extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
 		unsigned long val, void *v);
-extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
-	unsigned long val, void *v, int nr_to_call, int *nr_calls);
 extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
 		unsigned long val, void *v);
-extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
-	unsigned long val, void *v, int nr_to_call, int *nr_calls);
 extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
 		unsigned long val, void *v);
-extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
-	unsigned long val, void *v, int nr_to_call, int *nr_calls);
 extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
 		unsigned long val, void *v);
-extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
-	unsigned long val, void *v, int nr_to_call, int *nr_calls);
+
+extern int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh,
+		unsigned long val_up, unsigned long val_down, void *v);
+extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
+		unsigned long val_up, unsigned long val_down, void *v);
+extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
+		unsigned long val_up, unsigned long val_down, void *v);
 
 #define NOTIFY_DONE		0x0000		/* Don't care */
 #define NOTIFY_OK		0x0001		/* Suits me */
...
include/linux/static_call.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STATIC_CALL_H
#define _LINUX_STATIC_CALL_H
/*
* Static call support
*
* Static calls use code patching to hard-code function pointers into direct
* branch instructions. They give the flexibility of function pointers, but
* with improved performance. This is especially important for cases where
* retpolines would otherwise be used, as retpolines can significantly impact
* performance.
*
*
* API overview:
*
* DECLARE_STATIC_CALL(name, func);
* DEFINE_STATIC_CALL(name, func);
* DEFINE_STATIC_CALL_NULL(name, typename);
* static_call(name)(args...);
* static_call_cond(name)(args...);
* static_call_update(name, func);
*
* Usage example:
*
* # Start with the following functions (with identical prototypes):
* int func_a(int arg1, int arg2);
* int func_b(int arg1, int arg2);
*
* # Define a 'my_name' reference, associated with func_a() by default
* DEFINE_STATIC_CALL(my_name, func_a);
*
* # Call func_a()
* static_call(my_name)(arg1, arg2);
*
* # Update 'my_name' to point to func_b()
* static_call_update(my_name, &func_b);
*
* # Call func_b()
* static_call(my_name)(arg1, arg2);
*
*
* Implementation details:
*
* This requires some arch-specific code (CONFIG_HAVE_STATIC_CALL).
* Otherwise basic indirect calls are used (with function pointers).
*
* Each static_call() site calls into a trampoline associated with the name.
* The trampoline has a direct branch to the default function. Updates to a
* name will modify the trampoline's branch destination.
*
* If the arch has CONFIG_HAVE_STATIC_CALL_INLINE, then the call sites
* themselves will be patched at runtime to call the functions directly,
* rather than calling through the trampoline. This requires objtool or a
* compiler plugin to detect all the static_call() sites and annotate them
* in the .static_call_sites section.
*
*
* Notes on NULL function pointers:
*
* Static_call()s support NULL functions, with many of the caveats that
* regular function pointers have.
*
* Clearly calling a NULL function pointer is 'BAD', so too for
* static_call()s (although when HAVE_STATIC_CALL it might not be immediately
* fatal). A NULL static_call can be the result of:
*
 *   DEFINE_STATIC_CALL_NULL(my_static_call, void (*)(int));
*
* which is equivalent to declaring a NULL function pointer with just a
* typename:
*
* void (*my_func_ptr)(int arg1) = NULL;
*
* or using static_call_update() with a NULL function. In both cases the
* HAVE_STATIC_CALL implementation will patch the trampoline with a RET
* instruction, instead of an immediate tail-call JMP. HAVE_STATIC_CALL_INLINE
* architectures can patch the trampoline call to a NOP.
*
* In all cases, any argument evaluation is unconditional. Unlike a regular
* conditional function pointer call:
*
* if (my_func_ptr)
* my_func_ptr(arg1)
*
 * where the argument evaluation also depends on the pointer value.
*
* When calling a static_call that can be NULL, use:
*
* static_call_cond(name)(arg1);
*
* which will include the required value tests to avoid NULL-pointer
* dereferences.
*/
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/static_call_types.h>
#ifdef CONFIG_HAVE_STATIC_CALL
#include <asm/static_call.h>
/*
* Either @site or @tramp can be NULL.
*/
extern void arch_static_call_transform(void *site, void *tramp, void *func, bool tail);
#define STATIC_CALL_TRAMP_ADDR(name) &STATIC_CALL_TRAMP(name)
/*
* __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
* the symbol table so that objtool can reference it when it generates the
* .static_call_sites section.
*/
#define __static_call(name) \
({ \
__ADDRESSABLE(STATIC_CALL_KEY(name)); \
&STATIC_CALL_TRAMP(name); \
})
#else
#define STATIC_CALL_TRAMP_ADDR(name) NULL
#endif
#define DECLARE_STATIC_CALL(name, func) \
extern struct static_call_key STATIC_CALL_KEY(name); \
extern typeof(func) STATIC_CALL_TRAMP(name);
#define static_call_update(name, func) \
({ \
BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name))); \
__static_call_update(&STATIC_CALL_KEY(name), \
STATIC_CALL_TRAMP_ADDR(name), func); \
})
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
extern int __init static_call_init(void);
struct static_call_mod {
struct static_call_mod *next;
struct module *mod; /* for vmlinux, mod == NULL */
struct static_call_site *sites;
};
struct static_call_key {
void *func;
union {
/* bit 0: 0 = mods, 1 = sites */
unsigned long type;
struct static_call_mod *mods;
struct static_call_site *sites;
};
};
extern void __static_call_update(struct static_call_key *key, void *tramp, void *func);
extern int static_call_mod_init(struct module *mod);
extern int static_call_text_reserved(void *start, void *end);
#define DEFINE_STATIC_CALL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = _func, \
.type = 1, \
}; \
ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
#define DEFINE_STATIC_CALL_NULL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = NULL, \
.type = 1, \
}; \
ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
#define static_call(name) __static_call(name)
#define static_call_cond(name) (void)__static_call(name)
#define EXPORT_STATIC_CALL(name) \
EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name) \
EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
#elif defined(CONFIG_HAVE_STATIC_CALL)
static inline int static_call_init(void) { return 0; }
struct static_call_key {
void *func;
};
#define DEFINE_STATIC_CALL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = _func, \
}; \
ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
#define DEFINE_STATIC_CALL_NULL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = NULL, \
}; \
ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
#define static_call(name) __static_call(name)
#define static_call_cond(name) (void)__static_call(name)
static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
cpus_read_lock();
WRITE_ONCE(key->func, func);
arch_static_call_transform(NULL, tramp, func, false);
cpus_read_unlock();
}
static inline int static_call_text_reserved(void *start, void *end)
{
return 0;
}
#define EXPORT_STATIC_CALL(name) \
EXPORT_SYMBOL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL(STATIC_CALL_TRAMP(name))
#define EXPORT_STATIC_CALL_GPL(name) \
EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name)); \
EXPORT_SYMBOL_GPL(STATIC_CALL_TRAMP(name))
#else /* Generic implementation */
static inline int static_call_init(void) { return 0; }
struct static_call_key {
void *func;
};
#define DEFINE_STATIC_CALL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = _func, \
}
#define DEFINE_STATIC_CALL_NULL(name, _func) \
DECLARE_STATIC_CALL(name, _func); \
struct static_call_key STATIC_CALL_KEY(name) = { \
.func = NULL, \
}
#define static_call(name) \
((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
static inline void __static_call_nop(void) { }
/*
* This horrific hack takes care of two things:
*
* - it ensures the compiler will only load the function pointer ONCE,
* which avoids a reload race.
*
* - it ensures the argument evaluation is unconditional, similar
* to the HAVE_STATIC_CALL variant.
*
* Sadly current GCC/Clang (10 for both) do not optimize this properly
* and will emit an indirect call for the NULL case :-(
*/
#define __static_call_cond(name) \
({ \
void *func = READ_ONCE(STATIC_CALL_KEY(name).func); \
if (!func) \
func = &__static_call_nop; \
(typeof(STATIC_CALL_TRAMP(name))*)func; \
})
#define static_call_cond(name) (void)__static_call_cond(name)
static inline
void __static_call_update(struct static_call_key *key, void *tramp, void *func)
{
WRITE_ONCE(key->func, func);
}
static inline int static_call_text_reserved(void *start, void *end)
{
return 0;
}
#define EXPORT_STATIC_CALL(name) EXPORT_SYMBOL(STATIC_CALL_KEY(name))
#define EXPORT_STATIC_CALL_GPL(name) EXPORT_SYMBOL_GPL(STATIC_CALL_KEY(name))
#endif /* CONFIG_HAVE_STATIC_CALL */
#endif /* _LINUX_STATIC_CALL_H */
include/linux/static_call_types.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _STATIC_CALL_TYPES_H
#define _STATIC_CALL_TYPES_H
#include <linux/types.h>
#include <linux/stringify.h>
#define STATIC_CALL_KEY_PREFIX __SCK__
#define STATIC_CALL_KEY_PREFIX_STR __stringify(STATIC_CALL_KEY_PREFIX)
#define STATIC_CALL_KEY_PREFIX_LEN (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
#define STATIC_CALL_KEY(name) __PASTE(STATIC_CALL_KEY_PREFIX, name)
#define STATIC_CALL_TRAMP_PREFIX __SCT__
#define STATIC_CALL_TRAMP_PREFIX_STR __stringify(STATIC_CALL_TRAMP_PREFIX)
#define STATIC_CALL_TRAMP_PREFIX_LEN (sizeof(STATIC_CALL_TRAMP_PREFIX_STR) - 1)
#define STATIC_CALL_TRAMP(name) __PASTE(STATIC_CALL_TRAMP_PREFIX, name)
#define STATIC_CALL_TRAMP_STR(name) __stringify(STATIC_CALL_TRAMP(name))
/*
* Flags in the low bits of static_call_site::key.
*/
#define STATIC_CALL_SITE_TAIL 1UL /* tail call */
#define STATIC_CALL_SITE_INIT 2UL /* init section */
#define STATIC_CALL_SITE_FLAGS 3UL
/*
* The static call site table needs to be created by external tooling (objtool
* or a compiler plugin).
*/
struct static_call_site {
s32 addr;
s32 key;
};
#endif /* _STATIC_CALL_TYPES_H */
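
In the kernel proper these two s32 fields are self-relative offsets, which
keeps the table compact and position-independent. As a hedged sketch (the
helper names follow kernel/static_call.c, which is not part of this
excerpt), a consumer of the table can decode a site like this:

    static inline void *static_call_addr(struct static_call_site *site)
    {
            /* self-relative reference to the patched call instruction */
            return (void *)((long)site->addr + (long)&site->addr);
    }

    static inline struct static_call_key *
    static_call_key(const struct static_call_site *site)
    {
            /* also self-relative; the low two bits carry the flags */
            return (void *)(((long)site->key + (long)&site->key) &
                            ~STATIC_CALL_SITE_FLAGS);
    }

    static inline bool static_call_is_tail(struct static_call_site *site)
    {
            return ((long)site->key + (long)&site->key) &
                   STATIC_CALL_SITE_TAIL;
    }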
include/linux/tracepoint-defs.h
@@ -11,6 +11,8 @@
 #include <linux/atomic.h>
 #include <linux/static_key.h>
 
+struct static_call_key;
+
 struct trace_print_flags {
 	unsigned long mask;
 	const char *name;
@@ -30,6 +32,9 @@ struct tracepoint_func {
 struct tracepoint {
 	const char *name;		/* Tracepoint name */
 	struct static_key key;
+	struct static_call_key *static_call_key;
+	void *static_call_tramp;
+	void *iterator;
 	int (*regfunc)(void);
 	void (*unregfunc)(void);
 	struct tracepoint_func __rcu *funcs;
...
include/linux/tracepoint.h
@@ -19,6 +19,7 @@
 #include <linux/cpumask.h>
 #include <linux/rcupdate.h>
 #include <linux/tracepoint-defs.h>
+#include <linux/static_call.h>
 
 struct module;
 struct tracepoint;
@@ -92,7 +93,9 @@ extern int syscall_regfunc(void);
 extern void syscall_unregfunc(void);
 #endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
 
+#ifndef PARAMS
 #define PARAMS(args...) args
+#endif
 
 #define TRACE_DEFINE_ENUM(x)
 #define TRACE_DEFINE_SIZEOF(x)
@@ -148,6 +151,12 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 
 #ifdef TRACEPOINTS_ENABLED
 
+#ifdef CONFIG_HAVE_STATIC_CALL
+#define __DO_TRACE_CALL(name)	static_call(tp_func_##name)
+#else
+#define __DO_TRACE_CALL(name)	__traceiter_##name
+#endif /* CONFIG_HAVE_STATIC_CALL */
+
 /*
  * it_func[0] is never NULL because there is at least one element in the array
  * when the array itself is non NULL.
@@ -157,12 +166,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
  * has a "void" prototype, then it is invalid to declare a function
  * as "(void *, void)".
  */
-#define __DO_TRACE(tp, proto, args, cond, rcuidle)			\
+#define __DO_TRACE(name, proto, args, cond, rcuidle)			\
 	do {								\
 		struct tracepoint_func *it_func_ptr;			\
-		void *it_func;						\
-		void *__data;						\
 		int __maybe_unused __idx = 0;				\
+		void *__data;						\
 									\
 		if (!(cond))						\
 			return;						\
@@ -182,14 +190,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 			rcu_irq_enter_irqson();				\
 		}							\
 									\
-		it_func_ptr = rcu_dereference_raw((tp)->funcs);		\
-									\
+		it_func_ptr =						\
+			rcu_dereference_raw((&__tracepoint_##name)->funcs); \
 		if (it_func_ptr) {					\
-			do {						\
-				it_func = (it_func_ptr)->func;		\
-				__data = (it_func_ptr)->data;		\
-				((void(*)(proto))(it_func))(args);	\
-			} while ((++it_func_ptr)->func);		\
+			__data = (it_func_ptr)->data;			\
+			__DO_TRACE_CALL(name)(args);			\
 		}							\
 									\
 		if (rcuidle) {						\
@@ -205,7 +210,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 	static inline void trace_##name##_rcuidle(proto)		\
 	{								\
 		if (static_key_false(&__tracepoint_##name.key))		\
-			__DO_TRACE(&__tracepoint_##name,		\
+			__DO_TRACE(name,				\
 				TP_PROTO(data_proto),			\
 				TP_ARGS(data_args),			\
 				TP_CONDITION(cond), 1);			\
@@ -227,11 +232,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
  * poking RCU a bit.
  */
 #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
+	extern int __traceiter_##name(data_proto);			\
+	DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name);	\
 	extern struct tracepoint __tracepoint_##name;			\
 	static inline void trace_##name(proto)				\
 	{								\
 		if (static_key_false(&__tracepoint_##name.key))		\
-			__DO_TRACE(&__tracepoint_##name,		\
+			__DO_TRACE(name,				\
 				TP_PROTO(data_proto),			\
 				TP_ARGS(data_args),			\
 				TP_CONDITION(cond), 0);			\
@@ -277,21 +284,50 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 * structures, so we create an array of pointers that will be used for iteration
 * on the tracepoints.
 */
-#define DEFINE_TRACE_FN(name, reg, unreg)				\
-	static const char __tpstrtab_##name[]				\
-	__section(__tracepoints_strings) = #name;			\
-	struct tracepoint __tracepoint_##name __used			\
-	__section(__tracepoints) =					\
-		{ __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
-	__TRACEPOINT_ENTRY(name);
+#define DEFINE_TRACE_FN(_name, _reg, _unreg, proto, args)		\
+	static const char __tpstrtab_##_name[]				\
+	__section(__tracepoints_strings) = #_name;			\
+	extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name); \
+	int __traceiter_##_name(void *__data, proto);			\
+	struct tracepoint __tracepoint_##_name	__used			\
+	__section(__tracepoints) = {					\
+		.name = __tpstrtab_##_name,				\
+		.key = STATIC_KEY_INIT_FALSE,				\
+		.static_call_key = &STATIC_CALL_KEY(tp_func_##_name),	\
+		.static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \
+		.iterator = &__traceiter_##_name,			\
+		.regfunc = _reg,					\
+		.unregfunc = _unreg,					\
+		.funcs = NULL };					\
+	__TRACEPOINT_ENTRY(_name);					\
+	int __traceiter_##_name(void *__data, proto)			\
+	{								\
+		struct tracepoint_func *it_func_ptr;			\
+		void *it_func;						\
+									\
+		it_func_ptr =						\
+			rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
+		do {							\
+			it_func = (it_func_ptr)->func;			\
+			__data = (it_func_ptr)->data;			\
+			((void(*)(void *, proto))(it_func))(__data, args); \
+		} while ((++it_func_ptr)->func);			\
+		return 0;						\
+	}								\
+	DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
 
-#define DEFINE_TRACE(name)						\
-	DEFINE_TRACE_FN(name, NULL, NULL);
+#define DEFINE_TRACE(name, proto, args)					\
+	DEFINE_TRACE_FN(name, NULL, NULL, PARAMS(proto), PARAMS(args));
 
 #define EXPORT_TRACEPOINT_SYMBOL_GPL(name)				\
-	EXPORT_SYMBOL_GPL(__tracepoint_##name)
+	EXPORT_SYMBOL_GPL(__tracepoint_##name);				\
+	EXPORT_SYMBOL_GPL(__traceiter_##name);				\
+	EXPORT_STATIC_CALL_GPL(tp_func_##name)
 #define EXPORT_TRACEPOINT_SYMBOL(name)					\
-	EXPORT_SYMBOL(__tracepoint_##name)
+	EXPORT_SYMBOL(__tracepoint_##name);				\
+	EXPORT_SYMBOL(__traceiter_##name);				\
+	EXPORT_STATIC_CALL(tp_func_##name)
 
 #else /* !TRACEPOINTS_ENABLED */
 
@@ -320,8 +356,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 		return false;						\
 	}
 
-#define DEFINE_TRACE_FN(name, reg, unreg)
-#define DEFINE_TRACE(name)
+#define DEFINE_TRACE_FN(name, reg, unreg, proto, args)
+#define DEFINE_TRACE(name, proto, args)
 #define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
 #define EXPORT_TRACEPOINT_SYMBOL(name)
...
include/trace/define_trace.h
@@ -25,7 +25,7 @@
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
-	DEFINE_TRACE(name)
+	DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
 
 #undef TRACE_EVENT_CONDITION
 #define TRACE_EVENT_CONDITION(name, proto, args, cond, tstruct, assign, print) \
@@ -39,12 +39,12 @@
 #undef TRACE_EVENT_FN
 #define TRACE_EVENT_FN(name, proto, args, tstruct,		\
 		assign, print, reg, unreg)			\
-	DEFINE_TRACE_FN(name, reg, unreg)
+	DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
 
 #undef TRACE_EVENT_FN_COND
 #define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct,	\
 		assign, print, reg, unreg)			\
-	DEFINE_TRACE_FN(name, reg, unreg)
+	DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
 
 #undef TRACE_EVENT_NOP
 #define TRACE_EVENT_NOP(name, proto, args, struct, assign, print)
@@ -54,15 +54,15 @@
 
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)		\
-	DEFINE_TRACE(name)
+	DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
 
 #undef DEFINE_EVENT_FN
 #define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg) \
-	DEFINE_TRACE_FN(name, reg, unreg)
+	DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args))
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
-	DEFINE_TRACE(name)
+	DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
 
 #undef DEFINE_EVENT_CONDITION
 #define DEFINE_EVENT_CONDITION(template, name, proto, args, cond) \
@@ -70,7 +70,7 @@
 
 #undef DECLARE_TRACE
 #define DECLARE_TRACE(name, proto, args)	\
-	DEFINE_TRACE(name)
+	DEFINE_TRACE(name, PARAMS(proto), PARAMS(args))
 
 #undef TRACE_INCLUDE
 #undef __TRACE_INCLUDE
...
kernel/Makefile
@@ -111,6 +111,7 @@ obj-$(CONFIG_CPU_PM) += cpu_pm.o
 obj-$(CONFIG_BPF) += bpf/
 obj-$(CONFIG_KCSAN) += kcsan/
 obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
+obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call.o
 
 obj-$(CONFIG_PERF_EVENTS) += events/
...
kernel/cpu_pm.c
@@ -15,18 +15,28 @@
 
 static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
 
-static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+static int cpu_pm_notify(enum cpu_pm_event event)
 {
 	int ret;
 
 	/*
-	 * __atomic_notifier_call_chain has a RCU read critical section, which
+	 * atomic_notifier_call_chain has a RCU read critical section, which
 	 * could be disfunctional in cpu idle. Copy RCU_NONIDLE code to let
 	 * RCU know this.
 	 */
 	rcu_irq_enter_irqson();
-	ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
-		nr_to_call, nr_calls);
+	ret = atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL);
+	rcu_irq_exit_irqson();
+
+	return notifier_to_errno(ret);
+}
+
+static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
+{
+	int ret;
+
+	rcu_irq_enter_irqson();
+	ret = atomic_notifier_call_chain_robust(&cpu_pm_notifier_chain, event_up, event_down, NULL);
 	rcu_irq_exit_irqson();
 
 	return notifier_to_errno(ret);
@@ -80,18 +90,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
  */
 int cpu_pm_enter(void)
 {
-	int nr_calls = 0;
-	int ret = 0;
-
-	ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
-	if (ret)
-		/*
-		 * Inform listeners (nr_calls - 1) about failure of CPU PM
-		 * PM entry who are notified earlier to prepare for it.
-		 */
-		cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
-
-	return ret;
+	return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED);
 }
 EXPORT_SYMBOL_GPL(cpu_pm_enter);
 
@@ -109,7 +108,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_enter);
  */
 int cpu_pm_exit(void)
 {
-	return cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+	return cpu_pm_notify(CPU_PM_EXIT);
 }
 EXPORT_SYMBOL_GPL(cpu_pm_exit);
@@ -131,18 +130,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
  */
 int cpu_cluster_pm_enter(void)
 {
-	int nr_calls = 0;
-	int ret = 0;
-
-	ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
-	if (ret)
-		/*
-		 * Inform listeners (nr_calls - 1) about failure of CPU cluster
-		 * PM entry who are notified earlier to prepare for it.
-		 */
-		cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
-
-	return ret;
+	return cpu_pm_notify_robust(CPU_CLUSTER_PM_ENTER, CPU_CLUSTER_PM_ENTER_FAILED);
 }
 EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
 
@@ -163,7 +151,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
  */
 int cpu_cluster_pm_exit(void)
 {
-	return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+	return cpu_pm_notify(CPU_CLUSTER_PM_EXIT);
 }
 EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
...
...@@ -539,19 +539,25 @@ static void static_key_set_mod(struct static_key *key, ...@@ -539,19 +539,25 @@ static void static_key_set_mod(struct static_key *key,
static int __jump_label_mod_text_reserved(void *start, void *end) static int __jump_label_mod_text_reserved(void *start, void *end)
{ {
struct module *mod; struct module *mod;
int ret;
preempt_disable(); preempt_disable();
mod = __module_text_address((unsigned long)start); mod = __module_text_address((unsigned long)start);
WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod); WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
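	/* pin the module so mod->jump_entries stays valid after preempt_enable() */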
if (!try_module_get(mod))
mod = NULL;
preempt_enable(); preempt_enable();
if (!mod) if (!mod)
return 0; return 0;
ret = __jump_label_text_reserved(mod->jump_entries,
return __jump_label_text_reserved(mod->jump_entries,
mod->jump_entries + mod->num_jump_entries, mod->jump_entries + mod->num_jump_entries,
start, end); start, end);
module_put(mod);
return ret;
} }
static void __jump_label_mod_update(struct static_key *key) static void __jump_label_mod_update(struct static_key *key)
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/jump_label.h> #include <linux/jump_label.h>
#include <linux/perf_event.h> #include <linux/perf_event.h>
#include <linux/static_call.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
...@@ -1634,6 +1635,7 @@ static int check_kprobe_address_safe(struct kprobe *p, ...@@ -1634,6 +1635,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
if (!kernel_text_address((unsigned long) p->addr) || if (!kernel_text_address((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) || within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) || jump_label_text_reserved(p->addr, p->addr) ||
static_call_text_reserved(p->addr, p->addr) ||
find_bug((unsigned long)p->addr)) { find_bug((unsigned long)p->addr)) {
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
......
...@@ -3274,6 +3274,11 @@ static int find_module_sections(struct module *mod, struct load_info *info) ...@@ -3274,6 +3274,11 @@ static int find_module_sections(struct module *mod, struct load_info *info)
mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist", mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist",
sizeof(unsigned long), sizeof(unsigned long),
&mod->num_kprobe_blacklist); &mod->num_kprobe_blacklist);
#endif
#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
mod->static_call_sites = section_objs(info, ".static_call_sites",
sizeof(*mod->static_call_sites),
&mod->num_static_call_sites);
#endif #endif
mod->extable = section_objs(info, "__ex_table", mod->extable = section_objs(info, "__ex_table",
sizeof(*mod->extable), &mod->num_exentries); sizeof(*mod->extable), &mod->num_exentries);
...@@ -3792,9 +3797,13 @@ static int prepare_coming_module(struct module *mod) ...@@ -3792,9 +3797,13 @@ static int prepare_coming_module(struct module *mod)
if (err) if (err)
return err; return err;
blocking_notifier_call_chain(&module_notify_list, err = blocking_notifier_call_chain_robust(&module_notify_list,
MODULE_STATE_COMING, mod); MODULE_STATE_COMING, MODULE_STATE_GOING, mod);
return 0; err = notifier_to_errno(err);
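	/* the robust chain already unwound its own listeners; livepatch, notified earlier, must be unwound by hand */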
if (err)
klp_module_going(mod);
return err;
} }
static int unknown_module_param_cb(char *param, char *val, const char *modname, static int unknown_module_param_cb(char *param, char *val, const char *modname,
......
...@@ -94,6 +94,34 @@ static int notifier_call_chain(struct notifier_block **nl, ...@@ -94,6 +94,34 @@ static int notifier_call_chain(struct notifier_block **nl,
} }
NOKPROBE_SYMBOL(notifier_call_chain); NOKPROBE_SYMBOL(notifier_call_chain);
/**
* notifier_call_chain_robust - Inform the registered notifiers about an event
* and roll back on error.
* @nl: Pointer to head of the notifier chain
* @val_up: Value passed unmodified to the notifier function
* @val_down: Value passed unmodified to the notifier function when recovering
* from an error on @val_up
* @v: Pointer passed unmodified to the notifier function
*
* NOTE: It is important that the @nl chain doesn't change between the two
* invocations of notifier_call_chain() such that we visit the
* exact same notifier callbacks; this rules out any RCU usage.
*
* Returns: the return value of the @val_up call.
*/
static int notifier_call_chain_robust(struct notifier_block **nl,
unsigned long val_up, unsigned long val_down,
void *v)
{
int ret, nr = 0;
ret = notifier_call_chain(nl, val_up, v, -1, &nr);
if (ret & NOTIFY_STOP_MASK)
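		/* replay @val_down to the nr-1 callbacks that already saw @val_up */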
notifier_call_chain(nl, val_down, v, nr-1, NULL);
return ret;
}
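To make the rollback contract concrete, here is a minimal, hedged sketch (not part of the patch) of a robust call on a raw chain; the event values MY_PREPARE/MY_ABORT and both callbacks are invented for illustration:

#include <linux/notifier.h>
#include <linux/errno.h>

#define MY_PREPARE	1	/* hypothetical "up" event */
#define MY_ABORT	2	/* hypothetical "down" event */

static RAW_NOTIFIER_HEAD(my_chain);

static int first_cb(struct notifier_block *nb, unsigned long val, void *v)
{
	return NOTIFY_OK;				/* succeeds for both events */
}

static int second_cb(struct notifier_block *nb, unsigned long val, void *v)
{
	if (val == MY_PREPARE)
		return notifier_from_errno(-EBUSY);	/* veto the "up" pass */
	return NOTIFY_OK;
}

static struct notifier_block first_nb = { .notifier_call = first_cb };
static struct notifier_block second_nb = { .notifier_call = second_cb };

static int robust_example(void)
{
	int ret;

	raw_notifier_chain_register(&my_chain, &first_nb);
	raw_notifier_chain_register(&my_chain, &second_nb);

	/*
	 * The up pass runs first_cb then second_cb; second_cb fails, so
	 * the down pass replays MY_ABORT to first_cb only.
	 */
	ret = raw_notifier_call_chain_robust(&my_chain, MY_PREPARE, MY_ABORT, NULL);
	return notifier_to_errno(ret);			/* -EBUSY */
}

The callback that failed never sees the down event, matching the nr-1 bound in notifier_call_chain_robust() above.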
/* /*
* Atomic notifier chain routines. Registration and unregistration * Atomic notifier chain routines. Registration and unregistration
* use a spinlock, and call_chain is synchronized by RCU (no locks). * use a spinlock, and call_chain is synchronized by RCU (no locks).
...@@ -144,13 +172,30 @@ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, ...@@ -144,13 +172,30 @@ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
} }
EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v)
{
unsigned long flags;
int ret;
/*
* Mustn't use RCU here; the notifier list could otherwise
* change between the up and down traversals.
*/
spin_lock_irqsave(&nh->lock, flags);
ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v);
spin_unlock_irqrestore(&nh->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_call_chain_robust);
NOKPROBE_SYMBOL(atomic_notifier_call_chain_robust);
/** /**
* __atomic_notifier_call_chain - Call functions in an atomic notifier chain * atomic_notifier_call_chain - Call functions in an atomic notifier chain
* @nh: Pointer to head of the atomic notifier chain * @nh: Pointer to head of the atomic notifier chain
* @val: Value passed unmodified to notifier function * @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function
* @nr_to_call: See the comment for notifier_call_chain.
* @nr_calls: See the comment for notifier_call_chain.
* *
* Calls each function in a notifier chain in turn. The functions * Calls each function in a notifier chain in turn. The functions
* run in an atomic context, so they must not block. * run in an atomic context, so they must not block.
...@@ -163,24 +208,16 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); ...@@ -163,24 +208,16 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
* Otherwise the return value is the return value * Otherwise the return value is the return value
* of the last notifier function called. * of the last notifier function called.
*/ */
int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
unsigned long val, void *v, unsigned long val, void *v)
int nr_to_call, int *nr_calls)
{ {
int ret; int ret;
rcu_read_lock(); rcu_read_lock();
ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
rcu_read_unlock(); rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);
NOKPROBE_SYMBOL(__atomic_notifier_call_chain);
int atomic_notifier_call_chain(struct atomic_notifier_head *nh, return ret;
unsigned long val, void *v)
{
return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
} }
EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
NOKPROBE_SYMBOL(atomic_notifier_call_chain); NOKPROBE_SYMBOL(atomic_notifier_call_chain);
...@@ -250,13 +287,30 @@ int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, ...@@ -250,13 +287,30 @@ int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
} }
EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v)
{
int ret = NOTIFY_DONE;
/*
* We check the head outside the lock, but if this access is
* racy then it does not matter what the result of the test
* is; we re-check the list after taking the lock anyway:
*/
if (rcu_access_pointer(nh->head)) {
down_read(&nh->rwsem);
ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v);
up_read(&nh->rwsem);
}
return ret;
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain_robust);
/** /**
* __blocking_notifier_call_chain - Call functions in a blocking notifier chain * blocking_notifier_call_chain - Call functions in a blocking notifier chain
* @nh: Pointer to head of the blocking notifier chain * @nh: Pointer to head of the blocking notifier chain
* @val: Value passed unmodified to notifier function * @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function
* @nr_to_call: See comment for notifier_call_chain.
* @nr_calls: See comment for notifier_call_chain.
* *
* Calls each function in a notifier chain in turn. The functions * Calls each function in a notifier chain in turn. The functions
* run in a process context, so they are allowed to block. * run in a process context, so they are allowed to block.
...@@ -268,9 +322,8 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); ...@@ -268,9 +322,8 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
* Otherwise the return value is the return value * Otherwise the return value is the return value
* of the last notifier function called. * of the last notifier function called.
*/ */
int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v, unsigned long val, void *v)
int nr_to_call, int *nr_calls)
{ {
int ret = NOTIFY_DONE; int ret = NOTIFY_DONE;
...@@ -281,19 +334,11 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, ...@@ -281,19 +334,11 @@ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
*/ */
if (rcu_access_pointer(nh->head)) { if (rcu_access_pointer(nh->head)) {
down_read(&nh->rwsem); down_read(&nh->rwsem);
ret = notifier_call_chain(&nh->head, val, v, nr_to_call, ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
nr_calls);
up_read(&nh->rwsem); up_read(&nh->rwsem);
} }
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);
int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
unsigned long val, void *v)
{
return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
/* /*
...@@ -335,13 +380,18 @@ int raw_notifier_chain_unregister(struct raw_notifier_head *nh, ...@@ -335,13 +380,18 @@ int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
} }
EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
unsigned long val_up, unsigned long val_down, void *v)
{
return notifier_call_chain_robust(&nh->head, val_up, val_down, v);
}
EXPORT_SYMBOL_GPL(raw_notifier_call_chain_robust);
/** /**
* __raw_notifier_call_chain - Call functions in a raw notifier chain * raw_notifier_call_chain - Call functions in a raw notifier chain
* @nh: Pointer to head of the raw notifier chain * @nh: Pointer to head of the raw notifier chain
* @val: Value passed unmodified to notifier function * @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function
* @nr_to_call: See comment for notifier_call_chain.
* @nr_calls: See comment for notifier_call_chain
* *
* Calls each function in a notifier chain in turn. The functions * Calls each function in a notifier chain in turn. The functions
* run in an undefined context. * run in an undefined context.
...@@ -354,18 +404,10 @@ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); ...@@ -354,18 +404,10 @@ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
* Otherwise the return value is the return value * Otherwise the return value is the return value
* of the last notifier function called. * of the last notifier function called.
*/ */
int __raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v,
int nr_to_call, int *nr_calls)
{
return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
}
EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);
int raw_notifier_call_chain(struct raw_notifier_head *nh, int raw_notifier_call_chain(struct raw_notifier_head *nh,
unsigned long val, void *v) unsigned long val, void *v)
{ {
return __raw_notifier_call_chain(nh, val, v, -1, NULL); return notifier_call_chain(&nh->head, val, v, -1, NULL);
} }
EXPORT_SYMBOL_GPL(raw_notifier_call_chain); EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
...@@ -437,12 +479,10 @@ int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, ...@@ -437,12 +479,10 @@ int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
/** /**
* __srcu_notifier_call_chain - Call functions in an SRCU notifier chain * srcu_notifier_call_chain - Call functions in an SRCU notifier chain
* @nh: Pointer to head of the SRCU notifier chain * @nh: Pointer to head of the SRCU notifier chain
* @val: Value passed unmodified to notifier function * @val: Value passed unmodified to notifier function
* @v: Pointer passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function
* @nr_to_call: See comment for notifier_call_chain.
* @nr_calls: See comment for notifier_call_chain
* *
* Calls each function in a notifier chain in turn. The functions * Calls each function in a notifier chain in turn. The functions
* run in a process context, so they are allowed to block. * run in a process context, so they are allowed to block.
...@@ -454,25 +494,17 @@ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); ...@@ -454,25 +494,17 @@ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
* Otherwise the return value is the return value * Otherwise the return value is the return value
* of the last notifier function called. * of the last notifier function called.
*/ */
int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v, unsigned long val, void *v)
int nr_to_call, int *nr_calls)
{ {
int ret; int ret;
int idx; int idx;
idx = srcu_read_lock(&nh->srcu); idx = srcu_read_lock(&nh->srcu);
ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
srcu_read_unlock(&nh->srcu, idx); srcu_read_unlock(&nh->srcu, idx);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);
int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
unsigned long val, void *v)
{
return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(srcu_notifier_call_chain); EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
/** /**
......
...@@ -706,8 +706,8 @@ static int load_image_and_restore(void) ...@@ -706,8 +706,8 @@ static int load_image_and_restore(void)
*/ */
int hibernate(void) int hibernate(void)
{ {
int error, nr_calls = 0;
bool snapshot_test = false; bool snapshot_test = false;
int error;
if (!hibernation_available()) { if (!hibernation_available()) {
pm_pr_dbg("Hibernation not available.\n"); pm_pr_dbg("Hibernation not available.\n");
...@@ -723,11 +723,9 @@ int hibernate(void) ...@@ -723,11 +723,9 @@ int hibernate(void)
pr_info("hibernation entry\n"); pr_info("hibernation entry\n");
pm_prepare_console(); pm_prepare_console();
error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls); error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
if (error) { if (error)
nr_calls--; goto Restore;
goto Exit;
}
ksys_sync_helper(); ksys_sync_helper();
...@@ -785,7 +783,8 @@ int hibernate(void) ...@@ -785,7 +783,8 @@ int hibernate(void)
/* Don't bother checking whether freezer_test_done is true */ /* Don't bother checking whether freezer_test_done is true */
freezer_test_done = false; freezer_test_done = false;
Exit: Exit:
__pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL); pm_notifier_call_chain(PM_POST_HIBERNATION);
Restore:
pm_restore_console(); pm_restore_console();
hibernate_release(); hibernate_release();
Unlock: Unlock:
...@@ -804,7 +803,7 @@ int hibernate(void) ...@@ -804,7 +803,7 @@ int hibernate(void)
*/ */
int hibernate_quiet_exec(int (*func)(void *data), void *data) int hibernate_quiet_exec(int (*func)(void *data), void *data)
{ {
int error, nr_calls = 0; int error;
lock_system_sleep(); lock_system_sleep();
...@@ -815,11 +814,9 @@ int hibernate_quiet_exec(int (*func)(void *data), void *data) ...@@ -815,11 +814,9 @@ int hibernate_quiet_exec(int (*func)(void *data), void *data)
pm_prepare_console(); pm_prepare_console();
error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls); error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
if (error) { if (error)
nr_calls--; goto restore;
goto exit;
}
error = freeze_processes(); error = freeze_processes();
if (error) if (error)
...@@ -880,8 +877,9 @@ int hibernate_quiet_exec(int (*func)(void *data), void *data) ...@@ -880,8 +877,9 @@ int hibernate_quiet_exec(int (*func)(void *data), void *data)
thaw_processes(); thaw_processes();
exit: exit:
__pm_notifier_call_chain(PM_POST_HIBERNATION, nr_calls, NULL); pm_notifier_call_chain(PM_POST_HIBERNATION);
restore:
pm_restore_console(); pm_restore_console();
hibernate_release(); hibernate_release();
...@@ -910,7 +908,7 @@ EXPORT_SYMBOL_GPL(hibernate_quiet_exec); ...@@ -910,7 +908,7 @@ EXPORT_SYMBOL_GPL(hibernate_quiet_exec);
*/ */
static int software_resume(void) static int software_resume(void)
{ {
int error, nr_calls = 0; int error;
/* /*
* If the user said "noresume".. bail out early. * If the user said "noresume".. bail out early.
...@@ -997,11 +995,9 @@ static int software_resume(void) ...@@ -997,11 +995,9 @@ static int software_resume(void)
pr_info("resume from hibernation\n"); pr_info("resume from hibernation\n");
pm_prepare_console(); pm_prepare_console();
error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls); error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
if (error) { if (error)
nr_calls--; goto Restore;
goto Close_Finish;
}
pm_pr_dbg("Preparing processes for hibernation restore.\n"); pm_pr_dbg("Preparing processes for hibernation restore.\n");
error = freeze_processes(); error = freeze_processes();
...@@ -1017,7 +1013,8 @@ static int software_resume(void) ...@@ -1017,7 +1013,8 @@ static int software_resume(void)
error = load_image_and_restore(); error = load_image_and_restore();
thaw_processes(); thaw_processes();
Finish: Finish:
__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL); pm_notifier_call_chain(PM_POST_RESTORE);
Restore:
pm_restore_console(); pm_restore_console();
pr_info("resume failed (%d)\n", error); pr_info("resume failed (%d)\n", error);
hibernate_release(); hibernate_release();
......
...@@ -80,18 +80,18 @@ int unregister_pm_notifier(struct notifier_block *nb) ...@@ -80,18 +80,18 @@ int unregister_pm_notifier(struct notifier_block *nb)
} }
EXPORT_SYMBOL_GPL(unregister_pm_notifier); EXPORT_SYMBOL_GPL(unregister_pm_notifier);
int __pm_notifier_call_chain(unsigned long val, int nr_to_call, int *nr_calls) int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down)
{ {
int ret; int ret;
ret = __blocking_notifier_call_chain(&pm_chain_head, val, NULL, ret = blocking_notifier_call_chain_robust(&pm_chain_head, val_up, val_down, NULL);
nr_to_call, nr_calls);
return notifier_to_errno(ret); return notifier_to_errno(ret);
} }
int pm_notifier_call_chain(unsigned long val) int pm_notifier_call_chain(unsigned long val)
{ {
return __pm_notifier_call_chain(val, -1, NULL); return blocking_notifier_call_chain(&pm_chain_head, val, NULL);
} }
/* If set, devices may be suspended and resumed asynchronously. */ /* If set, devices may be suspended and resumed asynchronously. */
......
...@@ -210,8 +210,7 @@ static inline void suspend_test_finish(const char *label) {} ...@@ -210,8 +210,7 @@ static inline void suspend_test_finish(const char *label) {}
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */ /* kernel/power/main.c */
extern int __pm_notifier_call_chain(unsigned long val, int nr_to_call, extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down);
int *nr_calls);
extern int pm_notifier_call_chain(unsigned long val); extern int pm_notifier_call_chain(unsigned long val);
#endif #endif
......
...@@ -342,18 +342,16 @@ static int suspend_test(int level) ...@@ -342,18 +342,16 @@ static int suspend_test(int level)
*/ */
static int suspend_prepare(suspend_state_t state) static int suspend_prepare(suspend_state_t state)
{ {
int error, nr_calls = 0; int error;
if (!sleep_state_supported(state)) if (!sleep_state_supported(state))
return -EPERM; return -EPERM;
pm_prepare_console(); pm_prepare_console();
error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls); error = pm_notifier_call_chain_robust(PM_SUSPEND_PREPARE, PM_POST_SUSPEND);
if (error) { if (error)
nr_calls--; goto Restore;
goto Finish;
}
trace_suspend_resume(TPS("freeze_processes"), 0, true); trace_suspend_resume(TPS("freeze_processes"), 0, true);
error = suspend_freeze_processes(); error = suspend_freeze_processes();
...@@ -363,8 +361,8 @@ static int suspend_prepare(suspend_state_t state) ...@@ -363,8 +361,8 @@ static int suspend_prepare(suspend_state_t state)
suspend_stats.failed_freeze++; suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE); dpm_save_failed_step(SUSPEND_FREEZE);
Finish: pm_notifier_call_chain(PM_POST_SUSPEND);
__pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL); Restore:
pm_restore_console(); pm_restore_console();
return error; return error;
} }
......
...@@ -46,7 +46,7 @@ int is_hibernate_resume_dev(const struct inode *bd_inode) ...@@ -46,7 +46,7 @@ int is_hibernate_resume_dev(const struct inode *bd_inode)
static int snapshot_open(struct inode *inode, struct file *filp) static int snapshot_open(struct inode *inode, struct file *filp)
{ {
struct snapshot_data *data; struct snapshot_data *data;
int error, nr_calls = 0; int error;
if (!hibernation_available()) if (!hibernation_available())
return -EPERM; return -EPERM;
...@@ -73,9 +73,7 @@ static int snapshot_open(struct inode *inode, struct file *filp) ...@@ -73,9 +73,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
swap_type_of(swsusp_resume_device, 0, NULL) : -1; swap_type_of(swsusp_resume_device, 0, NULL) : -1;
data->mode = O_RDONLY; data->mode = O_RDONLY;
data->free_bitmaps = false; data->free_bitmaps = false;
error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls); error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
if (error)
__pm_notifier_call_chain(PM_POST_HIBERNATION, --nr_calls, NULL);
} else { } else {
/* /*
* Resuming. We may need to wait for the image device to * Resuming. We may need to wait for the image device to
...@@ -85,15 +83,11 @@ static int snapshot_open(struct inode *inode, struct file *filp) ...@@ -85,15 +83,11 @@ static int snapshot_open(struct inode *inode, struct file *filp)
data->swap = -1; data->swap = -1;
data->mode = O_WRONLY; data->mode = O_WRONLY;
error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls); error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
if (!error) { if (!error) {
error = create_basic_memory_bitmaps(); error = create_basic_memory_bitmaps();
data->free_bitmaps = !error; data->free_bitmaps = !error;
} else }
nr_calls--;
if (error)
__pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL);
} }
if (error) if (error)
hibernate_release(); hibernate_release();
......
...@@ -2027,10 +2027,11 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op, ...@@ -2027,10 +2027,11 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
{ {
struct bpf_trace_module *btm, *tmp; struct bpf_trace_module *btm, *tmp;
struct module *mod = module; struct module *mod = module;
int ret = 0;
if (mod->num_bpf_raw_events == 0 || if (mod->num_bpf_raw_events == 0 ||
(op != MODULE_STATE_COMING && op != MODULE_STATE_GOING)) (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
return 0; goto out;
mutex_lock(&bpf_module_mutex); mutex_lock(&bpf_module_mutex);
...@@ -2040,6 +2041,8 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op, ...@@ -2040,6 +2041,8 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
if (btm) { if (btm) {
btm->module = module; btm->module = module;
list_add(&btm->list, &bpf_trace_modules); list_add(&btm->list, &bpf_trace_modules);
} else {
ret = -ENOMEM;
} }
break; break;
case MODULE_STATE_GOING: case MODULE_STATE_GOING:
...@@ -2055,7 +2058,8 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op, ...@@ -2055,7 +2058,8 @@ static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
mutex_unlock(&bpf_module_mutex); mutex_unlock(&bpf_module_mutex);
return 0; out:
return notifier_from_errno(ret);
} }
static struct notifier_block bpf_module_nb = { static struct notifier_block bpf_module_nb = {
......
...@@ -9074,7 +9074,7 @@ static int trace_module_notify(struct notifier_block *self, ...@@ -9074,7 +9074,7 @@ static int trace_module_notify(struct notifier_block *self,
break; break;
} }
return 0; return NOTIFY_OK;
} }
static struct notifier_block trace_module_nb = { static struct notifier_block trace_module_nb = {
......
...@@ -2646,7 +2646,7 @@ static int trace_module_notify(struct notifier_block *self, ...@@ -2646,7 +2646,7 @@ static int trace_module_notify(struct notifier_block *self,
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex); mutex_unlock(&event_mutex);
return 0; return NOTIFY_OK;
} }
static struct notifier_block trace_module_nb = { static struct notifier_block trace_module_nb = {
......
...@@ -96,7 +96,7 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self, ...@@ -96,7 +96,7 @@ static int module_trace_bprintk_format_notify(struct notifier_block *self,
if (val == MODULE_STATE_COMING) if (val == MODULE_STATE_COMING)
hold_module_trace_bprintk_format(start, end); hold_module_trace_bprintk_format(start, end);
} }
return 0; return NOTIFY_OK;
} }
/* /*
...@@ -174,7 +174,7 @@ __init static int ...@@ -174,7 +174,7 @@ __init static int
module_trace_bprintk_format_notify(struct notifier_block *self, module_trace_bprintk_format_notify(struct notifier_block *self,
unsigned long val, void *data) unsigned long val, void *data)
{ {
return 0; return NOTIFY_OK;
} }
static inline const char ** static inline const char **
find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos) find_next_mod_format(int start_index, void *v, const char **fmt, loff_t *pos)
......
...@@ -221,6 +221,29 @@ static void *func_remove(struct tracepoint_func **funcs, ...@@ -221,6 +221,29 @@ static void *func_remove(struct tracepoint_func **funcs,
return old; return old;
} }
static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func *tp_funcs, bool sync)
{
void *func = tp->iterator;
/* Synthetic events do not have static call sites */
if (!tp->static_call_key)
return;
if (!tp_funcs[1].func) {
func = tp_funcs[0].func;
/*
* If going from the iterator back to a single caller,
* we need to synchronize with __DO_TRACE to make sure
* that the data passed to the callback is the one that
* belongs to that callback.
*/
if (sync)
tracepoint_synchronize_unregister();
}
__static_call_update(tp->static_call_key, tp->static_call_tramp, func);
}
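tracepoint_update_call() only makes sense alongside the new call site: __DO_TRACE now dispatches through a per-tracepoint static call, patched to the lone probe when exactly one is registered and back to the iterator otherwise. A rough, hedged sketch of that fast path; the tracepoint name "myevent", its argument, and the hand-written iterator are stand-ins for what the tracepoint macros actually generate:

#include <linux/static_call.h>
#include <linux/tracepoint.h>

/* Stand-in for the macro-generated iterator that walks tp->funcs. */
static void myevent_iterator(void *data, int arg)
{
	/* calls every registered probe in turn; elided in this sketch */
}

DEFINE_STATIC_CALL(tp_func_myevent, myevent_iterator);

static void trace_myevent(struct tracepoint *tp, int arg)
{
	struct tracepoint_func *it_func_ptr;

	if (!static_key_enabled(&tp->key))	/* no probes registered */
		return;

	it_func_ptr = rcu_dereference_raw(tp->funcs);
	if (it_func_ptr) {
		/*
		 * Patched by tracepoint_update_call(): the single probe
		 * when one is registered, myevent_iterator when several
		 * probes must be walked.
		 */
		static_call(tp_func_myevent)(it_func_ptr->data, arg);
	}
}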
/* /*
* Add the probe function to a tracepoint. * Add the probe function to a tracepoint.
*/ */
...@@ -251,8 +274,9 @@ static int tracepoint_add_func(struct tracepoint *tp, ...@@ -251,8 +274,9 @@ static int tracepoint_add_func(struct tracepoint *tp,
* include/linux/tracepoint.h using rcu_dereference_sched(). * include/linux/tracepoint.h using rcu_dereference_sched().
*/ */
rcu_assign_pointer(tp->funcs, tp_funcs); rcu_assign_pointer(tp->funcs, tp_funcs);
if (!static_key_enabled(&tp->key)) tracepoint_update_call(tp, tp_funcs, false);
static_key_slow_inc(&tp->key); static_key_enable(&tp->key);
release_probes(old); release_probes(old);
return 0; return 0;
} }
...@@ -281,10 +305,13 @@ static int tracepoint_remove_func(struct tracepoint *tp, ...@@ -281,10 +305,13 @@ static int tracepoint_remove_func(struct tracepoint *tp,
if (tp->unregfunc && static_key_enabled(&tp->key)) if (tp->unregfunc && static_key_enabled(&tp->key))
tp->unregfunc(); tp->unregfunc();
if (static_key_enabled(&tp->key)) static_key_disable(&tp->key);
static_key_slow_dec(&tp->key); rcu_assign_pointer(tp->funcs, tp_funcs);
} else {
rcu_assign_pointer(tp->funcs, tp_funcs);
tracepoint_update_call(tp, tp_funcs,
tp_funcs[0].func != old[0].func);
} }
rcu_assign_pointer(tp->funcs, tp_funcs);
release_probes(old); release_probes(old);
return 0; return 0;
} }
...@@ -521,7 +548,7 @@ static int tracepoint_module_notify(struct notifier_block *self, ...@@ -521,7 +548,7 @@ static int tracepoint_module_notify(struct notifier_block *self,
case MODULE_STATE_UNFORMED: case MODULE_STATE_UNFORMED:
break; break;
} }
return ret; return notifier_from_errno(ret);
} }
static struct notifier_block tracepoint_module_nb = { static struct notifier_block tracepoint_module_nb = {
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _STATIC_CALL_TYPES_H
#define _STATIC_CALL_TYPES_H
#include <linux/types.h>
#include <linux/stringify.h>
#define STATIC_CALL_KEY_PREFIX __SCK__
#define STATIC_CALL_KEY_PREFIX_STR __stringify(STATIC_CALL_KEY_PREFIX)
#define STATIC_CALL_KEY_PREFIX_LEN (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
#define STATIC_CALL_KEY(name) __PASTE(STATIC_CALL_KEY_PREFIX, name)
#define STATIC_CALL_TRAMP_PREFIX __SCT__
#define STATIC_CALL_TRAMP_PREFIX_STR __stringify(STATIC_CALL_TRAMP_PREFIX)
#define STATIC_CALL_TRAMP_PREFIX_LEN (sizeof(STATIC_CALL_TRAMP_PREFIX_STR) - 1)
#define STATIC_CALL_TRAMP(name) __PASTE(STATIC_CALL_TRAMP_PREFIX, name)
#define STATIC_CALL_TRAMP_STR(name) __stringify(STATIC_CALL_TRAMP(name))
/*
* Flags in the low bits of static_call_site::key.
*/
#define STATIC_CALL_SITE_TAIL 1UL /* tail call */
#define STATIC_CALL_SITE_INIT 2UL /* init section */
#define STATIC_CALL_SITE_FLAGS 3UL
/*
* The static call site table needs to be created by external tooling (objtool
* or a compiler plugin).
*/
struct static_call_site {
s32 addr;
s32 key;
};
#endif /* _STATIC_CALL_TYPES_H */
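Both fields are self-relative 32-bit offsets (target minus the field's own address), which keeps the table compact and position-independent; the TAIL/INIT flags travel in the low bits of the decoded key address. A hedged sketch of the decoding helpers, modeled on the kernel-side patching code (treat the exact names as illustrative):

#include <linux/static_call_types.h>

struct static_call_key;		/* defined in linux/static_call.h */

/* Recover the absolute call-site address: each field encodes target - &field. */
static inline unsigned long static_call_addr(struct static_call_site *site)
{
	return (unsigned long)&site->addr + site->addr;
}

/* The key pointer carries STATIC_CALL_SITE_TAIL/INIT in its low bits. */
static inline struct static_call_key *static_call_key(const struct static_call_site *site)
{
	return (struct static_call_key *)
		(((unsigned long)&site->key + site->key) & ~STATIC_CALL_SITE_FLAGS);
}

static inline bool static_call_is_tail(const struct static_call_site *site)
{
	return ((unsigned long)&site->key + site->key) & STATIC_CALL_SITE_TAIL;
}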
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/hashtable.h> #include <linux/hashtable.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/static_call_types.h>
#define FAKE_JUMP_OFFSET -1 #define FAKE_JUMP_OFFSET -1
...@@ -433,6 +434,103 @@ static int add_dead_ends(struct objtool_file *file) ...@@ -433,6 +434,103 @@ static int add_dead_ends(struct objtool_file *file)
return 0; return 0;
} }
static int create_static_call_sections(struct objtool_file *file)
{
struct section *sec, *reloc_sec;
struct reloc *reloc;
struct static_call_site *site;
struct instruction *insn;
struct symbol *key_sym;
char *key_name, *tmp;
int idx;
sec = find_section_by_name(file->elf, ".static_call_sites");
if (sec) {
INIT_LIST_HEAD(&file->static_call_list);
WARN("file already has .static_call_sites section, skipping");
return 0;
}
if (list_empty(&file->static_call_list))
return 0;
idx = 0;
list_for_each_entry(insn, &file->static_call_list, static_call_node)
idx++;
sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
sizeof(struct static_call_site), idx);
if (!sec)
return -1;
reloc_sec = elf_create_reloc_section(file->elf, sec, SHT_RELA);
if (!reloc_sec)
return -1;
idx = 0;
list_for_each_entry(insn, &file->static_call_list, static_call_node) {
site = (struct static_call_site *)sec->data->d_buf + idx;
memset(site, 0, sizeof(struct static_call_site));
/* populate reloc for 'addr' */
reloc = malloc(sizeof(*reloc));
if (!reloc) {
perror("malloc");
return -1;
}
memset(reloc, 0, sizeof(*reloc));
reloc->sym = insn->sec->sym;
reloc->addend = insn->offset;
reloc->type = R_X86_64_PC32;
reloc->offset = idx * sizeof(struct static_call_site);
reloc->sec = reloc_sec;
elf_add_reloc(file->elf, reloc);
/* find key symbol */
key_name = strdup(insn->call_dest->name);
if (!key_name) {
perror("strdup");
return -1;
}
if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
STATIC_CALL_TRAMP_PREFIX_LEN)) {
WARN("static_call: trampoline name malformed: %s", key_name);
return -1;
}
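		/* derive the key symbol name by overwriting the __SCT__ prefix with __SCK__ in place */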
tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
key_sym = find_symbol_by_name(file->elf, tmp);
if (!key_sym) {
WARN("static_call: can't find static_call_key symbol: %s", tmp);
return -1;
}
free(key_name);
/* populate reloc for 'key' */
reloc = malloc(sizeof(*reloc));
if (!reloc) {
perror("malloc");
return -1;
}
memset(reloc, 0, sizeof(*reloc));
reloc->sym = key_sym;
reloc->addend = is_sibling_call(insn) ? STATIC_CALL_SITE_TAIL : 0;
reloc->type = R_X86_64_PC32;
reloc->offset = idx * sizeof(struct static_call_site) + 4;
reloc->sec = reloc_sec;
elf_add_reloc(file->elf, reloc);
idx++;
}
if (elf_rebuild_reloc_section(file->elf, reloc_sec))
return -1;
return 0;
}
/* /*
* Warnings shouldn't be reported for ignored functions. * Warnings shouldn't be reported for ignored functions.
*/ */
...@@ -705,6 +803,10 @@ static int add_jump_destinations(struct objtool_file *file) ...@@ -705,6 +803,10 @@ static int add_jump_destinations(struct objtool_file *file)
} else { } else {
/* external sibling call */ /* external sibling call */
insn->call_dest = reloc->sym; insn->call_dest = reloc->sym;
if (insn->call_dest->static_call_tramp) {
list_add_tail(&insn->static_call_node,
&file->static_call_list);
}
continue; continue;
} }
...@@ -756,6 +858,10 @@ static int add_jump_destinations(struct objtool_file *file) ...@@ -756,6 +858,10 @@ static int add_jump_destinations(struct objtool_file *file)
/* internal sibling call */ /* internal sibling call */
insn->call_dest = insn->jump_dest->func; insn->call_dest = insn->jump_dest->func;
if (insn->call_dest->static_call_tramp) {
list_add_tail(&insn->static_call_node,
&file->static_call_list);
}
} }
} }
} }
...@@ -1578,6 +1684,23 @@ static int read_intra_function_calls(struct objtool_file *file) ...@@ -1578,6 +1684,23 @@ static int read_intra_function_calls(struct objtool_file *file)
return 0; return 0;
} }
static int read_static_call_tramps(struct objtool_file *file)
{
struct section *sec;
struct symbol *func;
for_each_sec(file, sec) {
list_for_each_entry(func, &sec->symbol_list, list) {
if (func->bind == STB_GLOBAL &&
!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
func->static_call_tramp = true;
}
}
return 0;
}
static void mark_rodata(struct objtool_file *file) static void mark_rodata(struct objtool_file *file)
{ {
struct section *sec; struct section *sec;
...@@ -1625,6 +1748,10 @@ static int decode_sections(struct objtool_file *file) ...@@ -1625,6 +1748,10 @@ static int decode_sections(struct objtool_file *file)
if (ret) if (ret)
return ret; return ret;
ret = read_static_call_tramps(file);
if (ret)
return ret;
ret = add_jump_destinations(file); ret = add_jump_destinations(file);
if (ret) if (ret)
return ret; return ret;
...@@ -2488,6 +2615,11 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, ...@@ -2488,6 +2615,11 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
if (dead_end_function(file, insn->call_dest)) if (dead_end_function(file, insn->call_dest))
return 0; return 0;
if (insn->type == INSN_CALL && insn->call_dest->static_call_tramp) {
list_add_tail(&insn->static_call_node,
&file->static_call_list);
}
break; break;
case INSN_JUMP_CONDITIONAL: case INSN_JUMP_CONDITIONAL:
...@@ -2847,6 +2979,7 @@ int check(const char *_objname, bool orc) ...@@ -2847,6 +2979,7 @@ int check(const char *_objname, bool orc)
INIT_LIST_HEAD(&file.insn_list); INIT_LIST_HEAD(&file.insn_list);
hash_init(file.insn_hash); hash_init(file.insn_hash);
INIT_LIST_HEAD(&file.static_call_list);
file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment"); file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment");
file.ignore_unreachables = no_unreachable; file.ignore_unreachables = no_unreachable;
file.hints = false; file.hints = false;
...@@ -2894,6 +3027,11 @@ int check(const char *_objname, bool orc) ...@@ -2894,6 +3027,11 @@ int check(const char *_objname, bool orc)
warnings += ret; warnings += ret;
} }
ret = create_static_call_sections(&file);
if (ret < 0)
goto out;
warnings += ret;
if (orc) { if (orc) {
ret = create_orc(&file); ret = create_orc(&file);
if (ret < 0) if (ret < 0)
......
...@@ -22,6 +22,7 @@ struct insn_state { ...@@ -22,6 +22,7 @@ struct insn_state {
struct instruction { struct instruction {
struct list_head list; struct list_head list;
struct hlist_node hash; struct hlist_node hash;
struct list_head static_call_node;
struct section *sec; struct section *sec;
unsigned long offset; unsigned long offset;
unsigned int len; unsigned int len;
......
...@@ -652,7 +652,7 @@ struct elf *elf_open_read(const char *name, int flags) ...@@ -652,7 +652,7 @@ struct elf *elf_open_read(const char *name, int flags)
} }
struct section *elf_create_section(struct elf *elf, const char *name, struct section *elf_create_section(struct elf *elf, const char *name,
size_t entsize, int nr) unsigned int sh_flags, size_t entsize, int nr)
{ {
struct section *sec, *shstrtab; struct section *sec, *shstrtab;
size_t size = entsize * nr; size_t size = entsize * nr;
...@@ -712,7 +712,7 @@ struct section *elf_create_section(struct elf *elf, const char *name, ...@@ -712,7 +712,7 @@ struct section *elf_create_section(struct elf *elf, const char *name,
sec->sh.sh_entsize = entsize; sec->sh.sh_entsize = entsize;
sec->sh.sh_type = SHT_PROGBITS; sec->sh.sh_type = SHT_PROGBITS;
sec->sh.sh_addralign = 1; sec->sh.sh_addralign = 1;
sec->sh.sh_flags = SHF_ALLOC; sec->sh.sh_flags = SHF_ALLOC | sh_flags;
/* Add section name to .shstrtab (or .strtab for Clang) */ /* Add section name to .shstrtab (or .strtab for Clang) */
...@@ -767,7 +767,7 @@ static struct section *elf_create_rel_reloc_section(struct elf *elf, struct sect ...@@ -767,7 +767,7 @@ static struct section *elf_create_rel_reloc_section(struct elf *elf, struct sect
strcpy(relocname, ".rel"); strcpy(relocname, ".rel");
strcat(relocname, base->name); strcat(relocname, base->name);
sec = elf_create_section(elf, relocname, sizeof(GElf_Rel), 0); sec = elf_create_section(elf, relocname, 0, sizeof(GElf_Rel), 0);
free(relocname); free(relocname);
if (!sec) if (!sec)
return NULL; return NULL;
...@@ -797,7 +797,7 @@ static struct section *elf_create_rela_reloc_section(struct elf *elf, struct sec ...@@ -797,7 +797,7 @@ static struct section *elf_create_rela_reloc_section(struct elf *elf, struct sec
strcpy(relocname, ".rela"); strcpy(relocname, ".rela");
strcat(relocname, base->name); strcat(relocname, base->name);
sec = elf_create_section(elf, relocname, sizeof(GElf_Rela), 0); sec = elf_create_section(elf, relocname, 0, sizeof(GElf_Rela), 0);
free(relocname); free(relocname);
if (!sec) if (!sec)
return NULL; return NULL;
......
...@@ -56,6 +56,7 @@ struct symbol { ...@@ -56,6 +56,7 @@ struct symbol {
unsigned int len; unsigned int len;
struct symbol *pfunc, *cfunc, *alias; struct symbol *pfunc, *cfunc, *alias;
bool uaccess_safe; bool uaccess_safe;
bool static_call_tramp;
}; };
struct reloc { struct reloc {
...@@ -120,7 +121,7 @@ static inline u32 reloc_hash(struct reloc *reloc) ...@@ -120,7 +121,7 @@ static inline u32 reloc_hash(struct reloc *reloc)
} }
struct elf *elf_open_read(const char *name, int flags); struct elf *elf_open_read(const char *name, int flags);
struct section *elf_create_section(struct elf *elf, const char *name, size_t entsize, int nr); struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr);
struct section *elf_create_reloc_section(struct elf *elf, struct section *base, int reltype); struct section *elf_create_reloc_section(struct elf *elf, struct section *base, int reltype);
void elf_add_reloc(struct elf *elf, struct reloc *reloc); void elf_add_reloc(struct elf *elf, struct reloc *reloc);
int elf_write_insn(struct elf *elf, struct section *sec, int elf_write_insn(struct elf *elf, struct section *sec,
......
...@@ -16,6 +16,7 @@ struct objtool_file { ...@@ -16,6 +16,7 @@ struct objtool_file {
struct elf *elf; struct elf *elf;
struct list_head insn_list; struct list_head insn_list;
DECLARE_HASHTABLE(insn_hash, 20); DECLARE_HASHTABLE(insn_hash, 20);
struct list_head static_call_list;
bool ignore_unreachables, c_file, hints, rodata; bool ignore_unreachables, c_file, hints, rodata;
}; };
......
...@@ -177,7 +177,7 @@ int create_orc_sections(struct objtool_file *file) ...@@ -177,7 +177,7 @@ int create_orc_sections(struct objtool_file *file)
/* create .orc_unwind_ip and .rela.orc_unwind_ip sections */ /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx); sec = elf_create_section(file->elf, ".orc_unwind_ip", 0, sizeof(int), idx);
if (!sec) if (!sec)
return -1; return -1;
...@@ -186,7 +186,7 @@ int create_orc_sections(struct objtool_file *file) ...@@ -186,7 +186,7 @@ int create_orc_sections(struct objtool_file *file)
return -1; return -1;
/* create .orc_unwind section */ /* create .orc_unwind section */
u_sec = elf_create_section(file->elf, ".orc_unwind", u_sec = elf_create_section(file->elf, ".orc_unwind", 0,
sizeof(struct orc_entry), idx); sizeof(struct orc_entry), idx);
/* populate sections */ /* populate sections */
......
...@@ -7,6 +7,7 @@ arch/x86/include/asm/orc_types.h ...@@ -7,6 +7,7 @@ arch/x86/include/asm/orc_types.h
arch/x86/include/asm/emulate_prefix.h arch/x86/include/asm/emulate_prefix.h
arch/x86/lib/x86-opcode-map.txt arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk arch/x86/tools/gen-insn-attr-x86.awk
include/linux/static_call_types.h
' '
check_2 () { check_2 () {
......
...@@ -171,7 +171,7 @@ class SystemValues: ...@@ -171,7 +171,7 @@ class SystemValues:
tracefuncs = { tracefuncs = {
'sys_sync': {}, 'sys_sync': {},
'ksys_sync': {}, 'ksys_sync': {},
'__pm_notifier_call_chain': {}, 'pm_notifier_call_chain_robust': {},
'pm_prepare_console': {}, 'pm_prepare_console': {},
'pm_notifier_call_chain': {}, 'pm_notifier_call_chain': {},
'freeze_processes': {}, 'freeze_processes': {},
......