Commit 1436c1aa authored by Christoph Lameter, committed by Russell King

ARM: 7862/1: pcpu: replace __get_cpu_var_uses

This is the ARM part of Christoph's patchset cleaning up the various
uses of __get_cpu_var across the tree.

The idea is to convert each use of __get_cpu_var() into either an explicit
address calculation using this_cpu_ptr() or a this_cpu operation that works
from the per-cpu offset. This avoids redundant address calculations and lets
the compiler use fewer registers in the generated code.
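
For readers unfamiliar with the two idioms, the following is a minimal,
purely illustrative sketch of the conversion patterns applied throughout
this patch. The per-cpu variables and the function below are hypothetical
examples, not code from the kernel tree:

    #include <linux/percpu.h>

    struct foo;

    static DEFINE_PER_CPU(int, example_count);        /* hypothetical scalar */
    static DEFINE_PER_CPU(struct foo *, example_ptr);  /* hypothetical pointer slot */

    static void percpu_conversion_example(struct foo *f)
    {
            struct foo **slot;

            /*
             * Old style: __get_cpu_var() computes this CPU's address and
             * dereferences it, even for a simple read or write.
             */
            __get_cpu_var(example_count)++;
            __get_cpu_var(example_ptr) = f;

            /*
             * New style: the this_cpu operations take the variable itself
             * and work from the per-cpu offset, so no separate address
             * calculation is needed.
             */
            this_cpu_inc(example_count);
            __this_cpu_write(example_ptr, f);

            /*
             * Where an address really is needed, this_cpu_ptr() replaces
             * &__get_cpu_var().
             */
            slot = this_cpu_ptr(&example_ptr);
            *slot = f;      /* equivalent effect via the explicit address */
    }

For per-cpu arrays such as bp_on_reg and wp_on_reg in the hunks below, the
array name already evaluates to a pointer, which is why the new code reads
this_cpu_ptr(bp_on_reg) rather than this_cpu_ptr(&bp_on_reg) and the old
(struct perf_event **) casts can be dropped.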

[will: fixed debug ref counting checks and pcpu array accesses]
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 39792c7c
@@ -344,13 +344,13 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		/* Breakpoint */
 		ctrl_base = ARM_BASE_BCR;
 		val_base = ARM_BASE_BVR;
-		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		ctrl_base = ARM_BASE_WCR;
 		val_base = ARM_BASE_WVR;
-		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}

@@ -396,12 +396,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
 		/* Breakpoint */
 		base = ARM_BASE_BCR;
-		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		base = ARM_BASE_WCR;
-		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}

@@ -697,7 +697,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;

-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);

 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
@@ -768,7 +768,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
 	struct perf_event *wp, **slots;
 	struct arch_hw_breakpoint *info;

-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);

 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
@@ -802,7 +802,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;

-	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+	slots = this_cpu_ptr(bp_on_reg);

 	/* The exception entry code places the amended lr in the PC. */
 	addr = regs->ARM_pc;
...
@@ -171,13 +171,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)

 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 }

 static void __kprobes set_current_kprobe(struct kprobe *p)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 }

 static void __kprobes
@@ -421,10 +421,10 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			continue;

 		if (ri->rp && ri->rp->handler) {
-			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->rp->handler(ri, regs);
-			__get_cpu_var(current_kprobe) = NULL;
+			__this_cpu_write(current_kprobe, NULL);
 		}

 		orig_ret_address = (unsigned long)ri->ret_addr;
...
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(perf_num_counters);

 static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 {
-	return &__get_cpu_var(cpu_hw_events);
+	return this_cpu_ptr(&cpu_hw_events);
 }

 static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
...
@@ -65,7 +65,7 @@ static bool vgic_present;
 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 {
 	BUG_ON(preemptible());
-	__get_cpu_var(kvm_arm_running_vcpu) = vcpu;
+	__this_cpu_write(kvm_arm_running_vcpu, vcpu);
 }

 /**
@@ -75,7 +75,7 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
 {
 	BUG_ON(preemptible());
-	return __get_cpu_var(kvm_arm_running_vcpu);
+	return __this_cpu_read(kvm_arm_running_vcpu);
 }

 /**
@@ -815,7 +815,7 @@ static void cpu_init_hyp_mode(void *dummy)

 	boot_pgd_ptr = kvm_mmu_get_boot_httbr();
 	pgd_ptr = kvm_mmu_get_httbr();
-	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
+	stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 	vector_ptr = (unsigned long)__kvm_hyp_vector;

...
@@ -27,7 +27,6 @@
 #include <linux/uaccess.h>

 #include <asm/debug-monitors.h>
-#include <asm/local.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>

@@ -89,8 +88,8 @@ early_param("nodebugmon", early_debug_disable);
  * Keep track of debug users on each core.
  * The ref counts are per-cpu so we use a local_t type.
  */
-static DEFINE_PER_CPU(local_t, mde_ref_count);
-static DEFINE_PER_CPU(local_t, kde_ref_count);
+static DEFINE_PER_CPU(int, mde_ref_count);
+static DEFINE_PER_CPU(int, kde_ref_count);

 void enable_debug_monitors(enum debug_el el)
 {
@@ -98,11 +97,11 @@ void enable_debug_monitors(enum debug_el el)

 	WARN_ON(preemptible());

-	if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1)
+	if (this_cpu_inc_return(mde_ref_count) == 1)
 		enable = DBG_MDSCR_MDE;

 	if (el == DBG_ACTIVE_EL1 &&
-	    local_inc_return(&__get_cpu_var(kde_ref_count)) == 1)
+	    this_cpu_inc_return(kde_ref_count) == 1)
 		enable |= DBG_MDSCR_KDE;

 	if (enable && debug_enabled) {
@@ -118,11 +117,11 @@ void disable_debug_monitors(enum debug_el el)

 	WARN_ON(preemptible());

-	if (local_dec_and_test(&__get_cpu_var(mde_ref_count)))
+	if (this_cpu_dec_return(mde_ref_count) == 0)
 		disable = ~DBG_MDSCR_MDE;

 	if (el == DBG_ACTIVE_EL1 &&
-	    local_dec_and_test(&__get_cpu_var(kde_ref_count)))
+	    this_cpu_dec_return(kde_ref_count) == 0)
 		disable &= ~DBG_MDSCR_KDE;

 	if (disable) {
...
@@ -184,14 +184,14 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		/* Breakpoint */
 		ctrl_reg = AARCH64_DBG_REG_BCR;
 		val_reg = AARCH64_DBG_REG_BVR;
-		slots = __get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 		reg_enable = !debug_info->bps_disabled;
 	} else {
 		/* Watchpoint */
 		ctrl_reg = AARCH64_DBG_REG_WCR;
 		val_reg = AARCH64_DBG_REG_WVR;
-		slots = __get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 		reg_enable = !debug_info->wps_disabled;
 	}
@@ -230,12 +230,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
 		/* Breakpoint */
 		base = AARCH64_DBG_REG_BCR;
-		slots = __get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		base = AARCH64_DBG_REG_WCR;
-		slots = __get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}

@@ -505,11 +505,11 @@ static void toggle_bp_registers(int reg, enum debug_el el, int enable)

 	switch (reg) {
 	case AARCH64_DBG_REG_BCR:
-		slots = __get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 		break;
 	case AARCH64_DBG_REG_WCR:
-		slots = __get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 		break;
 	default:
@@ -546,7 +546,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr,
 	struct debug_info *debug_info;
 	struct arch_hw_breakpoint_ctrl ctrl;

-	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+	slots = this_cpu_ptr(bp_on_reg);
 	addr = instruction_pointer(regs);
 	debug_info = &current->thread.debug;
@@ -596,7 +596,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr,
 		user_enable_single_step(current);
 	} else {
 		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
-		kernel_step = &__get_cpu_var(stepping_kernel_bp);
+		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

 		if (*kernel_step != ARM_KERNEL_STEP_NONE)
 			return 0;
@@ -623,7 +623,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;

-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);
 	debug_info = &current->thread.debug;

 	for (i = 0; i < core_num_wrps; ++i) {
@@ -698,7 +698,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
 		user_enable_single_step(current);
 	} else {
 		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
-		kernel_step = &__get_cpu_var(stepping_kernel_bp);
+		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

 		if (*kernel_step != ARM_KERNEL_STEP_NONE)
 			return 0;
@@ -722,7 +722,7 @@ int reinstall_suspended_bps(struct pt_regs *regs)
 	struct debug_info *debug_info = &current->thread.debug;
 	int handled_exception = 0, *kernel_step;

-	kernel_step = &__get_cpu_var(stepping_kernel_bp);
+	kernel_step = this_cpu_ptr(&stepping_kernel_bp);

 	/*
 	 * Called from single-step exception handler.
...
@@ -1044,7 +1044,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
 	 */
 	regs = get_irq_regs();

-	cpuc = &__get_cpu_var(cpu_hw_events);
+	cpuc = this_cpu_ptr(&cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -1257,7 +1257,7 @@ device_initcall(register_pmu_driver);

 static struct pmu_hw_events *armpmu_get_cpu_events(void)
 {
-	return &__get_cpu_var(cpu_hw_events);
+	return this_cpu_ptr(&cpu_hw_events);
 }

 static void __init cpu_pmu_init(struct arm_pmu *armpmu)