Commit 09a07294 authored by Paul Mundt

sh: hw-breakpoints: Add preliminary support for SH-4A UBC.

This adds preliminary support for the SH-4A UBC to the hw-breakpoints API.
Presently only a single channel is implemented, and the ptrace interface
still needs to be converted. This is the first step to cleaning up the
long-standing UBC mess, making the UBC more generally accessible, and
finally making it SMP safe.

An additional abstraction will be layered on top of this, as with the perf
events code, to permit the various CPU families to wire up support for
their own specific UBCs, as many variations exist.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 6ec22f9b
arch/sh/Kconfig
@@ -39,6 +39,7 @@ config SUPERH32
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_ARCH_KGDB
+	select HAVE_HW_BREAKPOINT if CPU_SH4A
 	select ARCH_HIBERNATION_POSSIBLE if MMU

 config SUPERH64
arch/sh/include/asm/Kbuild
 include include/asm-generic/Kbuild.asm

-header-y += cachectl.h cpu-features.h
+header-y += cachectl.h
+header-y += cpu-features.h
+header-y += hw_breakpoint.h

 unifdef-y += unistd_32.h
 unifdef-y += unistd_64.h
arch/sh/include/asm/hw_breakpoint.h (new file)
#ifndef __ASM_SH_HW_BREAKPOINT_H
#define __ASM_SH_HW_BREAKPOINT_H

#include <linux/kdebug.h>
#include <linux/types.h>
#include <asm/ubc.h>

#ifdef __KERNEL__
#define __ARCH_HW_BREAKPOINT_H

struct arch_hw_breakpoint {
	char		*name;	/* Contains name of the symbol to set the breakpoint */
	unsigned long	address;
	unsigned long	asid;
	u16		len;
	u16		type;
};

enum {
	SH_BREAKPOINT_READ	= (1 << 1),
	SH_BREAKPOINT_WRITE	= (1 << 2),
	SH_BREAKPOINT_RW	= SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,

	SH_BREAKPOINT_LEN_1	= (1 << 12),
	SH_BREAKPOINT_LEN_2	= (1 << 13),
	SH_BREAKPOINT_LEN_4	= SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
	SH_BREAKPOINT_LEN_8	= (1 << 14),
};

/* Total number of available UBC channels */
#define HBP_NUM		1	/* XXX */

struct perf_event;
struct task_struct;
struct pmu;

extern int arch_check_va_in_userspace(unsigned long va, u16 hbp_len);
extern int arch_validate_hwbkpt_settings(struct perf_event *bp,
					 struct task_struct *tsk);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					   unsigned long val, void *data);

int arch_install_hw_breakpoint(struct perf_event *bp);
void arch_uninstall_hw_breakpoint(struct perf_event *bp);
void hw_breakpoint_pmu_read(struct perf_event *bp);
void hw_breakpoint_pmu_unthrottle(struct perf_event *bp);

extern void arch_fill_perf_breakpoint(struct perf_event *bp);

extern struct pmu perf_ops_bp;

#endif /* __KERNEL__ */
#endif /* __ASM_SH_HW_BREAKPOINT_H */
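The length and type encodings above occupy disjoint bit ranges (bits 12-14 versus bits 1-2), so a channel's state word is formed by simply OR-ing one value from each group. A minimal sketch, with a hypothetical helper name that is not part of this patch:

/*
 * Illustration only: compose a UBC channel state from the encodings
 * above. A 4-byte write watchpoint sets both LEN_1 and LEN_2
 * (== SH_BREAKPOINT_LEN_4) plus the WRITE bit.
 */
static inline unsigned long sh_ubc_compose(u16 len, u16 type)
{
	return (unsigned long)len | (unsigned long)type;
}

/* sh_ubc_compose(SH_BREAKPOINT_LEN_4, SH_BREAKPOINT_WRITE) == 0x3004 */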
arch/sh/include/asm/kdebug.h
@@ -6,6 +6,8 @@ enum die_val {
 	DIE_TRAP,
 	DIE_NMI,
 	DIE_OOPS,
+	DIE_BREAKPOINT,
+	DIE_SSTEP,
 };

 #endif /* __ASM_SH_KDEBUG_H */
arch/sh/include/asm/processor_32.h
@@ -14,6 +14,7 @@
 #include <asm/page.h>
 #include <asm/types.h>
 #include <asm/ptrace.h>
+#include <asm/ubc.h>

 /*
  * Default implementation of macro that returns current
@@ -99,8 +100,8 @@ struct thread_struct {
 	unsigned long sp;
 	unsigned long pc;

-	/* Hardware debugging registers */
-	unsigned long ubc_pc;
+	/* Save middle states of ptrace breakpoints */
+	struct perf_event *ptrace_bps[NR_UBC_CHANNELS];

 	/* floating point info */
 	union sh_fpu_union fpu;
@@ -111,9 +112,6 @@ struct thread_struct {
 #endif
 };

-/* Count of active tasks with UBC settings */
-extern int ubc_usercnt;
-
 #define INIT_THREAD {						\
 	.sp = sizeof(init_stack) + (long) &init_stack,		\
 }
arch/sh/include/asm/system.h
@@ -144,8 +144,6 @@ void per_cpu_trap_init(void);
 void default_idle(void);
 void cpu_idle_wait(void);

-asmlinkage void break_point_trap(void);
-
 #ifdef CONFIG_SUPERH32
 #define BUILD_TRAP_HANDLER(name)					\
 asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5, \
arch/sh/include/asm/thread_info.h
@@ -117,6 +117,7 @@ extern void free_thread_info(struct thread_info *ti);
 #define TIF_SECCOMP		6	/* secure computing */
 #define TIF_NOTIFY_RESUME	7	/* callback before returning to user */
 #define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
+#define TIF_DEBUG		9	/* uses UBC */
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		18
@@ -131,6 +132,7 @@ extern void free_thread_info(struct thread_info *ti);
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_DEBUG		(1 << TIF_DEBUG)
 #define _TIF_USEDFPU		(1 << TIF_USEDFPU)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_FREEZE		(1 << TIF_FREEZE)
arch/sh/include/asm/ubc.h
@@ -10,8 +10,8 @@
 #ifndef __ASM_SH_UBC_H
 #define __ASM_SH_UBC_H

 #ifdef __KERNEL__
 #include <cpu/ubc.h>

 /* User Break Controller */
@@ -60,6 +60,12 @@
 #define BRCR_UBDE	(1 << 0)
 #endif

+/*
+ * All SH parts have 2 UBC channels. I defy any hardware designer to
+ * invalidate this assertion.
+ */
+#define NR_UBC_CHANNELS	2
+
 #ifndef __ASSEMBLY__
 /* arch/sh/kernel/cpu/ubc.S */
 extern void ubc_sleep(void);
arch/sh/kernel/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_DUMP_CODE) += disassemble.o
 obj-$(CONFIG_HIBERNATION)	+= swsusp.o
 obj-$(CONFIG_DWARF_UNWINDER)	+= dwarf.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o

 EXTRA_CFLAGS += -Werror
arch/sh/kernel/cpu/sh3/entry.S
@@ -49,7 +49,7 @@ ENTRY(exception_handling_table)
 	.long	exception_error	! reserved_instruction (filled by trap_init) /* 180 */
 	.long	exception_error	! illegal_slot_instruction (filled by trap_init) /*1A0*/
 	.long	nmi_trap_handler	/* 1C0 */	! Allow trap to debugger
-	.long	break_point_trap	/* 1E0 */
+	.long	breakpoint_trap_handler	/* 1E0 */

 /*
  * Pad the remainder of the table out, exceptions residing in far
arch/sh/kernel/hw_breakpoint.c (new file)
/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>

struct ubc_context {
	unsigned long pc;
	unsigned long state;
};

/* Per-CPU UBC channel state */
static DEFINE_PER_CPU(struct ubc_context, ubc_ctx[HBP_NUM]);

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register, for each CPU.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

static int __init ubc_init(void)
{
	__raw_writel(0, UBC_CAMR0);
	__raw_writel(0, UBC_CBR0);
	__raw_writel(0, UBC_CBCR);

	__raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR0);

	/* dummy read for write posting */
	(void)__raw_readl(UBC_CRR0);

	return 0;
}
arch_initcall(ubc_init);

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct ubc_context *ubc_ctx;
	int i;

	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return -EBUSY;

	ubc_ctx = &__get_cpu_var(ubc_ctx[i]);

	ubc_ctx->pc = info->address;
	ubc_ctx->state = info->len | info->type;

	__raw_writel(UBC_CBR_CE | ubc_ctx->state, UBC_CBR0);
	__raw_writel(ubc_ctx->pc, UBC_CAR0);

	return 0;
}
/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct ubc_context *ubc_ctx;
	int i;

	for (i = 0; i < HBP_NUM; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
		return;

	ubc_ctx = &__get_cpu_var(ubc_ctx[i]);

	ubc_ctx->pc = 0;
	ubc_ctx->state &= ~(info->len | info->type);

	/* State (with CE cleared) goes to CBR0, address to CAR0,
	 * mirroring the install path above. */
	__raw_writel(ubc_ctx->state, UBC_CBR0);
	__raw_writel(ubc_ctx->pc, UBC_CAR0);
}
static int get_hbp_len(u16 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case SH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case SH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case SH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case SH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check for virtual address in user space.
 */
int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va <= TASK_SIZE - len);
}

/*
 * Check for virtual address in kernel space.
 */
static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
/*
 * Store a breakpoint's encoded address, length, and type.
 */
static int arch_store_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/*
	 * User-space requests will always have the address field populated.
	 * For kernel addresses, either the address or the symbol name can
	 * be specified.
	 */
	if (info->name)
		info->address = (unsigned long)kallsyms_lookup_name(info->name);
	if (info->address) {
		info->asid = get_asid();
		return 0;
	}

	return -EINVAL;
}
int arch_bp_generic_fields(int sh_len, int sh_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (sh_len) {
	case SH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case SH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case SH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case SH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
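arch_bp_generic_fields() and arch_build_bp_info() below translate in opposite directions between the SH and generic perf encodings. A hypothetical sanity check, not part of the patch:

/*
 * Illustration only: an SH length/type pair run through the generic
 * translation should land on the matching HW_BREAKPOINT_* values.
 */
static int __init bp_translate_check(void)
{
	int gen_len, gen_type;

	if (arch_bp_generic_fields(SH_BREAKPOINT_LEN_2, SH_BREAKPOINT_RW,
				   &gen_len, &gen_type))
		return -EINVAL;

	WARN_ON(gen_len != HW_BREAKPOINT_LEN_2);
	WARN_ON(gen_type != (HW_BREAKPOINT_R | HW_BREAKPOINT_W));

	return 0;
}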
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp,
				  struct task_struct *tsk)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	if (bp->callback)
		ret = arch_store_info(bp);

	if (ret < 0)
		return ret;

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	/* Check that the virtual address is in the proper range */
	if (tsk) {
		if (!arch_check_va_in_userspace(info->address, info->len))
			return -EFAULT;
	} else {
		if (!arch_check_va_in_kernelspace(info->address, info->len))
			return -EFAULT;
	}

	return 0;
}
/*
 * Release the user breakpoints used by ptrace.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < HBP_NUM; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned long val;

	val = __raw_readl(UBC_CBR0);
	__raw_writel(val & ~UBC_CBR_CE, UBC_CBR0);

	cpu = get_cpu();
	for (i = 0; i < HBP_NUM; i++) {
		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp) {
			rc = NOTIFY_DONE;
		} else {
			rcu_read_unlock();
			break;
		}

		(bp->callback)(bp, args->regs);

		rcu_read_unlock();
	}

	if (bp) {
		struct arch_hw_breakpoint *info = counter_arch_bp(bp);

		__raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR0);
		__raw_writel(info->address, UBC_CAR0);
	}

	put_cpu();

	return rc;
}

BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}
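For reference, BUILD_TRAP_HANDLER(breakpoint) expands to an ordinary C function whose name matches the breakpoint_trap_handler entry installed in the exception table above. A sketch of the expansion, assuming the usual SUPERH32 trap-handler argument convention (the macro is only excerpted in the asm/system.h hunk):

/*
 * Approximate expansion under CONFIG_SUPERH32; the r6/r7/__regs tail
 * is assumed from the standard SH convention, not shown in this diff.
 */
asmlinkage void breakpoint_trap_handler(unsigned long r4, unsigned long r5,
					unsigned long r6, unsigned long r7,
					struct pt_regs __regs)
{
	/* body as written above: look up the vector, then notify_die() */
}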
/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					      unsigned long val, void *data)
{
	if (val != DIE_BREAKPOINT)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
{
	/* TODO */
}
arch/sh/kernel/process_32.c
@@ -25,6 +25,7 @@
 #include <linux/fs.h>
 #include <linux/ftrace.h>
 #include <linux/preempt.h>
+#include <linux/hw_breakpoint.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
@@ -34,8 +35,6 @@
 #include <asm/syscalls.h>
 #include <asm/watchdog.h>

-int ubc_usercnt = 0;
-
 #ifdef CONFIG_32BIT
 static void watchdog_trigger_immediate(void)
 {
@@ -148,16 +147,15 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
  */
 void exit_thread(void)
 {
-	if (current->thread.ubc_pc) {
-		current->thread.ubc_pc = 0;
-		ubc_usercnt -= 1;
-	}
 }

 void flush_thread(void)
 {
-#if defined(CONFIG_SH_FPU)
 	struct task_struct *tsk = current;
+
+	flush_ptrace_hw_breakpoint(tsk);
+
+#if defined(CONFIG_SH_FPU)
 	/* Forget lazy FPU state */
 	clear_fpu(tsk, task_pt_regs(tsk));
 	clear_used_math();
@@ -195,9 +193,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 {
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs;
-#if defined(CONFIG_SH_FPU) || defined(CONFIG_SH_DSP)
 	struct task_struct *tsk = current;
-#endif

 #if defined(CONFIG_SH_FPU)
 	unlazy_fpu(tsk, regs);
@@ -234,53 +230,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.pc = (unsigned long) ret_from_fork;

-	p->thread.ubc_pc = 0;
+	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

 	return 0;
 }
-/* Tracing by user break controller. */
-static void ubc_set_tracing(int asid, unsigned long pc)
-{
-#if defined(CONFIG_CPU_SH4A)
-	unsigned long val;
-
-	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
-	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));
-
-	ctrl_outl(val, UBC_CBR0);
-	ctrl_outl(pc, UBC_CAR0);
-	ctrl_outl(0x0, UBC_CAMR0);
-	ctrl_outl(0x0, UBC_CBCR);
-
-	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
-	ctrl_outl(val, UBC_CRR0);
-
-	/* Read UBC register that we wrote last, for checking update */
-	val = ctrl_inl(UBC_CRR0);
-#else	/* CONFIG_CPU_SH4A */
-	ctrl_outl(pc, UBC_BARA);
-
-#ifdef CONFIG_MMU
-	ctrl_outb(asid, UBC_BASRA);
-#endif
-
-	ctrl_outl(0, UBC_BAMRA);
-
-	if (current_cpu_data.type == CPU_SH7729 ||
-	    current_cpu_data.type == CPU_SH7710 ||
-	    current_cpu_data.type == CPU_SH7712 ||
-	    current_cpu_data.type == CPU_SH7203) {
-		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
-		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
-	} else {
-		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
-		ctrl_outw(BRCR_PCBA, UBC_BRCR);
-	}
-#endif	/* CONFIG_CPU_SH4A */
-}
-
 /*
  * switch_to(x,y) should switch tasks from x to y.
  *
@@ -302,25 +256,6 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 		     : "r" (task_thread_info(next)));
 #endif

-	/* If no tasks are using the UBC, we're done */
-	if (ubc_usercnt == 0)
-		/* If no tasks are using the UBC, we're done */;
-	else if (next->thread.ubc_pc && next->mm) {
-		int asid = 0;
-#ifdef CONFIG_MMU
-		asid |= cpu_asid(smp_processor_id(), next->mm);
-#endif
-		ubc_set_tracing(asid, next->thread.ubc_pc);
-	} else {
-#if defined(CONFIG_CPU_SH4A)
-		ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-		ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
-		ctrl_outw(0, UBC_BBRA);
-		ctrl_outw(0, UBC_BBRB);
-#endif
-	}
-
 	return prev;
 }

@@ -412,20 +347,3 @@ unsigned long get_wchan(struct task_struct *p)

 	return pc;
 }
-
-asmlinkage void break_point_trap(void)
-{
-	/* Clear tracing. */
-#if defined(CONFIG_CPU_SH4A)
-	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
-	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
-#else
-	ctrl_outw(0, UBC_BBRA);
-	ctrl_outw(0, UBC_BBRB);
-	ctrl_outl(0, UBC_BRCR);
-#endif
-
-	current->thread.ubc_pc = 0;
-	ubc_usercnt -= 1;
-
-	force_sig(SIGTRAP, current);
-}
arch/sh/kernel/ptrace_32.c
@@ -65,31 +65,12 @@ static inline int put_stack_long(struct task_struct *task, int offset,

 void user_enable_single_step(struct task_struct *child)
 {
-	/* Next scheduling will set up UBC */
-	if (child->thread.ubc_pc == 0)
-		ubc_usercnt += 1;
-
-	child->thread.ubc_pc = get_stack_long(child,
-				offsetof(struct pt_regs, pc));
-
 	set_tsk_thread_flag(child, TIF_SINGLESTEP);
 }

 void user_disable_single_step(struct task_struct *child)
 {
 	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
-
-	/*
-	 * Ensure the UBC is not programmed at the next context switch.
-	 *
-	 * Normally this is not needed but there are sequences such as
-	 * singlestep, signal delivery, and continue that leave the
-	 * ubc_pc non-zero leading to spurious SIGTRAPs.
-	 */
-	if (child->thread.ubc_pc != 0) {
-		ubc_usercnt -= 1;
-		child->thread.ubc_pc = 0;
-	}
 }

 /*