Commit 3dab04e6 authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300:
  MN10300: gcc 4.6 vs am33 inline assembly
  MN10300: Deprecate gdbstub
  MN10300: Allow KGDB to use the MN10300 serial ports
  MN10300: Emulate single stepping in KGDB on MN10300
  MN10300: Generalise kernel debugger kernel halt, reboot or power off hook
  KGDB: Notify GDB of machine halt, reboot or power off
  MN10300: Use KGDB
  MN10300: Create generic kernel debugger hooks
  MN10300: Create general kernel debugger cache flushing
  MN10300: Introduce a general config option for kernel debugger hooks
  MN10300: The icache invalidate functions should disable the icache first
  MN10300: gdbstub: Restrict single-stepping to non-preemptable non-SMP configs
parents 6d1e9a42 5a4b65ab
......@@ -3,6 +3,8 @@ config MN10300
select HAVE_OPROFILE
select HAVE_GENERIC_HARDIRQS
select GENERIC_HARDIRQS_NO_DEPRECATED
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_KGDB
config AM33_2
def_bool n
......@@ -401,9 +403,9 @@ comment "[!] NOTE: A lower number/level indicates a higher priority (0 is highes
comment "____Non-maskable interrupt levels____"
comment "The following must be set to a higher priority than local_irq_disable() and on-chip serial"
config GDBSTUB_IRQ_LEVEL
int "GDBSTUB interrupt priority"
depends on GDBSTUB
config DEBUGGER_IRQ_LEVEL
int "DEBUGGER interrupt priority"
depends on KERNEL_DEBUGGER
range 0 1 if LINUX_CLI_LEVEL = 2
range 0 2 if LINUX_CLI_LEVEL = 3
range 0 3 if LINUX_CLI_LEVEL = 4
......@@ -437,7 +439,7 @@ config LINUX_CLI_LEVEL
EPSW.IM from 7. Any interrupt is permitted for which the level is
lower than EPSW.IM.
Certain interrupts, such as GDBSTUB and virtual MN10300 on-chip
Certain interrupts, such as DEBUGGER and virtual MN10300 on-chip
serial DMA interrupts are allowed to interrupt normal disabled
sections.
......
......@@ -36,7 +36,7 @@ config KPROBES
config GDBSTUB
bool "Remote GDB kernel debugging"
depends on DEBUG_KERNEL
depends on DEBUG_KERNEL && DEPRECATED
select DEBUG_INFO
select FRAME_POINTER
help
......@@ -46,6 +46,9 @@ config GDBSTUB
RAM to avoid excessive linking time. This is only useful for kernel
hackers. If unsure, say N.
This is deprecated in favour of KGDB and will be removed in a later
version.
config GDBSTUB_IMMEDIATE
bool "Break into GDB stub immediately"
depends on GDBSTUB
......@@ -54,6 +57,14 @@ config GDBSTUB_IMMEDIATE
possible, leaving the program counter at the beginning of
start_kernel() in init/main.c.
config GDBSTUB_ALLOW_SINGLE_STEP
bool "Allow software single-stepping in GDB stub"
depends on GDBSTUB && !SMP && !PREEMPT
help
Allow GDB stub to perform software single-stepping through the
kernel. This doesn't work very well on SMP or preemptible kernels as
it uses temporary breakpoints to emulate single-stepping.
config GDB_CONSOLE
bool "Console output to GDB"
depends on GDBSTUB
......@@ -142,3 +153,7 @@ config GDBSTUB_ON_TTYSx
default y
endmenu
config KERNEL_DEBUGGER
def_bool y
depends on GDBSTUB || KGDB
/* Kernel debugger for MN10300
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_DEBUGGER_H
#define _ASM_DEBUGGER_H
#if defined(CONFIG_KERNEL_DEBUGGER)
extern int debugger_intercept(enum exception_code, int, int, struct pt_regs *);
extern int at_debugger_breakpoint(struct pt_regs *);
#ifndef CONFIG_MN10300_DEBUGGER_CACHE_NO_FLUSH
extern void debugger_local_cache_flushinv(void);
extern void debugger_local_cache_flushinv_one(u8 *);
#else
static inline void debugger_local_cache_flushinv(void) {}
static inline void debugger_local_cache_flushinv_one(u8 *addr) {}
#endif
#else /* CONFIG_KERNEL_DEBUGGER */
static inline int debugger_intercept(enum exception_code excep,
int signo, int si_code,
struct pt_regs *regs)
{
return 0;
}
static inline int at_debugger_breakpoint(struct pt_regs *regs)
{
return 0;
}
#endif /* CONFIG_KERNEL_DEBUGGER */
#endif /* _ASM_DEBUGGER_H */
......@@ -15,6 +15,19 @@
extern void ____unhandled_size_in_do_div___(void);
/*
* Beginning with gcc 4.6, the MDR register is represented explicitly. We
* must, therefore, at least explicitly clobber the register when we make
* changes to it. The following assembly fragments *could* be rearranged in
* order to leave the moves to/from the MDR register to the compiler, but the
* gains would be minimal at best.
*/
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
# define CLOBBER_MDR_CC "mdr", "cc"
#else
# define CLOBBER_MDR_CC "cc"
#endif
/*
* divide n by base, leaving the result in n and returning the remainder
* - we can do this quite efficiently on the MN10300 by cascading the divides
......@@ -29,7 +42,7 @@ extern void ____unhandled_size_in_do_div___(void);
"mov mdr,%1 \n" \
: "+r"(n), "=d"(__rem) \
: "r"(base), "1"(__rem) \
: "cc" \
: CLOBBER_MDR_CC \
); \
} else if (sizeof(n) <= 8) { \
union { \
......@@ -48,7 +61,7 @@ extern void ____unhandled_size_in_do_div___(void);
: "=d"(__rem), "=r"(__quot.w[1]), "=r"(__quot.w[0]) \
: "r"(base), "0"(__rem), "1"(__quot.w[1]), \
"2"(__quot.w[0]) \
: "cc" \
: CLOBBER_MDR_CC \
); \
n = __quot.l; \
} else { \
......@@ -72,7 +85,7 @@ unsigned __muldiv64u(unsigned val, unsigned mult, unsigned div)
* MDR = MDR:val%div */
: "=r"(result)
: "0"(val), "ir"(mult), "r"(div)
: "cc"
: CLOBBER_MDR_CC
);
return result;
......@@ -93,7 +106,7 @@ signed __muldiv64s(signed val, signed mult, signed div)
* MDR = MDR:val%div */
: "=r"(result)
: "0"(val), "ir"(mult), "r"(div)
: "cc"
: CLOBBER_MDR_CC
);
return result;
......
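For context, a hedged usage sketch of the contract described in the comment above (illustrative names only): do_div() updates the 64-bit dividend in place and evaluates to the 32-bit remainder, which is exactly what the cascaded divides implement.

	/* Sketch only; assumes <linux/types.h>. Splits a nanosecond count
	 * into whole seconds (left in *ns) and the leftover nanoseconds. */
	static inline u32 split_seconds(u64 *ns)
	{
		u64 n = *ns;
		u32 rem = do_div(n, 1000000000);	/* e.g. 1000000123 -> n = 1, rem = 123 */
		*ns = n;
		return rem;
	}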
......@@ -55,7 +55,6 @@ static inline void clear_using_fpu(struct task_struct *tsk)
extern asmlinkage void fpu_kill_state(struct task_struct *);
extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
extern asmlinkage void fpu_invalid_op(struct pt_regs *, enum exception_code);
extern asmlinkage void fpu_init_state(void);
extern asmlinkage void fpu_save(struct fpu_state_struct *);
extern int fpu_setup_sigcontext(struct fpucontext *buf);
......@@ -113,7 +112,6 @@ static inline void flush_fpu(void)
extern asmlinkage
void unexpected_fpu_exception(struct pt_regs *, enum exception_code);
#define fpu_invalid_op unexpected_fpu_exception
#define fpu_exception unexpected_fpu_exception
struct task_struct;
......
......@@ -20,7 +20,7 @@
/*
* interrupt control
* - "disabled": run in IM1/2
* - level 0 - GDB stub
* - level 0 - kernel debugger
* - level 1 - virtual serial DMA (if present)
* - level 5 - normal interrupt priority
* - level 6 - timer interrupt
......
/* Kernel debugger for MN10300
*
* Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_KGDB_H
#define _ASM_KGDB_H
/*
* BUFMAX defines the maximum number of characters in inbound/outbound
* buffers at least NUMREGBYTES*2 are needed for register packets
* Longer buffer is needed to list all threads
*/
#define BUFMAX 1024
/*
* Note that this register image is in a different order than the register
* image that Linux produces at interrupt time.
*/
enum regnames {
GDB_FR_D0 = 0,
GDB_FR_D1 = 1,
GDB_FR_D2 = 2,
GDB_FR_D3 = 3,
GDB_FR_A0 = 4,
GDB_FR_A1 = 5,
GDB_FR_A2 = 6,
GDB_FR_A3 = 7,
GDB_FR_SP = 8,
GDB_FR_PC = 9,
GDB_FR_MDR = 10,
GDB_FR_EPSW = 11,
GDB_FR_LIR = 12,
GDB_FR_LAR = 13,
GDB_FR_MDRQ = 14,
GDB_FR_E0 = 15,
GDB_FR_E1 = 16,
GDB_FR_E2 = 17,
GDB_FR_E3 = 18,
GDB_FR_E4 = 19,
GDB_FR_E5 = 20,
GDB_FR_E6 = 21,
GDB_FR_E7 = 22,
GDB_FR_SSP = 23,
GDB_FR_MSP = 24,
GDB_FR_USP = 25,
GDB_FR_MCRH = 26,
GDB_FR_MCRL = 27,
GDB_FR_MCVF = 28,
GDB_FR_FPCR = 29,
GDB_FR_DUMMY0 = 30,
GDB_FR_DUMMY1 = 31,
GDB_FR_FS0 = 32,
GDB_FR_SIZE = 64,
};
#define GDB_ORIG_D0 41
#define NUMREGBYTES (GDB_FR_SIZE*4)
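A quick size check, worked by hand rather than taken from the patch: GDB_FR_SIZE is 64 register slots of 4 bytes each, so NUMREGBYTES is 256 and a full 'g' register packet needs NUMREGBYTES*2 = 512 hex characters, comfortably inside the 1024-byte BUFMAX above.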
static inline void arch_kgdb_breakpoint(void)
{
asm(".globl __arch_kgdb_breakpoint; __arch_kgdb_breakpoint: break");
}
extern u8 __arch_kgdb_breakpoint;
#define BREAK_INSTR_SIZE 1
#define CACHE_FLUSH_IS_SAFE 1
#endif /* _ASM_KGDB_H */
......@@ -34,7 +34,7 @@
#define LOCAL_TIMER_IPI 193
#define FLUSH_CACHE_IPI 194
#define CALL_FUNCTION_NMI_IPI 195
#define GDB_NMI_IPI 196
#define DEBUGGER_NMI_IPI 196
#define SMP_BOOT_IRQ 195
......@@ -43,6 +43,7 @@
#define LOCAL_TIMER_GxICR_LV GxICR_LEVEL_4
#define FLUSH_CACHE_GxICR_LV GxICR_LEVEL_0
#define SMP_BOOT_GxICR_LV GxICR_LEVEL_0
#define DEBUGGER_GxICR_LV CONFIG_DEBUGGER_IRQ_LEVEL
#define TIME_OUT_COUNT_BOOT_IPI 100
#define DELAY_TIME_BOOT_IPI 75000
......@@ -61,8 +62,9 @@
* An alternate way of dealing with this could be to use the EPSW.S bits to
* cache this information for systems with up to four CPUs.
*/
#define arch_smp_processor_id() (CPUID)
#if 0
#define raw_smp_processor_id() (CPUID)
#define raw_smp_processor_id() (arch_smp_processor_id())
#else
#define raw_smp_processor_id() (current_thread_info()->cpu)
#endif
......
......@@ -131,7 +131,11 @@ static inline unsigned long current_stack_pointer(void)
kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
#endif
#ifndef CONFIG_KGDB
#define free_thread_info(ti) kfree((ti))
#else
extern void free_thread_info(struct thread_info *);
#endif
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
......
......@@ -21,11 +21,8 @@ obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-low.o
obj-$(CONFIG_GDBSTUB_ON_TTYSx) += gdb-io-serial.o gdb-io-serial-low.o
obj-$(CONFIG_GDBSTUB_ON_TTYSMx) += gdb-io-ttysm.o gdb-io-ttysm-low.o
ifeq ($(CONFIG_MN10300_CACHE_ENABLED),y)
obj-$(CONFIG_GDBSTUB) += gdb-cache.o
endif
obj-$(CONFIG_MN10300_RTC) += rtc.o
obj-$(CONFIG_PROFILE) += profile.o profile-low.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KGDB) += kgdb.o
......@@ -266,7 +266,11 @@ ENTRY(raw_bus_error)
###############################################################################
#
# Miscellaneous exception entry points
# NMI exception entry points
#
# This is used by ordinary interrupt channels that have the GxICR_NMI bit set
# in addition to the main NMI and Watchdog channels. SMP NMI IPIs use this
# facility.
#
###############################################################################
ENTRY(nmi_handler)
......@@ -281,7 +285,7 @@ ENTRY(nmi_handler)
and NMIAGR_GN,d0
lsr 0x2,d0
cmp CALL_FUNCTION_NMI_IPI,d0
bne 5f # if not call function, jump
bne nmi_not_smp_callfunc # if not call function, jump
# function call nmi ipi
add 4,sp # no need to store TBR
......@@ -295,59 +299,38 @@ ENTRY(nmi_handler)
call smp_nmi_call_function_interrupt[],0
RESTORE_ALL
5:
#ifdef CONFIG_GDBSTUB
cmp GDB_NMI_IPI,d0
bne 3f # if not gdb nmi ipi, jump
nmi_not_smp_callfunc:
#ifdef CONFIG_KERNEL_DEBUGGER
cmp DEBUGGER_NMI_IPI,d0
bne nmi_not_debugger # if not kernel debugger NMI IPI, jump
# gdb nmi ipi
# kernel debugger NMI IPI
add 4,sp # no need to store TBR
mov GxICR_DETECT,d0 # clear NMI
movbu d0,(GxICR(GDB_NMI_IPI))
movhu (GxICR(GDB_NMI_IPI)),d0
movbu d0,(GxICR(DEBUGGER_NMI_IPI))
movhu (GxICR(DEBUGGER_NMI_IPI)),d0
and ~EPSW_NMID,epsw # enable NMI
#ifdef CONFIG_MN10300_CACHE_ENABLED
mov (gdbstub_nmi_opr_type),d0
cmp GDBSTUB_NMI_CACHE_PURGE,d0
bne 4f # if not gdb cache purge, jump
# gdb cache purge nmi ipi
add -20,sp
mov d1,(4,sp)
mov a0,(8,sp)
mov a1,(12,sp)
mov mdr,d0
mov d0,(16,sp)
call gdbstub_local_purge_cache[],0
mov 0x1,d0
mov (CPUID),d1
asl d1,d0
mov gdbstub_nmi_cpumask,a0
bclr d0,(a0)
mov (4,sp),d1
mov (8,sp),a0
mov (12,sp),a1
mov (16,sp),d0
mov d0,mdr
add 20,sp
mov (sp),d0
add 4,sp
rti
4:
#endif /* CONFIG_MN10300_CACHE_ENABLED */
# gdb wait nmi ipi
mov (sp),d0
SAVE_ALL
call gdbstub_nmi_wait[],0
mov fp,d0 # arg 0: stacked register file
mov a2,d1 # arg 1: exception number
call debugger_nmi_interrupt[],0
RESTORE_ALL
3:
#endif /* CONFIG_GDBSTUB */
nmi_not_debugger:
#endif /* CONFIG_KERNEL_DEBUGGER */
mov (sp),d0 # restore TBR to d0
add 4,sp
#endif /* CONFIG_SMP */
bra __common_exception_nonmi
###############################################################################
#
# General exception entry point
#
###############################################################################
ENTRY(__common_exception)
add -4,sp
mov d0,(sp)
......
......@@ -69,24 +69,6 @@ asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
force_sig_info(SIGFPE, &info, tsk);
}
/*
* handle an FPU invalid_op exception
* - Derived from DO_EINFO() macro in arch/mn10300/kernel/traps.c
*/
asmlinkage void fpu_invalid_op(struct pt_regs *regs, enum exception_code code)
{
siginfo_t info;
if (!user_mode(regs))
die_if_no_fixup("FPU invalid opcode", regs, code);
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_COPROC;
info.si_addr = (void *) regs->pc;
force_sig_info(info.si_signo, &info, current);
}
/*
* save the FPU state to a signal context
*/
......
###############################################################################
#
# MN10300 Low-level cache purging routines for gdbstub
#
# Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
# Written by David Howells (dhowells@redhat.com)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public Licence
# as published by the Free Software Foundation; either version
# 2 of the Licence, or (at your option) any later version.
#
###############################################################################
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/cache.h>
#include <asm/cpu-regs.h>
#include <asm/exceptions.h>
#include <asm/frame.inc>
#include <asm/serial-regs.h>
.text
###############################################################################
#
# GDB stub cache purge
#
###############################################################################
.type gdbstub_purge_cache,@function
ENTRY(gdbstub_purge_cache)
#######################################################################
# read the addresses tagged in the cache's tag RAM and attempt to flush
# those addresses specifically
# - we rely on the hardware to filter out invalid tag entry addresses
mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
mov DCACHE_PURGE(0,0),a1 # dcache purge request address
mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
mn10300_dcache_flush_loop:
mov (a0),d0
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
# cache
mov d0,(a1) # conditional purge
mn10300_dcache_flush_skip:
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
add -1,d1
bne mn10300_dcache_flush_loop
;; # unconditionally flush and invalidate the dcache
;; mov DCACHE_PURGE(0,0),a1 # dcache purge request address
;; mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of
;; # entries
;;
;; gdbstub_purge_cache__dcache_loop:
;; mov (a1),d0 # unconditional purge
;;
;; add L1_CACHE_BYTES,a1
;; add -1,d1
;; bne gdbstub_purge_cache__dcache_loop
#######################################################################
# now invalidate the icache
mov CHCTR,a0
movhu (a0),a1
mov epsw,d1
and ~EPSW_IE,epsw
nop
nop
# disable the icache
and ~CHCTR_ICEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
# wait for the cache to finish
mov CHCTR,a0
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# and reenable it
movhu a1,(a0)
movhu (a0),d0 # read back to flush
# (SIGILLs all over without this)
mov d1,epsw
ret [],0
.size gdbstub_purge_cache,.-gdbstub_purge_cache
......@@ -59,10 +59,10 @@ void __init gdbstub_io_init(void)
/* we want to get serial receive interrupts */
set_intr_level(gdbstub_port->rx_irq,
NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
NUM2GxICR_LEVEL(CONFIG_DEBUGGER_IRQ_LEVEL));
set_intr_level(gdbstub_port->tx_irq,
NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
NUM2GxICR_LEVEL(CONFIG_DEBUGGER_IRQ_LEVEL));
set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_DEBUGGER_IRQ_LEVEL),
gdbstub_io_rx_handler);
*gdbstub_port->rx_icr |= GxICR_ENABLE;
......@@ -88,7 +88,7 @@ void __init gdbstub_io_init(void)
/* permit level 0 IRQs only */
arch_local_change_intr_mask_level(
NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
NUM2EPSW_IM(CONFIG_DEBUGGER_IRQ_LEVEL + 1));
}
/*
......
......@@ -133,7 +133,7 @@
#include <asm/system.h>
#include <asm/gdb-stub.h>
#include <asm/exceptions.h>
#include <asm/cacheflush.h>
#include <asm/debugger.h>
#include <asm/serial-regs.h>
#include <asm/busctl-regs.h>
#include <unit/leds.h>
......@@ -405,6 +405,7 @@ static int hexToInt(char **ptr, int *intValue)
return (numChars);
}
#ifdef CONFIG_GDBSTUB_ALLOW_SINGLE_STEP
/*
* We single-step by setting breakpoints. When an exception
* is handled, we need to restore the instructions hoisted
......@@ -729,6 +730,7 @@ static int gdbstub_single_step(struct pt_regs *regs)
__gdbstub_restore_bp();
return -EFAULT;
}
#endif /* CONFIG_GDBSTUB_ALLOW_SINGLE_STEP */
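In outline, the single-stepping guarded by CONFIG_GDBSTUB_ALLOW_SINGLE_STEP plants temporary breakpoints at the possible successor addresses rather than using a hardware trace flag. A hedged sketch of that mechanism (the struct and helper names here are illustrative, not the stub's own):

	/* Sketch only: assumes BREAK_INSTR_SIZE == 1 and the 0xff 'break'
	 * opcode used elsewhere in this series. */
	struct sstep_bp {
		u8 *addr;	/* where the break byte was planted */
		u8 saved;	/* displaced original byte */
	};

	static void plant_sstep_bp(struct sstep_bp *bp, u8 *next_pc)
	{
		bp->addr  = next_pc;
		bp->saved = *next_pc;
		*next_pc  = 0xff;		/* MN10300 break instruction */
		debugger_local_cache_flushinv_one(next_pc);
	}

	static void lift_sstep_bp(struct sstep_bp *bp)
	{
		if (!bp->addr)
			return;
		*bp->addr = bp->saved;
		debugger_local_cache_flushinv_one(bp->addr);
		bp->addr = NULL;
	}

Two slots are kept because a conditional branch has two possible successors; patching and restoring instruction bytes like this is also why the option is restricted to !SMP && !PREEMPT configurations.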
#ifdef CONFIG_GDBSTUB_CONSOLE
......@@ -1171,7 +1173,7 @@ int gdbstub_clear_breakpoint(u8 *addr, int len)
/*
* This function does all command processing for interfacing to gdb
* - returns 1 if the exception should be skipped, 0 otherwise.
* - returns 0 if the exception should be skipped, -ERROR otherwise.
*/
static int gdbstub(struct pt_regs *regs, enum exception_code excep)
{
......@@ -1186,7 +1188,7 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
int loop;
if (excep == EXCEP_FPU_DISABLED)
return 0;
return -ENOTSUPP;
gdbstub_flush_caches = 0;
......@@ -1195,7 +1197,7 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
asm volatile("mov mdr,%0" : "=d"(mdr));
local_save_flags(epsw);
arch_local_change_intr_mask_level(
NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
NUM2EPSW_IM(CONFIG_DEBUGGER_IRQ_LEVEL + 1));
gdbstub_store_fpu();
......@@ -1208,11 +1210,13 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
/* if we were single stepping, restore the opcodes hoisted for the
* breakpoint[s] */
broke = 0;
#ifdef CONFIG_GDBSTUB_ALLOW_SINGLE_STEP
if ((step_bp[0].addr && step_bp[0].addr == (u8 *) regs->pc) ||
(step_bp[1].addr && step_bp[1].addr == (u8 *) regs->pc))
broke = 1;
__gdbstub_restore_bp();
#endif
if (gdbstub_rx_unget) {
sigval = SIGINT;
......@@ -1548,17 +1552,21 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
* Step to next instruction
*/
case 's':
/*
* using the T flag doesn't seem to perform single
/* Using the T flag doesn't seem to perform single
* stepping (it seems to wind up being caught by the
* JTAG unit), so we have to use breakpoints and
* continue instead.
*/
#ifdef CONFIG_GDBSTUB_ALLOW_SINGLE_STEP
if (gdbstub_single_step(regs) < 0)
/* ignore any fault error for now */
gdbstub_printk("unable to set single-step"
" bp\n");
goto done;
#else
gdbstub_strcpy(output_buffer, "E01");
break;
#endif
/*
* Set baud rate (bBB)
......@@ -1657,7 +1665,7 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
* NB: We flush both caches, just to be sure...
*/
if (gdbstub_flush_caches)
gdbstub_purge_cache();
debugger_local_cache_flushinv();
gdbstub_load_fpu();
mn10300_set_gdbleds(0);
......@@ -1667,14 +1675,23 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
touch_softlockup_watchdog();
local_irq_restore(epsw);
return 1;
return 0;
}
/*
* Determine if we hit a debugger special breakpoint that needs skipping over
* automatically.
*/
int at_debugger_breakpoint(struct pt_regs *regs)
{
return 0;
}
/*
* handle event interception
*/
asmlinkage int gdbstub_intercept(struct pt_regs *regs,
enum exception_code excep)
asmlinkage int debugger_intercept(enum exception_code excep,
int signo, int si_code, struct pt_regs *regs)
{
static u8 notfirst = 1;
int ret;
......@@ -1688,7 +1705,7 @@ asmlinkage int gdbstub_intercept(struct pt_regs *regs,
asm("mov mdr,%0" : "=d"(mdr));
gdbstub_entry(
"--> gdbstub_intercept(%p,%04x) [MDR=%lx PC=%lx]\n",
"--> debugger_intercept(%p,%04x) [MDR=%lx PC=%lx]\n",
regs, excep, mdr, regs->pc);
gdbstub_entry(
......@@ -1722,7 +1739,7 @@ asmlinkage int gdbstub_intercept(struct pt_regs *regs,
ret = gdbstub(regs, excep);
gdbstub_entry("<-- gdbstub_intercept()\n");
gdbstub_entry("<-- debugger_intercept()\n");
gdbstub_busy = 0;
return ret;
}
......
......@@ -29,6 +29,13 @@ extern void ret_from_fork(struct task_struct *) __attribute__((noreturn));
extern void mn10300_low_ipi_handler(void);
#endif
/*
* smp.c
*/
#ifdef CONFIG_SMP
extern void smp_jump_to_debugger(void);
#endif
/*
* time.c
*/
......
......@@ -153,7 +153,7 @@ mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
case LOCAL_TIMER_IPI:
case FLUSH_CACHE_IPI:
case CALL_FUNCTION_NMI_IPI:
case GDB_NMI_IPI:
case DEBUGGER_NMI_IPI:
#ifdef CONFIG_MN10300_TTYSM0
case SC0RXIRQ:
case SC0TXIRQ:
......
......@@ -119,6 +119,10 @@ static int mn10300_serial_request_port(struct uart_port *);
static void mn10300_serial_config_port(struct uart_port *, int);
static int mn10300_serial_verify_port(struct uart_port *,
struct serial_struct *);
#ifdef CONFIG_CONSOLE_POLL
static void mn10300_serial_poll_put_char(struct uart_port *, unsigned char);
static int mn10300_serial_poll_get_char(struct uart_port *);
#endif
static const struct uart_ops mn10300_serial_ops = {
.tx_empty = mn10300_serial_tx_empty,
......@@ -138,6 +142,10 @@ static const struct uart_ops mn10300_serial_ops = {
.request_port = mn10300_serial_request_port,
.config_port = mn10300_serial_config_port,
.verify_port = mn10300_serial_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_put_char = mn10300_serial_poll_put_char,
.poll_get_char = mn10300_serial_poll_get_char,
#endif
};
static irqreturn_t mn10300_serial_interrupt(int irq, void *dev_id);
......@@ -1634,3 +1642,70 @@ static int __init mn10300_serial_console_init(void)
console_initcall(mn10300_serial_console_init);
#endif
#ifdef CONFIG_CONSOLE_POLL
/*
* Polled character reception for the kernel debugger
*/
static int mn10300_serial_poll_get_char(struct uart_port *_port)
{
struct mn10300_serial_port *port =
container_of(_port, struct mn10300_serial_port, uart);
unsigned ix;
u8 st, ch;
_enter("%s", port->name);
do {
/* pull chars out of the hat */
ix = port->rx_outp;
if (ix == port->rx_inp)
return NO_POLL_CHAR;
ch = port->rx_buffer[ix++];
st = port->rx_buffer[ix++];
smp_rmb();
port->rx_outp = ix & (MNSC_BUFFER_SIZE - 1);
} while (st & (SC01STR_FEF | SC01STR_PEF | SC01STR_OEF));
return ch;
}
/*
* Polled character transmission for the kernel debugger
*/
static void mn10300_serial_poll_put_char(struct uart_port *_port,
unsigned char ch)
{
struct mn10300_serial_port *port =
container_of(_port, struct mn10300_serial_port, uart);
u8 intr, tmp;
/* wait for the transmitter to finish anything it might be doing (and
* this includes the virtual DMA handler, so it might take a while) */
while (*port->_status & (SC01STR_TBF | SC01STR_TXF))
continue;
/* disable the Tx ready interrupt */
intr = *port->_intr;
*port->_intr = intr & ~SC01ICR_TI;
tmp = *port->_intr;
if (ch == 0x0a) {
*(u8 *) port->_txb = 0x0d;
while (*port->_status & SC01STR_TBF)
continue;
}
*(u8 *) port->_txb = ch;
while (*port->_status & SC01STR_TBF)
continue;
/* restore the Tx interrupt flag */
*port->_intr = intr;
tmp = *port->_intr;
}
#endif /* CONFIG_CONSOLE_POLL */
......@@ -135,7 +135,7 @@ void release_segments(struct mm_struct *mm)
void machine_restart(char *cmd)
{
#ifdef CONFIG_GDBSTUB
#ifdef CONFIG_KERNEL_DEBUGGER
gdbstub_exit(0);
#endif
......@@ -148,14 +148,14 @@ void machine_restart(char *cmd)
void machine_halt(void)
{
#ifdef CONFIG_GDBSTUB
#ifdef CONFIG_KERNEL_DEBUGGER
gdbstub_exit(0);
#endif
}
void machine_power_off(void)
{
#ifdef CONFIG_GDBSTUB
#ifdef CONFIG_KERNEL_DEBUGGER
gdbstub_exit(0);
#endif
}
......
......@@ -439,6 +439,22 @@ int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
return ret;
}
/**
* smp_jump_to_debugger - Make other CPUs enter the debugger by sending an IPI
*
* Send a non-maskable request to all other CPUs in the system, instructing
* them to jump into the debugger. The caller is responsible for checking that
* the other CPUs responded to the instruction.
*
* The caller should make sure that this CPU's debugger IPI is disabled.
*/
void smp_jump_to_debugger(void)
{
if (num_online_cpus() > 1)
/* Send a message to all other CPUs */
send_IPI_allbutself(DEBUGGER_NMI_IPI);
}
/**
* stop_this_cpu - Callback to stop a CPU.
* @unused: Callback context (ignored).
......@@ -603,7 +619,7 @@ static void __init smp_cpu_init(void)
/**
* smp_prepare_cpu_init - Initialise CPU in startup_secondary
*
* Set interrupt level 0-6 setting and init ICR of gdbstub.
* Set interrupt level 0-6 setting and init ICR of the kernel debugger.
*/
void smp_prepare_cpu_init(void)
{
......@@ -622,15 +638,15 @@ void smp_prepare_cpu_init(void)
for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
#ifdef CONFIG_GDBSTUB
/* initialise GDB-stub */
#ifdef CONFIG_KERNEL_DEBUGGER
/* initialise the kernel debugger interrupt */
do {
unsigned long flags;
u16 tmp16;
flags = arch_local_cli_save();
GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
tmp16 = GxICR(GDB_NMI_IPI);
GxICR(DEBUGGER_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
tmp16 = GxICR(DEBUGGER_NMI_IPI);
arch_local_irq_restore(flags);
} while (0);
#endif
......
......@@ -39,11 +39,17 @@ ENTRY(__switch_to)
# save prev context
mov __switch_back,d0
mov d0,(THREAD_PC,a0)
mov sp,a2
mov a2,(THREAD_SP,a0)
mov a3,(THREAD_A3,a0)
#ifdef CONFIG_KGDB
btst 0xff,(kgdb_single_step)
bne __switch_to__lift_sstep_bp
__switch_to__continue:
#endif
mov d0,(THREAD_PC,a0)
mov (THREAD_A3,a1),a3
mov (THREAD_SP,a1),a2
......@@ -68,3 +74,106 @@ ENTRY(__switch_to)
__switch_back:
and ~EPSW_NMID,epsw
ret [d2,d3,a2,a3,exreg1],32
#ifdef CONFIG_KGDB
###############################################################################
#
# Lift the single-step breakpoints when the task being traced is switched out
# A0 = prev
# A1 = next
#
###############################################################################
__switch_to__lift_sstep_bp:
add -12,sp
mov a0,e4
mov a1,e5
# Clear the single-step flag to prevent us coming this way until we get
# switched back in
bclr 0xff,(kgdb_single_step)
# Remove first breakpoint
mov (kgdb_sstep_bp_addr),a2
cmp 0,a2
beq 1f
movbu (kgdb_sstep_bp),d0
movbu d0,(a2)
#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE) || defined(CONFIG_MN10300_CACHE_INV_ICACHE)
mov a2,d0
mov a2,d1
add 1,d1
calls flush_icache_range
#endif
1:
# Remove second breakpoint
mov (kgdb_sstep_bp_addr+4),a2
cmp 0,a2
beq 2f
movbu (kgdb_sstep_bp+1),d0
movbu d0,(a2)
#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE) || defined(CONFIG_MN10300_CACHE_INV_ICACHE)
mov a2,d0
mov a2,d1
add 1,d1
calls flush_icache_range
#endif
2:
# Change the resumption address and return
mov __switch_back__reinstall_sstep_bp,d0
mov e4,a0
mov e5,a1
add 12,sp
bra __switch_to__continue
###############################################################################
#
# Reinstall the single-step breakpoints when the task being traced is switched
# back in (A1 points to the new thread_struct).
#
###############################################################################
__switch_back__reinstall_sstep_bp:
add -12,sp
mov a0,e4 # save the return value
mov 0xff,d3
# Reinstall first breakpoint
mov (kgdb_sstep_bp_addr),a2
cmp 0,a2
beq 1f
movbu (a2),d0
movbu d0,(kgdb_sstep_bp)
movbu d3,(a2)
#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE) || defined(CONFIG_MN10300_CACHE_INV_ICACHE)
mov a2,d0
mov a2,d1
add 1,d1
calls flush_icache_range
#endif
1:
# Reinstall second breakpoint
mov (kgdb_sstep_bp_addr+4),a2
cmp 0,a2
beq 2f
movbu (a2),d0
movbu d0,(kgdb_sstep_bp+1)
movbu d3,(a2)
#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE) || defined(CONFIG_MN10300_CACHE_INV_ICACHE)
mov a2,d0
mov a2,d1
add 1,d1
calls flush_icache_range
#endif
2:
mov d3,(kgdb_single_step)
# Restore the return value (the previous thread_struct pointer)
mov e4,a0
mov a0,d0
add 12,sp
bra __switch_back
#endif /* CONFIG_KGDB */
......@@ -99,3 +99,49 @@ config MN10300_CACHE_INV_ICACHE
help
Set if we need the icache to be invalidated, even if the dcache is in
write-through mode and doesn't need flushing.
#
# The kernel debugger gets its own separate cache flushing functions
#
config MN10300_DEBUGGER_CACHE_FLUSH_BY_TAG
def_bool y if KERNEL_DEBUGGER && \
MN10300_CACHE_WBACK && \
!MN10300_CACHE_SNOOP && \
MN10300_CACHE_MANAGE_BY_TAG
help
Set if the debugger needs to flush the dcache and invalidate the
icache using the cache tag registers to make breakpoints work.
config MN10300_DEBUGGER_CACHE_FLUSH_BY_REG
def_bool y if KERNEL_DEBUGGER && \
MN10300_CACHE_WBACK && \
!MN10300_CACHE_SNOOP && \
MN10300_CACHE_MANAGE_BY_REG
help
Set if the debugger needs to flush the dcache and invalidate the
icache using automatic purge registers to make breakpoints work.
config MN10300_DEBUGGER_CACHE_INV_BY_TAG
def_bool y if KERNEL_DEBUGGER && \
MN10300_CACHE_WTHRU && \
!MN10300_CACHE_SNOOP && \
MN10300_CACHE_MANAGE_BY_TAG
help
Set if the debugger needs to invalidate the icache using the cache
tag registers to make breakpoints work.
config MN10300_DEBUGGER_CACHE_INV_BY_REG
def_bool y if KERNEL_DEBUGGER && \
MN10300_CACHE_WTHRU && \
!MN10300_CACHE_SNOOP && \
MN10300_CACHE_MANAGE_BY_REG
help
Set if the debugger needs to invalidate the icache using automatic
purge registers to make breakpoints work.
config MN10300_DEBUGGER_CACHE_NO_FLUSH
def_bool y if KERNEL_DEBUGGER && \
(MN10300_CACHE_DISABLED || MN10300_CACHE_SNOOP)
help
Set if the debugger does not need to flush the dcache and/or
invalidate the icache to make breakpoints work.
......@@ -13,6 +13,15 @@ cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o
cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_FLUSH_BY_TAG) += \
cache-dbg-flush-by-tag.o cache-dbg-inv-by-tag.o
cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_FLUSH_BY_REG) += \
cache-dbg-flush-by-reg.o
cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_INV_BY_TAG) += \
cache-dbg-inv-by-tag.o cache-dbg-inv.o
cacheflush-$(CONFIG_MN10300_DEBUGGER_CACHE_INV_BY_REG) += \
cache-dbg-inv-by-reg.o cache-dbg-inv.o
cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
obj-y := \
......
/* MN10300 CPU cache invalidation routines, using automatic purge registers
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
###############################################################################
#
# void debugger_local_cache_flushinv(void)
# Flush the entire data cache back to RAM and invalidate the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv
.type debugger_local_cache_flushinv,@function
debugger_local_cache_flushinv:
#
# firstly flush the dcache
#
movhu (CHCTR),d0
btst CHCTR_DCEN|CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_end
mov DCPGCR,a0
mov epsw,d1
and ~EPSW_IE,epsw
or EPSW_NMID,epsw
nop
btst CHCTR_DCEN,d0
beq debugger_local_cache_flushinv_no_dcache
# wait for busy bit of area purge
setlb
mov (a0),d0
btst DCPGCR_DCPGBSY,d0
lne
# set mask
clr d0
mov d0,(DCPGMR)
# area purge
#
# DCPGCR = DCPGCR_DCP
#
mov DCPGCR_DCP,d0
mov d0,(a0)
# wait for busy bit of area purge
setlb
mov (a0),d0
btst DCPGCR_DCPGBSY,d0
lne
debugger_local_cache_flushinv_no_dcache:
#
# secondly, invalidate the icache if it is enabled
#
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_done
invalidate_icache 0
debugger_local_cache_flushinv_done:
mov d1,epsw
debugger_local_cache_flushinv_end:
ret [],0
.size debugger_local_cache_flushinv,.-debugger_local_cache_flushinv
###############################################################################
#
# void debugger_local_cache_flushinv_one(u8 *addr)
#
# Invalidate one particular cacheline if it's in the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv_one
.type debugger_local_cache_flushinv_one,@function
debugger_local_cache_flushinv_one:
movhu (CHCTR),d1
btst CHCTR_DCEN|CHCTR_ICEN,d1
beq debugger_local_cache_flushinv_one_end
btst CHCTR_DCEN,d1
beq debugger_local_cache_flushinv_one_no_dcache
# round cacheline addr down
and L1_CACHE_TAG_MASK,d0
mov d0,a1
mov d0,d1
# determine the dcache purge control reg address
mov DCACHE_PURGE(0,0),a0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0
# retain valid entries in the cache
or L1_CACHE_TAG_VALID,d1
# conditionally purge this line in all ways
mov d1,(L1_CACHE_WAYDISP*0,a0)
debugger_local_cache_flushinv_no_dcache:
#
# now try to flush the icache
#
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq mn10300_local_icache_inv_range_reg_end
LOCAL_CLI_SAVE(d1)
mov ICIVCR,a0
# wait for the invalidator to quiesce
setlb
mov (a0),d0
btst ICIVCR_ICIVBSY,d0
lne
# set the mask
mov L1_CACHE_TAG_MASK,d0
mov d0,(ICIVMR)
# invalidate the cache line at the given address
or ICIVCR_ICI,a1
mov a1,(a0)
# wait for the invalidator to quiesce again
setlb
mov (a0),d0
btst ICIVCR_ICIVBSY,d0
lne
LOCAL_IRQ_RESTORE(d1)
debugger_local_cache_flushinv_one_end:
ret [],0
.size debugger_local_cache_flushinv_one,.-debugger_local_cache_flushinv_one
/* MN10300 CPU cache invalidation routines, using direct tag flushing
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
###############################################################################
#
# void debugger_local_cache_flushinv(void)
#
# Flush the entire data cache back to RAM and invalidate the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv
.type debugger_local_cache_flushinv,@function
debugger_local_cache_flushinv:
#
# firstly flush the dcache
#
movhu (CHCTR),d0
btst CHCTR_DCEN|CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_end
btst CHCTR_DCEN,d0
beq debugger_local_cache_flushinv_no_dcache
# read the addresses tagged in the cache's tag RAM and attempt to flush
# those addresses specifically
# - we rely on the hardware to filter out invalid tag entry addresses
mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
mov DCACHE_PURGE(0,0),a1 # dcache purge request address
mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,e0 # total number of entries
mn10300_local_dcache_flush_loop:
mov (a0),d0
and L1_CACHE_TAG_MASK,d0
or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
# cache
mov d0,(a1) # conditional purge
add L1_CACHE_BYTES,a0
add L1_CACHE_BYTES,a1
add -1,e0
bne mn10300_local_dcache_flush_loop
debugger_local_cache_flushinv_no_dcache:
#
# secondly, invalidate the icache if it is enabled
#
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_end
invalidate_icache 1
debugger_local_cache_flushinv_end:
ret [],0
.size debugger_local_cache_flushinv,.-debugger_local_cache_flushinv
###############################################################################
#
# void debugger_local_cache_flushinv_one(u8 *addr)
#
# Invalidate one particular cacheline if it's in the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv_one
.type debugger_local_cache_flushinv_one,@function
debugger_local_cache_flushinv_one:
movhu (CHCTR),d1
btst CHCTR_DCEN|CHCTR_ICEN,d1
beq debugger_local_cache_flushinv_one_end
btst CHCTR_DCEN,d1
beq debugger_local_cache_flushinv_one_icache
# round cacheline addr down
and L1_CACHE_TAG_MASK,d0
mov d0,a1
# determine the dcache purge control reg address
mov DCACHE_PURGE(0,0),a0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0
# retain valid entries in the cache
or L1_CACHE_TAG_VALID,a1
# conditionally purge this line in all ways
mov a1,(L1_CACHE_WAYDISP*0,a0)
# now go and do the icache
bra debugger_local_cache_flushinv_one_icache
debugger_local_cache_flushinv_one_end:
ret [],0
.size debugger_local_cache_flushinv_one,.-debugger_local_cache_flushinv_one
/* MN10300 CPU cache invalidation routines, using automatic purge registers
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
.globl debugger_local_cache_flushinv_one
###############################################################################
#
# void debugger_local_cache_flushinv_one(u8 *addr)
#
# Invalidate one particular cacheline if it's in the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv_one
.type debugger_local_cache_flushinv_one,@function
debugger_local_cache_flushinv_one:
mov d0,a1
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq mn10300_local_icache_inv_range_reg_end
LOCAL_CLI_SAVE(d1)
mov ICIVCR,a0
# wait for the invalidator to quiesce
setlb
mov (a0),d0
btst ICIVCR_ICIVBSY,d0
lne
# set the mask
mov ~L1_CACHE_TAG_MASK,d0
mov d0,(ICIVMR)
# invalidate the cache line at the given address
and ~L1_CACHE_TAG_MASK,a1
or ICIVCR_ICI,a1
mov a1,(a0)
# wait for the invalidator to quiesce again
setlb
mov (a0),d0
btst ICIVCR_ICIVBSY,d0
lne
LOCAL_IRQ_RESTORE(d1)
mn10300_local_icache_inv_range_reg_end:
ret [],0
.size debugger_local_cache_flushinv_one,.-debugger_local_cache_flushinv_one
/* MN10300 CPU cache invalidation routines, using direct tag flushing
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
.globl debugger_local_cache_flushinv_one_icache
###############################################################################
#
# void debugger_local_cache_flushinv_one(u8 *addr)
#
# Invalidate one particular cacheline if it's in the icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv_one_icache
.type debugger_local_cache_flushinv_one_icache,@function
debugger_local_cache_flushinv_one_icache:
movm [d3,a2],(sp)
mov CHCTR,a2
movhu (a2),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_one_icache_end
mov d0,a1
and L1_CACHE_TAG_MASK,a1
# read the tags from the tag RAM, and if they indicate a matching valid
# cache line then we invalidate that line
mov ICACHE_TAG(0,0),a0
mov a1,d0
and L1_CACHE_TAG_ENTRY,d0
add d0,a0 # starting icache tag RAM
# access address
and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
or L1_CACHE_TAG_VALID,a1
mov L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_VALID,d1
LOCAL_CLI_SAVE(d3)
# disable the icache
movhu (a2),d0
and ~CHCTR_ICEN,d0
movhu d0,(a2)
# and wait for it to calm down
setlb
movhu (a2),d0
btst CHCTR_ICBUSY,d0
lne
# check all the way tags for this cache entry
mov (a0),d0 # read the tag in the way 0 slot
xor a1,d0
and d1,d0
beq debugger_local_icache_kill # jump if matched
add L1_CACHE_WAYDISP,a0
mov (a0),d0 # read the tag in the way 1 slot
xor a1,d0
and d1,d0
beq debugger_local_icache_kill # jump if matched
add L1_CACHE_WAYDISP,a0
mov (a0),d0 # read the tag in the way 2 slot
xor a1,d0
and d1,d0
beq debugger_local_icache_kill # jump if matched
add L1_CACHE_WAYDISP,a0
mov (a0),d0 # read the tag in the way 3 slot
xor a1,d0
and d1,d0
bne debugger_local_icache_finish # jump if not matched
debugger_local_icache_kill:
mov d0,(a0) # kill the tag (D0 is 0 at this point)
debugger_local_icache_finish:
# wait for the cache to finish what it's doing
setlb
movhu (a2),d0
btst CHCTR_ICBUSY,d0
lne
# and reenable it
or CHCTR_ICEN,d0
movhu d0,(a2)
movhu (a2),d0
# re-enable interrupts
LOCAL_IRQ_RESTORE(d3)
debugger_local_cache_flushinv_one_icache_end:
ret [d3,a2],8
.size debugger_local_cache_flushinv_one_icache,.-debugger_local_cache_flushinv_one_icache
#ifdef CONFIG_MN10300_DEBUGGER_CACHE_INV_BY_TAG
.globl debugger_local_cache_flushinv_one
.type debugger_local_cache_flushinv_one,@function
debugger_local_cache_flushinv_one = debugger_local_cache_flushinv_one_icache
#endif
/* MN10300 CPU cache invalidation routines
*
* Copyright (C) 2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
.am33_2
.globl debugger_local_cache_flushinv
###############################################################################
#
# void debugger_local_cache_flushinv(void)
#
# Invalidate the entire icache
#
###############################################################################
ALIGN
.globl debugger_local_cache_flushinv
.type debugger_local_cache_flushinv,@function
debugger_local_cache_flushinv:
#
# we only need to invalidate the icache in this cache mode
#
mov CHCTR,a0
movhu (a0),d0
btst CHCTR_ICEN,d0
beq debugger_local_cache_flushinv_end
invalidate_icache 1
debugger_local_cache_flushinv_end:
ret [],0
.size debugger_local_cache_flushinv,.-debugger_local_cache_flushinv
......@@ -62,7 +62,7 @@ mn10300_local_dcache_flush:
mn10300_local_dcache_flush_loop:
mov (a0),d0
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
and L1_CACHE_TAG_MASK,d0
or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
# cache
mov d0,(a1) # conditional purge
......@@ -112,11 +112,11 @@ mn10300_local_dcache_flush_range:
1:
# round start addr down
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
and L1_CACHE_TAG_MASK,d0
mov d0,a1
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
and L1_CACHE_TAG_MASK,d1
# write a request to flush all instances of an address from the cache
mov DCACHE_PURGE(0,0),a0
......@@ -215,12 +215,11 @@ mn10300_local_dcache_flush_inv_range:
bra mn10300_local_dcache_flush_inv
1:
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
# addr down
and L1_CACHE_TAG_MASK,d0 # round start addr down
mov d0,a1
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_MASK,d1
# write a request to flush and invalidate all instances of an address
# from the cache
......
......@@ -15,6 +15,7 @@
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
#define mn10300_local_dcache_inv_range_intr_interval \
+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
......@@ -62,10 +63,7 @@ mn10300_local_icache_inv:
btst CHCTR_ICEN,d0
beq mn10300_local_icache_inv_end
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
movhu (a0),d0
invalidate_icache 1
mn10300_local_icache_inv_end:
ret [],0
......@@ -87,11 +85,8 @@ mn10300_local_dcache_inv:
btst CHCTR_DCEN,d0
beq mn10300_local_dcache_inv_end
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
movhu (a0),d0
invalidate_dcache 1
mn10300_local_dcache_inv_end:
ret [],0
.size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
......@@ -121,9 +116,9 @@ mn10300_local_dcache_inv_range:
# and if they're not cacheline-aligned, we must flush any bits outside
# the range that share cachelines with stuff inside the range
#ifdef CONFIG_MN10300_CACHE_WBACK
btst ~(L1_CACHE_BYTES-1),d0
btst ~L1_CACHE_TAG_MASK,d0
bne 1f
btst ~(L1_CACHE_BYTES-1),d1
btst ~L1_CACHE_TAG_MASK,d1
beq 2f
1:
bra mn10300_local_dcache_flush_inv_range
......@@ -141,12 +136,11 @@ mn10300_local_dcache_inv_range:
# writeback mode, in which case we would be in flush and invalidate by
# now
#ifndef CONFIG_MN10300_CACHE_WBACK
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
# addr down
and L1_CACHE_TAG_MASK,d0 # round start addr down
mov L1_CACHE_BYTES-1,d2
add d2,d1
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1 # round end addr up
and L1_CACHE_TAG_MASK,d1 # round end addr up
#endif /* !CONFIG_MN10300_CACHE_WBACK */
sub d0,d1,d2 # calculate the total size
......
......@@ -15,6 +15,7 @@
#include <asm/cache.h>
#include <asm/irqflags.h>
#include <asm/cacheflush.h>
#include "cache.inc"
#define mn10300_local_dcache_inv_range_intr_interval \
+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
......@@ -70,43 +71,7 @@ mn10300_local_icache_inv:
btst CHCTR_ICEN,d0
beq mn10300_local_icache_inv_end
#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
LOCAL_CLI_SAVE(d1)
# disable the icache
and ~CHCTR_ICEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
# wait for the cache to finish
mov CHCTR,a0
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# and reenable it
and ~CHCTR_ICINV,d0
or CHCTR_ICEN,d0
movhu d0,(a0)
movhu (a0),d0
LOCAL_IRQ_RESTORE(d1)
#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
movhu (a0),d0
#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
invalidate_icache 1
mn10300_local_icache_inv_end:
ret [],0
......@@ -128,43 +93,7 @@ mn10300_local_dcache_inv:
btst CHCTR_DCEN,d0
beq mn10300_local_dcache_inv_end
#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
LOCAL_CLI_SAVE(d1)
# disable the dcache
and ~CHCTR_DCEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_DCBUSY,d0
lne
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
# wait for the cache to finish
mov CHCTR,a0
setlb
movhu (a0),d0
btst CHCTR_DCBUSY,d0
lne
# and reenable it
and ~CHCTR_DCINV,d0
or CHCTR_DCEN,d0
movhu d0,(a0)
movhu (a0),d0
LOCAL_IRQ_RESTORE(d1)
#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
movhu (a0),d0
#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
invalidate_dcache 1
mn10300_local_dcache_inv_end:
ret [],0
......@@ -195,9 +124,9 @@ mn10300_local_dcache_inv_range:
# and if they're not cacheline-aligned, we must flush any bits outside
# the range that share cachelines with stuff inside the range
#ifdef CONFIG_MN10300_CACHE_WBACK
btst ~(L1_CACHE_BYTES-1),d0
btst ~L1_CACHE_TAG_MASK,d0
bne 1f
btst ~(L1_CACHE_BYTES-1),d1
btst ~L1_CACHE_TAG_MASK,d1
beq 2f
1:
bra mn10300_local_dcache_flush_inv_range
......@@ -212,11 +141,10 @@ mn10300_local_dcache_inv_range:
beq mn10300_local_dcache_inv_range_end
#ifndef CONFIG_MN10300_CACHE_WBACK
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
# addr down
and L1_CACHE_TAG_MASK,d0 # round start addr down
add L1_CACHE_BYTES,d1 # round end addr up
and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
and L1_CACHE_TAG_MASK,d1
#endif /* !CONFIG_MN10300_CACHE_WBACK */
mov d0,a1
......
/* MN10300 CPU core caching macros -*- asm -*-
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
###############################################################################
#
# Invalidate the instruction cache.
# A0: Should hold CHCTR
# D0: Should have been read from CHCTR
# D1: Will be clobbered
#
# On some cores it is necessary to disable the icache whilst we do this.
#
###############################################################################
.macro invalidate_icache,disable_irq
#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
.if \disable_irq
# don't want an interrupt routine seeing a disabled cache
mov epsw,d1
and ~EPSW_IE,epsw
or EPSW_NMID,epsw
nop
nop
.endif
# disable the icache
and ~CHCTR_ICEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
# wait for the cache to finish
setlb
movhu (a0),d0
btst CHCTR_ICBUSY,d0
lne
# and reenable it
or CHCTR_ICEN,d0
movhu d0,(a0)
movhu (a0),d0
.if \disable_irq
LOCAL_IRQ_RESTORE(d1)
.endif
#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
# invalidate
or CHCTR_ICINV,d0
movhu d0,(a0)
movhu (a0),d0
#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
.endm
###############################################################################
#
# Invalidate the data cache.
# A0: Should hold CHCTR
# D0: Should have been read from CHCTR
# D1: Will be clobbered
#
# On some cores it is necessary to disable the dcache whilst we do this.
#
###############################################################################
.macro invalidate_dcache,disable_irq
#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
.if \disable_irq
# don't want an interrupt routine seeing a disabled cache
mov epsw,d1
and ~EPSW_IE,epsw
or EPSW_NMID,epsw
nop
nop
.endif
# disable the dcache
and ~CHCTR_DCEN,d0
movhu d0,(a0)
# and wait for it to calm down
setlb
movhu (a0),d0
btst CHCTR_DCBUSY,d0
lne
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
# wait for the cache to finish
setlb
movhu (a0),d0
btst CHCTR_DCBUSY,d0
lne
# and reenable it
or CHCTR_DCEN,d0
movhu d0,(a0)
movhu (a0),d0
.if \disable_irq
LOCAL_IRQ_RESTORE(d1)
.endif
#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
# invalidate
or CHCTR_DCINV,d0
movhu d0,(a0)
movhu (a0),d0
#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
.endm
......@@ -28,8 +28,9 @@
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/gdb-stub.h>
#include <asm/cpu-regs.h>
#include <asm/debugger.h>
#include <asm/gdb-stub.h>
/*
* Unlock any spinlocks which will prevent us from getting the
......@@ -306,10 +307,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
printk(" printing pc:\n");
printk(KERN_ALERT "%08lx\n", regs->pc);
#ifdef CONFIG_GDBSTUB
gdbstub_intercept(
regs, fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR);
#endif
debugger_intercept(fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR,
SIGSEGV, SEGV_ACCERR, regs);
page = PTBR;
page = ((unsigned long *) __va(page))[address >> 22];
......
......@@ -23,6 +23,7 @@
#define L1_CACHE_TAG_DIRTY 0x00000008 /* data cache tag dirty bit */
#define L1_CACHE_TAG_ENTRY 0x00000ff0 /* cache tag entry address mask */
#define L1_CACHE_TAG_ADDRESS 0xfffff000 /* cache tag line address mask */
#define L1_CACHE_TAG_MASK +(L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY)
/*
* specification of the interval between interrupt checking intervals whilst
......
......@@ -29,6 +29,7 @@
#define L1_CACHE_TAG_DIRTY 0x00000008 /* data cache tag dirty bit */
#define L1_CACHE_TAG_ENTRY 0x00000fe0 /* cache tag entry address mask */
#define L1_CACHE_TAG_ADDRESS 0xfffff000 /* cache tag line address mask */
#define L1_CACHE_TAG_MASK +(L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY)
/*
* specification of the interval between interrupt checking intervals whilst
......
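Worked out for reference: in the first header, L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY is 0xfffff000 | 0x00000ff0 = 0xfffffff0, so L1_CACHE_TAG_MASK rounds an address down to a 16-byte cacheline; in the second, 0xfffff000 | 0x00000fe0 = 0xffffffe0 gives 32-byte lines. The assembly changes above simply replace the spelled-out OR of the two masks with this single combined mask.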
......@@ -297,6 +297,7 @@ extern int
kgdb_handle_exception(int ex_vector, int signo, int err_code,
struct pt_regs *regs);
extern int kgdb_nmicallback(int cpu, void *regs);
extern void gdbstub_exit(int status);
extern int kgdb_single_step;
extern atomic_t kgdb_active;
......
......@@ -1093,3 +1093,33 @@ int gdbstub_state(struct kgdb_state *ks, char *cmd)
put_packet(remcom_out_buffer);
return 0;
}
/**
* gdbstub_exit - Send an exit message to GDB
* @status: The exit code to report.
*/
void gdbstub_exit(int status)
{
unsigned char checksum, ch, buffer[3];
int loop;
buffer[0] = 'W';
buffer[1] = hex_asc_hi(status);
buffer[2] = hex_asc_lo(status);
dbg_io_ops->write_char('$');
checksum = 0;
for (loop = 0; loop < 3; loop++) {
ch = buffer[loop];
checksum += ch;
dbg_io_ops->write_char(ch);
}
dbg_io_ops->write_char('#');
dbg_io_ops->write_char(hex_asc_hi(checksum));
dbg_io_ops->write_char(hex_asc_lo(checksum));
/* make sure the output is flushed, lest the bootloader clobber it */
dbg_io_ops->flush();
}
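As a concrete illustration, worked by hand from the loop above rather than taken from the patch: for status 0 the characters sent are $W00#b7, since the checksum is the byte sum of 'W', '0' and '0' (0x57 + 0x30 + 0x30 = 0xb7) reduced to two lowercase hex digits.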