Commit 58daf18c authored by Russell King

Merge branch 'clksrc' into devel

Conflicts:
	arch/arm/mach-vexpress/v2m.c
	arch/arm/plat-omap/counter_32k.c
	arch/arm/plat-versatile/Makefile
parents aa312be1 0af85dda
@@ -14,6 +14,7 @@ config ARM
 	select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
 	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
 	select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
+	select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
@@ -38,6 +39,9 @@ config HAVE_PWM
 config SYS_SUPPORTS_APM_EMULATION
 	bool
 
+config HAVE_SCHED_CLOCK
+	bool
+
 config GENERIC_GPIO
 	bool
@@ -233,6 +237,7 @@ config ARCH_REALVIEW
 	bool "ARM Ltd. RealView family"
 	select ARM_AMBA
 	select COMMON_CLKDEV
+	select HAVE_SCHED_CLOCK
 	select ICST
 	select GENERIC_CLOCKEVENTS
 	select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -247,6 +252,7 @@ config ARCH_VERSATILE
 	select ARM_AMBA
 	select ARM_VIC
 	select COMMON_CLKDEV
+	select HAVE_SCHED_CLOCK
 	select ICST
 	select GENERIC_CLOCKEVENTS
 	select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -263,6 +269,7 @@ config ARCH_VEXPRESS
 	select COMMON_CLKDEV
 	select GENERIC_CLOCKEVENTS
 	select HAVE_CLK
+	select HAVE_SCHED_CLOCK
 	select ICST
 	select PLAT_VERSATILE
 	help
@@ -434,6 +441,7 @@ config ARCH_IXP4XX
 	select CPU_XSCALE
 	select GENERIC_GPIO
 	select GENERIC_CLOCKEVENTS
+	select HAVE_SCHED_CLOCK
 	select DMABOUNCE if PCI
 	help
 	  Support for Intel's IXP4XX (XScale) family of processors.
@@ -509,6 +517,7 @@ config ARCH_MMP
 	select ARCH_REQUIRE_GPIOLIB
 	select COMMON_CLKDEV
 	select GENERIC_CLOCKEVENTS
+	select HAVE_SCHED_CLOCK
 	select TICK_ONESHOT
 	select PLAT_PXA
 	select SPARSE_IRQ
@@ -565,6 +574,7 @@ config ARCH_TEGRA
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_GPIO
 	select HAVE_CLK
+	select HAVE_SCHED_CLOCK
 	select COMMON_CLKDEV
 	select ARCH_HAS_BARRIERS if CACHE_L2X0
 	select ARCH_HAS_CPUFREQ
@@ -588,6 +598,7 @@ config ARCH_PXA
 	select COMMON_CLKDEV
 	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_CLOCKEVENTS
+	select HAVE_SCHED_CLOCK
 	select TICK_ONESHOT
 	select PLAT_PXA
 	select SPARSE_IRQ
@@ -636,6 +647,7 @@ config ARCH_SA1100
 	select CPU_FREQ
 	select GENERIC_CLOCKEVENTS
 	select HAVE_CLK
+	select HAVE_SCHED_CLOCK
 	select TICK_ONESHOT
 	select ARCH_REQUIRE_GPIOLIB
 	help
@@ -782,6 +794,7 @@ config ARCH_U300
 	bool "ST-Ericsson U300 Series"
 	depends on MMU
 	select CPU_ARM926T
+	select HAVE_SCHED_CLOCK
 	select HAVE_TCM
 	select ARM_AMBA
 	select ARM_VIC
@@ -830,6 +843,7 @@ config ARCH_OMAP
 	select ARCH_REQUIRE_GPIOLIB
 	select ARCH_HAS_CPUFREQ
 	select GENERIC_CLOCKEVENTS
+	select HAVE_SCHED_CLOCK
 	select ARCH_HAS_HOLES_MEMORYMODEL
 	help
 	  Support for TI's OMAP platform (OMAP1/2/3/4).
@@ -983,9 +997,11 @@ config ARCH_ACORN
 config PLAT_IOP
 	bool
 	select GENERIC_CLOCKEVENTS
+	select HAVE_SCHED_CLOCK
 
 config PLAT_ORION
 	bool
+	select HAVE_SCHED_CLOCK
 
 config PLAT_PXA
 	bool
@@ -1212,10 +1228,11 @@ config SMP
 	depends on EXPERIMENTAL
 	depends on GENERIC_CLOCKEVENTS
 	depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
-		 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 ||\
-		 ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
+		 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
+		 ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
+		 ARCH_MSM_SCORPIONMP
 	select USE_GENERIC_SMP_HELPERS
-	select HAVE_ARM_SCU
+	select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
@@ -1290,6 +1307,7 @@ config NR_CPUS
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
 	depends on SMP && HOTPLUG && EXPERIMENTAL
+	depends on !ARCH_MSM
 	help
 	  Say Y here to experiment with turning CPUs off and on. CPUs
 	  can be controlled through /sys/devices/system/cpu.
@@ -1298,7 +1316,7 @@ config LOCAL_TIMERS
 	bool "Use local timer interrupts"
 	depends on SMP
 	default y
-	select HAVE_ARM_TWD
+	select HAVE_ARM_TWD if !ARCH_MSM_SCORPIONMP
 	help
 	  Enable support for local timers on SMP platforms, rather then the
 	  legacy IPI broadcast method. Local timers allows the system
......
@@ -23,7 +23,7 @@ config STRICT_DEVMEM
 config FRAME_POINTER
 	bool
 	depends on !THUMB2_KERNEL
-	default y if !ARM_UNWIND
+	default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
 	help
 	  If you say N here, the resulting kernel will be slightly smaller and
 	  faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
......
@@ -44,7 +44,6 @@ static struct clocksource clocksource_sp804 = {
 	.rating		= 200,
 	.read		= sp804_read,
 	.mask		= CLOCKSOURCE_MASK(32),
-	.shift		= 20,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -61,8 +60,7 @@ void __init sp804_clocksource_init(void __iomem *base)
 	writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
 		clksrc_base + TIMER_CTRL);
 
-	cs->mult = clocksource_khz2mult(TIMER_FREQ_KHZ, cs->shift);
-	clocksource_register(cs);
+	clocksource_register_khz(cs, TIMER_FREQ_KHZ);
 }
......
/*
 * sched_clock.h: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef ASM_SCHED_CLOCK
#define ASM_SCHED_CLOCK

#include <linux/kernel.h>
#include <linux/types.h>

struct clock_data {
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 epoch_cyc_copy;
	u32 mult;
	u32 shift;
};

#define DEFINE_CLOCK_DATA(name)	struct clock_data name

static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

/*
 * Atomically update the sched_clock epoch. Your update callback will
 * be called from a timer before the counter wraps - read the current
 * counter value, and call this function to safely move the epochs
 * forward. Only use this from the update callback.
 */
static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
{
	unsigned long flags;
	u64 ns = cd->epoch_ns +
		cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);

	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_fixed_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd->epoch_cyc = cyc;
	smp_wmb();
	cd->epoch_ns = ns;
	smp_wmb();
	cd->epoch_cyc_copy = cyc;
	raw_local_irq_restore(flags);
}

/*
 * If your clock rate is known at compile time, using this will allow
 * you to optimize the mult/shift loads away. This is paired with
 * init_fixed_sched_clock() to ensure that your mult/shift are correct.
 */
static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
	u32 cyc, u32 mask, u32 mult, u32 shift)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	/*
	 * Load the epoch_cyc and epoch_ns atomically. We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd->epoch_cyc;
		smp_rmb();
		epoch_ns = cd->epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd->epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
}

/*
 * Otherwise, you need to use this, which will obtain the mult/shift
 * from the clock_data structure. Use init_sched_clock() with this.
 */
static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
	u32 cyc, u32 mask)
{
	return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
}

/*
 * Initialize the clock data - calculate the appropriate multiplier
 * and shift. Also setup a timer to ensure that the epoch is refreshed
 * at the appropriate time interval, which will call your update
 * handler.
 */
void init_sched_clock(struct clock_data *, void (*)(void),
	unsigned int, unsigned long);

/*
 * Use this initialization function rather than init_sched_clock() if
 * you're using cyc_to_fixed_sched_clock, which will warn if your
 * constants are incorrect.
 */
static inline void init_fixed_sched_clock(struct clock_data *cd,
	void (*update)(void), unsigned int bits, unsigned long rate,
	u32 mult, u32 shift)
{
	init_sched_clock(cd, update, bits, rate);
	if (cd->mult != mult || cd->shift != shift) {
		pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
			"sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
			mult, shift, cd->mult, cd->shift);
	}
}

#endif
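The header above is easiest to read next to a caller, so here is a minimal usage sketch (not part of this commit): a platform with a free-running 32-bit counter wires up DEFINE_CLOCK_DATA(), an update callback, and sched_clock() itself. my_counter_read() and MY_COUNTER_RATE are hypothetical stand-ins for the platform's own counter accessor and its rate in Hz.

#include <asm/sched_clock.h>

static DEFINE_CLOCK_DATA(cd);

/* sched_clock() must be notrace: the tracers themselves call it */
unsigned long long notrace sched_clock(void)
{
	u32 cyc = my_counter_read();	/* hypothetical raw counter read */
	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
}

/* called from sched_clock_timer before the counter wraps */
static void notrace my_update_sched_clock(void)
{
	u32 cyc = my_counter_read();
	update_sched_clock(&cd, cyc, (u32)~0);
}

static void __init my_sched_clock_init(void)
{
	/* 32 counter bits, MY_COUNTER_RATE in Hz */
	init_sched_clock(&cd, my_update_sched_clock, 32, MY_COUNTER_RATE);
}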
@@ -63,6 +63,11 @@
 #include <asm/outercache.h>
 
 #define __exception	__attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry	__irq_entry
+#else
+#define __exception_irq_entry	__exception
+#endif
 
 struct thread_info;
 struct task_struct;
......
@@ -15,13 +15,32 @@ struct undef_hook {
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	extern char __irqentry_text_start[];
+	extern char __irqentry_text_end[];
+
+	return ptr >= (unsigned long)&__irqentry_text_start &&
+	       ptr < (unsigned long)&__irqentry_text_end;
+}
+#else
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	return 0;
+}
+#endif
+
 static inline int in_exception_text(unsigned long ptr)
 {
 	extern char __exception_text_start[];
 	extern char __exception_text_end[];
+	int in;
 
-	return ptr >= (unsigned long)&__exception_text_start &&
-	       ptr < (unsigned long)&__exception_text_end;
+	in = ptr >= (unsigned long)&__exception_text_start &&
+	     ptr < (unsigned long)&__exception_text_end;
+
+	return in ? : __in_irqentry_text(ptr);
 }
 
 extern void __init early_trap_init(void);
......
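A side note on the new return statement in in_exception_text(): `in ? : __in_irqentry_text(ptr)` uses the GNU C conditional-expression extension, where a ? : b means a ? a : b with a evaluated only once. A standalone illustration (compiles with GCC):

/* GNU C extension: "a ? : b" yields a if non-zero, else b */
static int pick(int a, int b)
{
	return a ? : b;	/* pick(3, 7) == 3; pick(0, 7) == 7 */
}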
@@ -5,7 +5,7 @@
 CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
 AFLAGS_head.o        := -DTEXT_OFFSET=$(TEXT_OFFSET)
 
-ifdef CONFIG_DYNAMIC_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
@@ -29,10 +29,12 @@ obj-$(CONFIG_MODULES)		+= armksyms.o module.o
 obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
+obj-$(CONFIG_HAVE_SCHED_CLOCK)	+= sched_clock.o
 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
 obj-$(CONFIG_HAVE_ARM_TWD)	+= smp_twd.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_KPROBES)		+= kprobes.o kprobes-decode.o
 obj-$(CONFIG_ATAGS_PROC)	+= atags.o
......
@@ -147,98 +147,170 @@ ENDPROC(ret_from_fork)
 #endif
 #endif
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(__gnu_mcount_nc)
-	mov	ip, lr
-	ldmia	sp!, {lr}
-	mov	pc, ip
-ENDPROC(__gnu_mcount_nc)
-
-ENTRY(ftrace_caller)
-	stmdb	sp!, {r0-r3, lr}
-	mov	r0, lr
-	sub	r0, r0, #MCOUNT_INSN_SIZE
-	ldr	r1, [sp, #20]
-
-	.global	ftrace_call
-ftrace_call:
-	bl	ftrace_stub
-	ldmia	sp!, {r0-r3, ip, lr}
-	mov	pc, ip
-ENDPROC(ftrace_caller)
+.macro __mcount suffix
+	mcount_enter
+	ldr	r0, =ftrace_trace_function
+	ldr	r2, [r0]
+	adr	r0, .Lftrace_stub
+	cmp	r0, r2
+	bne	1f
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	ldr	r1, =ftrace_graph_return
+	ldr	r2, [r1]
+	cmp	r0, r2
+	bne	ftrace_graph_caller\suffix
+
+	ldr	r1, =ftrace_graph_entry
+	ldr	r2, [r1]
+	ldr	r0, =ftrace_graph_entry_stub
+	cmp	r0, r2
+	bne	ftrace_graph_caller\suffix
+#endif
+
+	mcount_exit
+
+1:	mcount_get_lr	r1			@ lr of instrumented func
+	mov	r0, lr				@ instrumented function
+	sub	r0, r0, #MCOUNT_INSN_SIZE
+	adr	lr, BSYM(2f)
+	mov	pc, r2
+2:	mcount_exit
+.endm
+
+.macro __ftrace_caller suffix
+	mcount_enter
+
+	mcount_get_lr	r1			@ lr of instrumented func
+	mov	r0, lr				@ instrumented function
+	sub	r0, r0, #MCOUNT_INSN_SIZE
+
+	.globl ftrace_call\suffix
+ftrace_call\suffix:
+	bl	ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl ftrace_graph_call\suffix
+ftrace_graph_call\suffix:
+	mov	r0, r0
+#endif
+
+	mcount_exit
+.endm
+
+.macro __ftrace_graph_caller
+	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
+#ifdef CONFIG_DYNAMIC_FTRACE
+	@ called from __ftrace_caller, saved in mcount_enter
+	ldr	r1, [sp, #16]		@ instrumented routine (func)
+#else
+	@ called from __mcount, untouched in lr
+	mov	r1, lr			@ instrumented routine (func)
+#endif
+	sub	r1, r1, #MCOUNT_INSN_SIZE
+	mov	r2, fp			@ frame pointer
+	bl	prepare_ftrace_return
+	mcount_exit
+.endm
 
 #ifdef CONFIG_OLD_MCOUNT
+/*
+ * mcount
+ */
+
+.macro mcount_enter
+	stmdb	sp!, {r0-r3, lr}
+.endm
+
+.macro mcount_get_lr reg
+	ldr	\reg, [fp, #-4]
+.endm
+
+.macro mcount_exit
+	ldr	lr, [fp, #-4]
+	ldmia	sp!, {r0-r3, pc}
+.endm
+
 ENTRY(mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
 	stmdb	sp!, {lr}
 	ldr	lr, [fp, #-4]
 	ldmia	sp!, {pc}
+#else
+	__mcount _old
+#endif
 ENDPROC(mcount)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(ftrace_caller_old)
-	stmdb	sp!, {r0-r3, lr}
-	ldr	r1, [fp, #-4]
-	mov	r0, lr
-	sub	r0, r0, #MCOUNT_INSN_SIZE
-
-	.globl ftrace_call_old
-ftrace_call_old:
-	bl	ftrace_stub
-	ldr	lr, [fp, #-4]	@ restore lr
-	ldmia	sp!, {r0-r3, pc}
+	__ftrace_caller _old
 ENDPROC(ftrace_caller_old)
 #endif
 
-#else
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller_old)
+	__ftrace_graph_caller
+ENDPROC(ftrace_graph_caller_old)
+#endif
 
-ENTRY(__gnu_mcount_nc)
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+#endif
+
+/*
+ * __gnu_mcount_nc
+ */
+
+.macro mcount_enter
 	stmdb	sp!, {r0-r3, lr}
-	ldr	r0, =ftrace_trace_function
-	ldr	r2, [r0]
-	adr	r0, .Lftrace_stub
-	cmp	r0, r2
-	bne	gnu_trace
+.endm
+
+.macro mcount_get_lr reg
+	ldr	\reg, [sp, #20]
+.endm
+
+.macro mcount_exit
 	ldmia	sp!, {r0-r3, ip, lr}
 	mov	pc, ip
+.endm
 
-gnu_trace:
-	ldr	r1, [sp, #20]			@ lr of instrumented routine
-	mov	r0, lr
-	sub	r0, r0, #MCOUNT_INSN_SIZE
-	adr	lr, BSYM(1f)
-	mov	pc, r2
-1:
-	ldmia	sp!, {r0-r3, ip, lr}
+ENTRY(__gnu_mcount_nc)
+#ifdef CONFIG_DYNAMIC_FTRACE
+	mov	ip, lr
+	ldmia	sp!, {lr}
 	mov	pc, ip
+#else
+	__mcount
+#endif
+ENDPROC(__gnu_mcount_nc)
 
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * This is under an ifdef in order to force link-time errors for people trying
- * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
- * mcount.
- */
-ENTRY(mcount)
-	stmdb	sp!, {r0-r3, lr}
-	ldr	r0, =ftrace_trace_function
-	ldr	r2, [r0]
-	adr	r0, ftrace_stub
-	cmp	r0, r2
-	bne	trace
-	ldr	lr, [fp, #-4]			@ restore lr
-	ldmia	sp!, {r0-r3, pc}
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+	__ftrace_caller
+ENDPROC(ftrace_caller)
+#endif
 
-trace:
-	ldr	r1, [fp, #-4]			@ lr of instrumented routine
-	mov	r0, lr
-	sub	r0, r0, #MCOUNT_INSN_SIZE
-	mov	lr, pc
-	mov	pc, r2
-	ldr	lr, [fp, #-4]			@ restore lr
-	ldmia	sp!, {r0-r3, pc}
-ENDPROC(mcount)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	__ftrace_graph_caller
+ENDPROC(ftrace_graph_caller)
 #endif
 
-#endif /* CONFIG_DYNAMIC_FTRACE */
+.purgem mcount_enter
+.purgem mcount_get_lr
+.purgem mcount_exit
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl return_to_handler
+return_to_handler:
+	stmdb	sp!, {r0-r3}
+	mov	r0, fp			@ frame pointer
+	bl	ftrace_return_to_handler
+	mov	lr, r0			@ r0 has real ret addr
+	ldmia	sp!, {r0-r3}
+	mov	pc, lr
+#endif
 
 ENTRY(ftrace_stub)
 .Lftrace_stub:
......
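A note on the calling conventions the macros above depend on (summarized from the surrounding code; the compiler behaviour described is the usual one, not something this commit changes): with -pg, EABI GCC 4.4 and later emit "push {lr}; bl __gnu_mcount_nc" in every instrumented prologue, which is why the __gnu_mcount_nc mcount_exit pops six words (r0-r3, the return address into ip, and the caller's original lr), and why dynamic ftrace can replace the call site with the "pop {lr}" NOP (0xe8bd4000) defined in ftrace.c below. Older compilers emit a plain "bl mcount" and leave the caller's lr at [fp, #-4], which is why the CONFIG_OLD_MCOUNT variants require a frame pointer.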
@@ -24,6 +24,7 @@
 #define	NOP		0xe8bd4000	/* pop {lr} */
 #endif
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_OLD_MCOUNT
 #define OLD_MCOUNT_ADDR	((unsigned long) mcount)
 #define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
@@ -59,9 +60,9 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
 }
 #endif
 
-/* construct a branch (BL) instruction to addr */
 #ifdef CONFIG_THUMB2_KERNEL
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+				       bool link)
 {
 	unsigned long s, j1, j2, i1, i2, imm10, imm11;
 	unsigned long first, second;
@@ -83,15 +84,22 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 	j2 = (!i2) ^ s;
 
 	first = 0xf000 | (s << 10) | imm10;
-	second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11;
+	second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
+
+	if (link)
+		second |= 1 << 14;
 
 	return (second << 16) | first;
 }
 #else
-static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
+				       bool link)
 {
+	unsigned long opcode = 0xea000000;
 	long offset;
 
+	if (link)
+		opcode |= 1 << 24;
+
 	offset = (long)addr - (long)(pc + 8);
 	if (unlikely(offset < -33554432 || offset > 33554428)) {
 		/* Can't generate branches that far (from ARM ARM). Ftrace
@@ -103,10 +111,15 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 
 	offset = (offset >> 2) & 0x00ffffff;
 
-	return 0xeb000000 | offset;
+	return opcode | offset;
 }
 #endif
 
+static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+	return ftrace_gen_branch(pc, addr, true);
+}
+
 static int ftrace_modify_code(unsigned long pc, unsigned long old,
 			      unsigned long new)
 {
@@ -193,3 +206,83 @@ int __init ftrace_dyn_arch_init(void *data)
 	return 0;
 }
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+			   unsigned long frame_pointer)
+{
+	unsigned long return_hooker = (unsigned long) &return_to_handler;
+	struct ftrace_graph_ent trace;
+	unsigned long old;
+	int err;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	old = *parent;
+	*parent = return_hooker;
+
+	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+				       frame_pointer);
+	if (err == -EBUSY) {
+		*parent = old;
+		return;
+	}
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		*parent = old;
+	}
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+extern unsigned long ftrace_graph_call_old;
+extern void ftrace_graph_caller_old(void);
+
+static int __ftrace_modify_caller(unsigned long *callsite,
+				  void (*func) (void), bool enable)
+{
+	unsigned long caller_fn = (unsigned long) func;
+	unsigned long pc = (unsigned long) callsite;
+	unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
+	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
+	unsigned long old = enable ? nop : branch;
+	unsigned long new = enable ? branch : nop;
+
+	return ftrace_modify_code(pc, old, new);
+}
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+	int ret;
+
+	ret = __ftrace_modify_caller(&ftrace_graph_call,
+				     ftrace_graph_caller,
+				     enable);
+
+#ifdef CONFIG_OLD_MCOUNT
+	if (!ret)
+		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
+					     ftrace_graph_caller_old,
+					     enable);
+#endif
+
+	return ret;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
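To make the encoding in ftrace_gen_branch() concrete, here is a small self-contained sketch of the ARM-mode case (illustration only; arm_gen_branch() is a stand-in name and the range check is omitted). ARM-mode pc reads eight bytes ahead, so the 24-bit signed word offset is (addr - (pc + 8)) >> 2, and bit 24 turns B into BL:

#include <stdbool.h>
#include <stdint.h>

static uint32_t arm_gen_branch(uint32_t pc, uint32_t addr, bool link)
{
	uint32_t opcode = 0xea000000;		/* B <offset> */
	int32_t offset;

	if (link)
		opcode |= 1 << 24;		/* BL <offset> */

	offset = (int32_t)(addr - (pc + 8));	/* pc reads 8 ahead in ARM mode */
	return opcode | (((uint32_t)offset >> 2) & 0x00ffffff);
}

/* e.g. arm_gen_branch(0xc0008000, 0xc0008010, true) == 0xeb000002 */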
@@ -35,6 +35,7 @@
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
+#include <linux/ftrace.h>
 
 #include <asm/system.h>
 #include <asm/mach/irq.h>
@@ -105,7 +106,8 @@ int show_interrupts(struct seq_file *p, void *v)
  * come via this function.  Instead, they should provide their
  * own 'handler'
  */
-asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+asmlinkage void __exception_irq_entry
+asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
......
/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include <asm/sched_clock.h>

static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
static void (*sched_clock_update_fn)(void);

static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	sched_clock_update_fn();
}

void __init init_sched_clock(struct clock_data *cd, void (*update)(void),
	unsigned int clock_bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	sched_clock_update_fn = update;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 60);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else {
		r /= 1000;
		r_unit = 'k';
	}

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << clock_bits) - 1, cd->mult, cd->shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd->mult, cd->shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		clock_bits, r, r_unit, res, w);

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * sets the initial epoch.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	sched_clock_poll(sched_clock_timer.data);

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd->epoch_ns = 0;
}
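A quick sanity check of the arithmetic above (illustration only, no kernel dependencies): for a 32-bit counter at an assumed 24 MHz, the wrap interval and the poll period chosen by init_sched_clock() work out as follows.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate = 24000000;	/* counter frequency, Hz */
	uint64_t wrap_ns = ((1ULL << 32) - 1) * 1000000000ULL / rate;
	uint64_t w = wrap_ns / 1000000;	/* ms until wrap: ~178956 */

	/* init_sched_clock() re-arms its timer at w - w/10 ms (~161061),
	 * so the update callback always runs well before the wrap. */
	printf("wraps every %llums, polls every %llums\n",
	       (unsigned long long)w, (unsigned long long)(w - w / 10));
	return 0;
}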
@@ -16,6 +16,7 @@
 #include <linux/cache.h>
 #include <linux/profile.h>
 #include <linux/errno.h>
+#include <linux/ftrace.h>
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/cpu.h>
@@ -456,7 +457,7 @@ static void ipi_timer(void)
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception do_local_timer(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	int cpu = smp_processor_id();
@@ -543,7 +544,7 @@ static void ipi_cpu_stop(unsigned int cpu)
  *
  *  Bit 0 - Inter-processor function call
  */
-asmlinkage void __exception do_IPI(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_IPI(struct pt_regs *regs)
 {
 	unsigned int cpu = smp_processor_id();
 	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
......
@@ -101,6 +101,7 @@ SECTIONS
 		__exception_text_start = .;
 		*(.exception.text)
 		__exception_text_end = .;
+		IRQENTRY_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
 		LOCK_TEXT
......
@@ -101,7 +101,6 @@ static struct clocksource clk32k = {
 	.rating		= 150,
 	.read		= read_clk32k,
 	.mask		= CLOCKSOURCE_MASK(20),
-	.shift		= 10,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -201,8 +200,7 @@ void __init at91rm9200_timer_init(void)
 	clockevents_register_device(&clkevt);
 
 	/* register clocksource */
-	clk32k.mult = clocksource_hz2mult(AT91_SLOW_CLOCK, clk32k.shift);
-	clocksource_register(&clk32k);
+	clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
 }
 
 struct sys_timer at91rm9200_timer = {
......
@@ -51,7 +51,6 @@ static struct clocksource pit_clk = {
 	.name		= "pit",
 	.rating		= 175,
 	.read		= read_pit_clk,
-	.shift		= 20,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -163,10 +162,9 @@ static void __init at91sam926x_pit_init(void)
 	 * Register clocksource.  The high order bits of PIV are unused,
 	 * so this isn't a 32-bit counter unless we get clockevent irqs.
	 */
-	pit_clk.mult = clocksource_hz2mult(pit_rate, pit_clk.shift);
 	bits = 12 /* PICNT */ + ilog2(pit_cycle) /* PIV */;
 	pit_clk.mask = CLOCKSOURCE_MASK(bits);
-	clocksource_register(&pit_clk);
+	clocksource_register_hz(&pit_clk, pit_rate);
 
 	/* Set up irq handler */
 	setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
......
@@ -276,7 +276,6 @@ static struct clocksource clocksource_davinci = {
 	.rating		= 300,
 	.read		= read_cycles,
 	.mask		= CLOCKSOURCE_MASK(32),
-	.shift		= 24,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -378,10 +377,8 @@ static void __init davinci_timer_init(void)
 
 	/* setup clocksource */
 	clocksource_davinci.name = id_to_name[clocksource_id];
-	clocksource_davinci.mult =
-		clocksource_khz2mult(davinci_clock_tick_rate/1000,
-				     clocksource_davinci.shift);
-	if (clocksource_register(&clocksource_davinci))
+	if (clocksource_register_hz(&clocksource_davinci,
+				    davinci_clock_tick_rate))
 		printk(err, clocksource_davinci.name);
 
 	/* setup clockevent */
......
@@ -49,6 +49,8 @@ endchoice
 config MSM_SOC_REV_A
 	bool
 
+config ARCH_MSM_SCORPIONMP
+	bool
+
 config ARCH_MSM_ARM11
 	bool
......
@@ -14,6 +14,7 @@ if PLAT_NOMADIK
 config HAS_MTU
 	bool
+	select HAVE_SCHED_CLOCK
 	help
 	  Support for Multi Timer Unit. MTU provides access
 	  to multiple interrupt generating programmable
......