Commit 446c92b2 authored by Uwe Kleine-König, committed by Russell King

[ARM] 5421/1: ftrace: fix crash due to tracing of __naked functions

This is a fix for the following crash observed in 2.6.29-rc3:
http://lkml.org/lkml/2009/1/29/150

On ARM it doesn't make sense to trace a naked function because then
mcount is called without stack and frame pointer being set up and there
is no chance to restore the lr register to the value before mcount was
called.
Reported-by: Matthias Kaehlcke <matthias@kaehlcke.net>
Tested-by: Matthias Kaehlcke <matthias@kaehlcke.net>

Cc: Abhishek Sagar <sagar.abhishek@gmail.com>
Cc: Steven Rostedt <rostedt@home.goodmis.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 9311c593
...@@ -88,7 +88,7 @@ void set_fiq_handler(void *start, unsigned int length) ...@@ -88,7 +88,7 @@ void set_fiq_handler(void *start, unsigned int length)
* disable irqs for the duration. Note - these functions are almost * disable irqs for the duration. Note - these functions are almost
* entirely coded in assembly. * entirely coded in assembly.
*/ */
void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs) void __naked set_fiq_regs(struct pt_regs *regs)
{ {
register unsigned long tmp; register unsigned long tmp;
asm volatile ( asm volatile (
...@@ -106,7 +106,7 @@ void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs) ...@@ -106,7 +106,7 @@ void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs)
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE)); : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
} }
void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs) void __naked get_fiq_regs(struct pt_regs *regs)
{ {
register unsigned long tmp; register unsigned long tmp;
asm volatile ( asm volatile (
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/highmem.h> #include <linux/highmem.h>
static void __attribute__((naked)) static void __naked
feroceon_copy_user_page(void *kto, const void *kfrom) feroceon_copy_user_page(void *kto, const void *kfrom)
{ {
asm("\ asm("\
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
* *
* FIXME: do we need to handle cache stuff... * FIXME: do we need to handle cache stuff...
*/ */
static void __attribute__((naked)) static void __naked
v3_copy_user_page(void *kto, const void *kfrom) v3_copy_user_page(void *kto, const void *kfrom)
{ {
asm("\n\ asm("\n\
......
...@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(minicache_lock); ...@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(minicache_lock);
* instruction. If your processor does not supply this, you have to write your * instruction. If your processor does not supply this, you have to write your
* own copy_user_highpage that does the right thing. * own copy_user_highpage that does the right thing.
*/ */
static void __attribute__((naked)) static void __naked
mc_copy_user_page(void *from, void *to) mc_copy_user_page(void *from, void *to)
{ {
asm volatile( asm volatile(
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
* instruction. If your processor does not supply this, you have to write your * instruction. If your processor does not supply this, you have to write your
* own copy_user_highpage that does the right thing. * own copy_user_highpage that does the right thing.
*/ */
static void __attribute__((naked)) static void __naked
v4wb_copy_user_page(void *kto, const void *kfrom) v4wb_copy_user_page(void *kto, const void *kfrom)
{ {
asm("\ asm("\
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
* dirty data in the cache. However, we do have to ensure that * dirty data in the cache. However, we do have to ensure that
* subsequent reads are up to date. * subsequent reads are up to date.
*/ */
static void __attribute__((naked)) static void __naked
v4wt_copy_user_page(void *kto, const void *kfrom) v4wt_copy_user_page(void *kto, const void *kfrom)
{ {
asm("\ asm("\
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
* if we eventually end up using our copied page. * if we eventually end up using our copied page.
* *
*/ */
static void __attribute__((naked)) static void __naked
xsc3_mc_copy_user_page(void *kto, const void *kfrom) xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{ {
asm("\ asm("\
......
...@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock); ...@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
* Dcache aliasing issue. The writes will be forwarded to the write buffer, * Dcache aliasing issue. The writes will be forwarded to the write buffer,
* and merged as appropriate. * and merged as appropriate.
*/ */
static void __attribute__((naked)) static void __naked
mc_copy_user_page(void *from, void *to) mc_copy_user_page(void *from, void *to)
{ {
/* /*
......
...@@ -52,7 +52,15 @@ ...@@ -52,7 +52,15 @@
#define __deprecated __attribute__((deprecated)) #define __deprecated __attribute__((deprecated))
#define __packed __attribute__((packed)) #define __packed __attribute__((packed))
#define __weak __attribute__((weak)) #define __weak __attribute__((weak))
#define __naked __attribute__((naked))
/*
* it doesn't make sense on ARM (currently the only user of __naked) to trace
* naked functions because then mcount is called without stack and frame pointer
* being set up and there is no chance to restore the lr register to the value
* before mcount was called.
*/
#define __naked __attribute__((naked)) notrace
#define __noreturn __attribute__((noreturn)) #define __noreturn __attribute__((noreturn))
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment