Commit b283f09c authored by Andrew Morton, committed by Linus Torvalds

[PATCH] Fix get_wchan() FIXME wrt. order of functions

From: William Lee Irwin III <wli@holomorphy.com>

This addresses the get_wchan() issue that the various functions acting
as scheduling-related primitives are not, in fact, contiguous in the text
segment.  It creates an ELF section for scheduling primitives to be placed
in, and moves into this section both the currently-detected primitives
(i.e.  those skipped during stack decoding) and others such as
io_schedule() and down(), which the get_wchan() code currently misses.

The net effects are more reliability of get_wchan()'s results and the new
ability, made use of by this code, to arbitrarily place scheduling
primitives in the source code without disturbing get_wchan()'s accuracy.

Suggestions by Arnd Bergmann and Matthew Wilcox regarding reducing the
invasiveness of the patch were incorporated during prior rounds of review. 
I've at least tried to sweep all arches in this patch.
parent ee28db84
......@@ -513,8 +513,6 @@ thread_saved_pc(task_t *t)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......
......@@ -7,6 +7,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>
/*
* This is basically the PPC semaphore scheme ported to use
......@@ -60,7 +61,7 @@ static inline int __sem_update_count(struct semaphore *sem, int incr)
* Either form may be used in conjunction with "up()".
*/
void
void __sched
__down_failed(struct semaphore *sem)
{
struct task_struct *tsk = current;
......@@ -101,7 +102,7 @@ __down_failed(struct semaphore *sem)
#endif
}
int
int __sched
__down_failed_interruptible(struct semaphore *sem)
{
struct task_struct *tsk = current;
......@@ -159,7 +160,7 @@ __up_wakeup(struct semaphore *sem)
wake_up(&sem->wait);
}
void
void __sched
down(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
......@@ -173,7 +174,7 @@ down(struct semaphore *sem)
__down(sem);
}
int
int __sched
down_interruptible(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
......
......@@ -17,6 +17,7 @@ SECTIONS
_text = .; /* Text and read-only data */
.text : {
*(.text)
SCHED_TEXT
*(.fixup)
*(.gnu.warning)
} :kernel
......
......@@ -414,8 +414,6 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......
......@@ -13,6 +13,7 @@
*/
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/semaphore.h>
......@@ -54,7 +55,7 @@ void __up(struct semaphore *sem)
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
void __down(struct semaphore * sem)
void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
......@@ -87,7 +88,7 @@ void __down(struct semaphore * sem)
wake_up(&sem->wait);
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
......@@ -176,7 +177,8 @@ int __down_trylock(struct semaphore * sem)
* registers (r0 to r3 and lr), but not ip, as we use it as a return
* value in some cases..
*/
asm(" .align 5 \n\
asm(" .section .sched.text \n\
.align 5 \n\
.globl __down_failed \n\
__down_failed: \n\
stmfd sp!, {r0 - r3, lr} \n\
......
......@@ -73,6 +73,7 @@ SECTIONS
.text : { /* Real text segment */
_text = .; /* Text and read-only data */
*(.text)
SCHED_TEXT
*(.fixup)
*(.gnu.warning)
*(.rodata)
......
......@@ -400,8 +400,6 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......
......@@ -15,6 +15,7 @@
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/semaphore.h>
......@@ -56,7 +57,7 @@ void __up(struct semaphore *sem)
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
void __down(struct semaphore * sem)
void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
......@@ -89,7 +90,7 @@ void __down(struct semaphore * sem)
wake_up(&sem->wait);
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
......@@ -178,7 +179,8 @@ int __down_trylock(struct semaphore * sem)
* registers (r0 to r3 and lr), but not ip, as we use it as a return
* value in some cases..
*/
asm(" .align 5 \n\
asm(" .section .sched.text \n\
.align 5 \n\
.globl __down_failed \n\
__down_failed: \n\
stmfd sp!, {r0 - r3, lr} \n\
......
......@@ -66,6 +66,7 @@ SECTIONS
.text : { /* Real text segment */
_text = .; /* Text and read-only data */
*(.text)
SCHED_TEXT
*(.fixup)
*(.gnu.warning)
*(.rodata)
......
......@@ -67,6 +67,7 @@ SECTIONS
.text : { /* Real text segment */
_text = .; /* Text and read-only data */
*(.text)
SCHED_TEXT
*(.fixup)
*(.gnu.warning)
*(.rodata)
......
......@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#ifdef CONFIG_ETRAX_GPIO
void etrax_gpio_wake_up_check(void); /* drivers/gpio.c */
......@@ -216,8 +217,6 @@ asmlinkage int sys_execve(const char *fname, char **argv, char **envp,
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......
......@@ -25,6 +25,7 @@ SECTIONS
__stext = .;
.text : {
*(.text)
SCHED_TEXT
*(.fixup)
*(.text.__*)
}
......
......@@ -4,6 +4,7 @@
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/semaphore-helper.h>
/*
......@@ -94,7 +95,7 @@ void __up(struct semaphore *sem)
tsk->state = TASK_RUNNING; \
remove_wait_queue(&sem->wait, &wait);
void __down(struct semaphore * sem)
void __sched __down(struct semaphore * sem)
{
DOWN_VAR
DOWN_HEAD(TASK_UNINTERRUPTIBLE)
......@@ -104,7 +105,7 @@ void __down(struct semaphore * sem)
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
int ret = 0;
DOWN_VAR
......
......@@ -264,8 +264,6 @@ asmlinkage int sys_execve(char *name, char **argv, char **envp,int dummy,...)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......@@ -289,7 +287,6 @@ unsigned long get_wchan(struct task_struct *p)
fp >= 8184+stack_page)
return 0;
pc = ((unsigned long *)fp)[1];
/* FIXME: This depends on the order of these functions. */
if (pc < first_sched || pc >= last_sched)
return pc;
fp = *(unsigned long *) fp;
......
......@@ -5,6 +5,7 @@
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/semaphore-helper.h>
#ifndef CONFIG_RMW_INSNS
......@@ -95,7 +96,7 @@ void __up(struct semaphore *sem)
current->state = TASK_RUNNING; \
remove_wait_queue(&sem->wait, &wait);
void __down(struct semaphore * sem)
void __sched __down(struct semaphore * sem)
{
DECLARE_WAITQUEUE(wait, current);
......@@ -106,7 +107,7 @@ void __down(struct semaphore * sem)
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
......
......@@ -82,6 +82,7 @@ SECTIONS
#endif
__stext = . ;
*(.text)
SCHED_TEXT
. = ALIGN(0x4) ;
*(.exit.text)
*(.text.*)
......
......@@ -632,8 +632,6 @@ asmlinkage int sys_execve(struct pt_regs regs)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
#define top_esp (THREAD_SIZE - sizeof(unsigned long))
......
......@@ -15,6 +15,7 @@
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore.h>
/*
......@@ -53,7 +54,7 @@ asmlinkage void __up(struct semaphore *sem)
wake_up(&sem->wait);
}
asmlinkage void __down(struct semaphore * sem)
asmlinkage void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
......@@ -90,7 +91,7 @@ asmlinkage void __down(struct semaphore * sem)
tsk->state = TASK_RUNNING;
}
asmlinkage int __down_interruptible(struct semaphore * sem)
asmlinkage int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
......@@ -187,7 +188,7 @@ asmlinkage int __down_trylock(struct semaphore * sem)
* value..
*/
asm(
".text\n"
".section .sched.text\n"
".align 4\n"
".globl __down_failed\n"
"__down_failed:\n\t"
......@@ -210,7 +211,7 @@ asm(
);
asm(
".text\n"
".section .sched.text\n"
".align 4\n"
".globl __down_failed_interruptible\n"
"__down_failed_interruptible:\n\t"
......@@ -231,7 +232,7 @@ asm(
);
asm(
".text\n"
".section .sched.text\n"
".align 4\n"
".globl __down_failed_trylock\n"
"__down_failed_trylock:\n\t"
......@@ -252,7 +253,7 @@ asm(
);
asm(
".text\n"
".section .sched.text\n"
".align 4\n"
".globl __up_wakeup\n"
"__up_wakeup:\n\t"
......@@ -271,7 +272,7 @@ asm(
*/
#if defined(CONFIG_SMP)
asm(
".text\n"
".section .sched.text\n"
".align 4\n"
".globl __write_lock_failed\n"
"__write_lock_failed:\n\t"
......@@ -285,7 +286,7 @@ asm(
);
asm(
".text\n"
".section .sched.text\n"
".align 4\n"
".globl __read_lock_failed\n"
"__read_lock_failed:\n\t"
......
......@@ -16,6 +16,7 @@ SECTIONS
_text = .; /* Text and read-only data */
.text : {
*(.text)
SCHED_TEXT
*(.fixup)
*(.gnu.warning)
} = 0x9090
......
......@@ -660,8 +660,6 @@ get_wchan (struct task_struct *p)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
# define first_sched ((unsigned long) scheduling_functions_start_here)
# define last_sched ((unsigned long) scheduling_functions_end_here)
......
......@@ -24,6 +24,7 @@
* <asm/semaphore.h> where we want to avoid any extra jumps and calls.
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/errno.h>
#include <asm/semaphore.h>
......@@ -44,8 +45,7 @@ __up (struct semaphore *sem)
wake_up(&sem->wait);
}
void
__down (struct semaphore *sem)
void __sched __down (struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
......@@ -82,8 +82,7 @@ __down (struct semaphore *sem)
tsk->state = TASK_RUNNING;
}
int
__down_interruptible (struct semaphore * sem)
int __sched __down_interruptible (struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
......
......@@ -41,6 +41,7 @@ SECTIONS
{
*(.text.ivt)
*(.text)
SCHED_TEXT
*(.gnu.linkonce.t*)
}
.text2 : AT(ADDR(.text2) - LOAD_OFFSET)
......
......@@ -65,8 +65,6 @@ asmlinkage void ret_from_fork(void);
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
/* Check whether the thread is blocked in resume() */
if (sw->retpc > (unsigned long)scheduling_functions_start_here &&
......@@ -387,8 +385,6 @@ asmlinkage int sys_execve(char *name, char **argv, char **envp)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......@@ -407,7 +403,6 @@ unsigned long get_wchan(struct task_struct *p)
fp >= 8184+stack_page)
return 0;
pc = ((unsigned long *)fp)[1];
/* FIXME: This depends on the order of these functions. */
if (pc < first_sched || pc >= last_sched)
return pc;
fp = *(unsigned long *) fp;
......
......@@ -5,6 +5,7 @@
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/semaphore-helper.h>
#ifndef CONFIG_RMW_INSNS
......@@ -95,7 +96,7 @@ void __up(struct semaphore *sem)
current->state = TASK_RUNNING; \
remove_wait_queue(&sem->wait, &wait);
void __down(struct semaphore * sem)
void __sched __down(struct semaphore * sem)
{
DECLARE_WAITQUEUE(wait, current);
......@@ -106,7 +107,7 @@ void __down(struct semaphore * sem)
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
......
......@@ -12,6 +12,7 @@ SECTIONS
_text = .; /* Text and read-only data */
.text : {
*(.text)
SCHED_TEXT
*(.fixup)
*(.gnu.warning)
} = 0x4e75
......
......@@ -13,6 +13,7 @@ SECTIONS
.text : {
*(.head)
*(.text)
SCHED_TEXT
*(.fixup)
*(.gnu.warning)
} = 0x4e75
......
......@@ -406,8 +406,6 @@ asmlinkage int sys_execve(char *name, char **argv, char **envp)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......@@ -426,7 +424,6 @@ unsigned long get_wchan(struct task_struct *p)
fp >= 8184+stack_page)
return 0;
pc = ((unsigned long *)fp)[1];
/* FIXME: This depends on the order of these functions. */
if (pc < first_sched || pc >= last_sched)
return pc;
fp = *(unsigned long *) fp;
......@@ -439,8 +436,6 @@ unsigned long get_wchan(struct task_struct *p)
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
/* Check whether the thread is blocked in resume() */
......
......@@ -6,6 +6,7 @@
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore-helper.h>
#ifndef CONFIG_RMW_INSNS
......@@ -96,7 +97,7 @@ void __up(struct semaphore *sem)
current->state = TASK_RUNNING; \
remove_wait_queue(&sem->wait, &wait);
void __down(struct semaphore * sem)
void __sched __down(struct semaphore * sem)
{
DECLARE_WAITQUEUE(wait, current);
......@@ -107,7 +108,7 @@ void __down(struct semaphore * sem)
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
......
......@@ -191,6 +191,7 @@ SECTIONS {
.text : {
_stext = . ;
*(.text)
SCHED_TEXT
*(.text.lock)
. = ALIGN(16); /* Exception table */
......
......@@ -283,8 +283,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......
......@@ -6,6 +6,7 @@
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#ifdef CONFIG_CPU_HAS_LLDSCD
......@@ -104,7 +105,7 @@ static inline int waking_non_zero(struct semaphore *sem)
* Either form may be used in conjunction with "up()".
*/
void __down_failed(struct semaphore * sem)
void __sched __down_failed(struct semaphore * sem)
{
struct task_struct *tsk = current;
wait_queue_t wait;
......@@ -227,7 +228,7 @@ static inline int waking_non_zero_interruptible(struct semaphore *sem,
#endif /* !CONFIG_CPU_HAS_LLDSCD */
int __down_failed_interruptible(struct semaphore * sem)
int __sched __down_failed_interruptible(struct semaphore * sem)
{
struct task_struct *tsk = current;
wait_queue_t wait;
......
......@@ -28,6 +28,7 @@ SECTIONS
_text = .; /* Text and read-only data */
.text : {
*(.text)
SCHED_TEXT
*(.fixup)
*(.gnu.warning)
} =0
......
......@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>
/*
* Semaphores are complex as we wish to avoid using two variables.
......@@ -58,7 +59,7 @@ void __up(struct semaphore *sem)
sem->count += (sem->count < 0) ? 1 : - 1;
void __down(struct semaphore * sem)
void __sched __down(struct semaphore * sem)
{
DOWN_HEAD
......@@ -74,7 +75,7 @@ void __down(struct semaphore * sem)
UPDATE_COUNT
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
DOWN_HEAD
......
......@@ -50,6 +50,7 @@ SECTIONS
_text = .; /* Text and read-only data */
.text ALIGN(16) : {
*(.text*)
SCHED_TEXT
*(.PARISC.unwind)
*(.fixup)
*(.lock.text) /* out-of-line lock text */
......
......@@ -661,8 +661,6 @@ void __init ll_puts(const char *s)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......
......@@ -15,6 +15,7 @@
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
......@@ -69,7 +70,7 @@ void __up(struct semaphore *sem)
* Thus it is only when we decrement count from some value > 0
* that we have actually got the semaphore.
*/
void __down(struct semaphore *sem)
void __sched __down(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
......@@ -99,7 +100,7 @@ void __down(struct semaphore *sem)
wake_up(&sem->wait);
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
......
......@@ -31,6 +31,7 @@ SECTIONS
.text :
{
*(.text)
SCHED_TEXT
*(.fixup)
*(.got1)
__got2_start = .;
......
......@@ -475,8 +475,6 @@ static inline int validate_sp(unsigned long sp, struct task_struct *p)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched (*(unsigned long *)scheduling_functions_start_here)
#define last_sched (*(unsigned long *)scheduling_functions_end_here)
......
......@@ -17,6 +17,7 @@
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
......@@ -70,7 +71,7 @@ void __up(struct semaphore *sem)
* Thus it is only when we decrement count from some value > 0
* that we have actually got the semaphore.
*/
void __down(struct semaphore *sem)
void __sched __down(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
......@@ -99,7 +100,7 @@ void __down(struct semaphore *sem)
wake_up(&sem->wait);
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
......
......@@ -13,6 +13,7 @@ SECTIONS
/* Read-only sections, merged into text segment: */
.text : {
*(.text .text.*)
SCHED_TEXT
*(.fixup)
. = ALIGN(4096);
_etext = .;
......
......@@ -384,8 +384,6 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......
......@@ -11,6 +11,7 @@
*/
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/semaphore.h>
......@@ -60,7 +61,7 @@ void __up(struct semaphore *sem)
* count > 0: decrement count, wake up queue and exit.
* count <= 0: set count to -1, go to sleep.
*/
void __down(struct semaphore * sem)
void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
......@@ -82,7 +83,7 @@ void __down(struct semaphore * sem)
* count > 0: wake up queue and exit.
* count <= 0: set count to 0, wake up queue and exit.
*/
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
......
......@@ -23,6 +23,7 @@ SECTIONS
_text = .; /* Text and read-only data */
.text : {
*(.text)
SCHED_TEXT
*(.fixup)
*(.gnu.warning)
} = 0x0700
......
......@@ -464,8 +464,6 @@ asmlinkage int sys_execve(char *ufilename, char **uargv,
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......@@ -481,7 +479,7 @@ unsigned long get_wchan(struct task_struct *p)
* The same comment as on the Alpha applies here, too ...
*/
pc = thread_saved_pc(p);
if (pc >= (unsigned long) interruptible_sleep_on && pc < (unsigned long) add_timer) {
if (pc >= first_sched && pc < last_sched) {
schedule_frame = ((unsigned long *)(long)p->thread.sp)[1];
return (unsigned long)((unsigned long *)schedule_frame)[1];
}
......
......@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/init.h>
#include <asm/semaphore.h>
#include <asm/semaphore-helper.h>
......@@ -103,7 +104,7 @@ void __up(struct semaphore *sem)
tsk->state = TASK_RUNNING; \
remove_wait_queue(&sem->wait, &wait);
void __down(struct semaphore * sem)
void __sched __down(struct semaphore * sem)
{
DOWN_VAR
DOWN_HEAD(TASK_UNINTERRUPTIBLE)
......@@ -113,7 +114,7 @@ void __down(struct semaphore * sem)
DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
int ret = 0;
DOWN_VAR
......
......@@ -22,6 +22,7 @@ SECTIONS
} = 0
.text : {
*(.text)
SCHED_TEXT
*(.fixup)
*(.gnu.warning)
} = 0x0009
......
......@@ -28,6 +28,7 @@
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <asm/auxio.h>
#include <asm/oplib.h>
......@@ -694,9 +695,6 @@ pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
return retval;
}
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc, fp, bias = 0;
......
......@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/semaphore.h>
......@@ -45,7 +46,7 @@ void __up(struct semaphore *sem)
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
void __down(struct semaphore * sem)
void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
......@@ -78,7 +79,7 @@ void __down(struct semaphore * sem)
wake_up(&sem->wait);
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
......
......@@ -12,6 +12,7 @@ SECTIONS
.text 0xf0004000 :
{
*(.text)
SCHED_TEXT
*(.gnu.warning)
} =0
_etext = .;
......
......@@ -8,7 +8,7 @@
#include <asm/ptrace.h>
#include <asm/psr.h>
.text
.section .sched.text
.align 4
.globl ___down_read
......@@ -113,6 +113,7 @@ ___down_write:
ba 2b
restore %l5, %g0, %g5
.text
.globl ___up_read
___up_read:
rd %psr, %g3
......
......@@ -28,6 +28,7 @@
#include <linux/config.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
......@@ -823,9 +824,6 @@ asmlinkage int sparc_execve(struct pt_regs *regs)
return error;
}
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc, fp, bias = 0;
......
......@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>
/*
* Atomically update sem->count.
......@@ -90,7 +91,7 @@ void up(struct semaphore *sem)
: "g5", "g7", "memory", "cc");
}
static void __down(struct semaphore * sem)
static void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
......@@ -108,7 +109,7 @@ static void __down(struct semaphore * sem)
wake_up(&sem->wait);
}
void down(struct semaphore *sem)
void __sched down(struct semaphore *sem)
{
might_sleep();
/* This atomically does:
......@@ -192,7 +193,7 @@ int down_trylock(struct semaphore *sem)
return ret;
}
static int __down_interruptible(struct semaphore * sem)
static int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
......@@ -216,7 +217,7 @@ static int __down_interruptible(struct semaphore * sem)
return retval;
}
int down_interruptible(struct semaphore *sem)
int __sched down_interruptible(struct semaphore *sem)
{
int ret = 0;
......
......@@ -15,6 +15,7 @@ SECTIONS
.text 0x0000000000404000 :
{
*(.text)
SCHED_TEXT
*(.gnu.warning)
} =0
_etext = .;
......
......@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/module.h>
extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
......@@ -13,7 +14,7 @@ extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore
extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *));
extern struct rw_semaphore *FASTCALL(rwsem_downgrade_wake(struct rw_semaphore *));
void __down_read(struct rw_semaphore *sem)
void __sched __down_read(struct rw_semaphore *sem)
{
__asm__ __volatile__(
"! beginning __down_read\n"
......@@ -72,7 +73,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
}
EXPORT_SYMBOL(__down_read_trylock);
void __down_write(struct rw_semaphore *sem)
void __sched __down_write(struct rw_semaphore *sem)
{
__asm__ __volatile__(
"! beginning __down_write\n\t"
......
......@@ -203,8 +203,6 @@ int sys_execve (char *name, char **argv, char **envp, struct pt_regs *regs)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here (void);
extern void scheduling_functions_end_here (void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......@@ -228,7 +226,6 @@ unsigned long get_wchan (struct task_struct *p)
fp >= 8184+stack_page)
return 0;
pc = ((unsigned long *)fp)[1];
/* FIXME: This depends on the order of these functions. */
if (pc < first_sched || pc >= last_sched)
return pc;
fp = *(unsigned long *) fp;
......
......@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/semaphore.h>
......@@ -56,7 +57,7 @@ void __up(struct semaphore *sem)
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
void __down(struct semaphore * sem)
void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
......@@ -89,7 +90,7 @@ void __down(struct semaphore * sem)
wake_up(&sem->wait);
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
......
......@@ -64,6 +64,7 @@
#define TEXT_CONTENTS \
__stext = . ; \
*(.text) \
SCHED_TEXT
*(.exit.text) /* 2.5 convention */ \
*(.text.exit) /* 2.4 convention */ \
*(.text.lock) \
......
......@@ -576,8 +576,6 @@ asmlinkage long sys_vfork(struct pt_regs regs)
/*
* These bracket the sleeping functions..
*/
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched ((unsigned long) scheduling_functions_start_here)
#define last_sched ((unsigned long) scheduling_functions_end_here)
......
......@@ -14,6 +14,7 @@
*/
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/errno.h>
#include <asm/semaphore.h>
......@@ -54,7 +55,7 @@ void __up(struct semaphore *sem)
wake_up(&sem->wait);
}
void __down(struct semaphore * sem)
void __sched __down(struct semaphore * sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
......@@ -91,7 +92,7 @@ void __down(struct semaphore * sem)
tsk->state = TASK_RUNNING;
}
int __down_interruptible(struct semaphore * sem)
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
......
......@@ -15,6 +15,7 @@ SECTIONS
_text = .; /* Text and read-only data */
.text : {
*(.text)
SCHED_TEXT
*(.fixup)
*(.gnu.warning)
} = 0x9090
......
......@@ -35,6 +35,7 @@
.endm
.section .sched.text
#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
thunk rwsem_down_read_failed_thunk,rwsem_down_read_failed
thunk rwsem_down_write_failed_thunk,rwsem_down_write_failed
......@@ -65,7 +66,7 @@ restore_norax:
#ifdef CONFIG_SMP
/* Support for read/write spinlocks. */
.text
/* rax: pointer to rwlock_t */
ENTRY(__write_lock_failed)
lock
......
......@@ -51,3 +51,8 @@
*(.security_initcall.init) \
__security_initcall_end = .; \
}
#define SCHED_TEXT \
__scheduling_functions_start_here = .; \
*(.sched.text) \
__scheduling_functions_end_here = .;
......@@ -46,6 +46,8 @@
#define __exitdata __attribute__ ((__section__(".exit.data")))
#define __exit_call __attribute_used__ __attribute__ ((__section__ (".exitcall.exit")))
#define __sched __attribute__((__section__(".sched.text")))
#ifdef MODULE
#define __exit __attribute__ ((__section__(".exit.text")))
#else
......
......@@ -170,6 +170,8 @@ extern void update_one_process(struct task_struct *p, unsigned long user,
unsigned long system, int cpu);
extern void scheduler_tick(int user_tick, int system);
extern unsigned long cache_decay_ticks;
extern const unsigned long scheduling_functions_start_here;
extern const unsigned long scheduling_functions_end_here;
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
......
......@@ -225,6 +225,13 @@ static DEFINE_PER_CPU(struct runqueue, runqueues);
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
extern unsigned long __scheduling_functions_start_here;
extern unsigned long __scheduling_functions_end_here;
const unsigned long scheduling_functions_start_here =
(unsigned long)&__scheduling_functions_start_here;
const unsigned long scheduling_functions_end_here =
(unsigned long)&__scheduling_functions_end_here;
/*
* Default context-switch locking:
*/
......@@ -1587,12 +1594,10 @@ void scheduler_tick(int user_ticks, int sys_ticks)
rebalance_tick(rq, 0);
}
void scheduling_functions_start_here(void) { }
/*
* schedule() is the main scheduler function.
*/
asmlinkage void schedule(void)
asmlinkage void __sched schedule(void)
{
long *switch_count;
task_t *prev, *next;
......@@ -1731,7 +1736,7 @@ EXPORT_SYMBOL(schedule);
* off of preempt_enable. Kernel preemptions off return from interrupt
* occur there and call schedule directly.
*/
asmlinkage void preempt_schedule(void)
asmlinkage void __sched preempt_schedule(void)
{
struct thread_info *ti = current_thread_info();
......@@ -1869,7 +1874,7 @@ void fastcall complete_all(struct completion *x)
spin_unlock_irqrestore(&x->wait.lock, flags);
}
void fastcall wait_for_completion(struct completion *x)
void fastcall __sched wait_for_completion(struct completion *x)
{
might_sleep();
spin_lock_irq(&x->wait.lock);
......@@ -1907,7 +1912,7 @@ EXPORT_SYMBOL(wait_for_completion);
__remove_wait_queue(q, &wait); \
spin_unlock_irqrestore(&q->lock, flags);
void fastcall interruptible_sleep_on(wait_queue_head_t *q)
void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
{
SLEEP_ON_VAR
......@@ -1920,7 +1925,7 @@ void fastcall interruptible_sleep_on(wait_queue_head_t *q)
EXPORT_SYMBOL(interruptible_sleep_on);
long fastcall interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
SLEEP_ON_VAR
......@@ -1935,7 +1940,7 @@ long fastcall interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
EXPORT_SYMBOL(interruptible_sleep_on_timeout);
void fastcall sleep_on(wait_queue_head_t *q)
void fastcall __sched sleep_on(wait_queue_head_t *q)
{
SLEEP_ON_VAR
......@@ -1948,7 +1953,7 @@ void fastcall sleep_on(wait_queue_head_t *q)
EXPORT_SYMBOL(sleep_on);
long fastcall sleep_on_timeout(wait_queue_head_t *q, long timeout)
long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
SLEEP_ON_VAR
......@@ -1963,8 +1968,6 @@ long fastcall sleep_on_timeout(wait_queue_head_t *q, long timeout)
EXPORT_SYMBOL(sleep_on_timeout);
/* Old-style marker: empty function whose address marked the end of the
 * sleeping functions; superseded by the ".sched.text" linker symbols
 * (removed by this patch). */
void scheduling_functions_end_here(void) { }
void set_user_nice(task_t *p, long nice)
{
unsigned long flags;
......@@ -2424,7 +2427,7 @@ asmlinkage long sys_sched_yield(void)
return 0;
}
void __cond_resched(void)
void __sched __cond_resched(void)
{
set_current_state(TASK_RUNNING);
schedule();
......@@ -2438,7 +2441,7 @@ EXPORT_SYMBOL(__cond_resched);
* this is a shortcut for kernel-space yielding - it marks the
* thread runnable and calls sys_sched_yield().
*/
void yield(void)
void __sched yield(void)
{
set_current_state(TASK_RUNNING);
sys_sched_yield();
......@@ -2453,7 +2456,7 @@ EXPORT_SYMBOL(yield);
* But don't do that if it is a deliberate, throttling IO wait (this task
* has set its backing_dev_info: the queue against which it should throttle)
*/
void io_schedule(void)
void __sched io_schedule(void)
{
struct runqueue *rq = this_rq();
......@@ -2464,7 +2467,7 @@ void io_schedule(void)
EXPORT_SYMBOL(io_schedule);
long io_schedule_timeout(long timeout)
long __sched io_schedule_timeout(long timeout)
{
struct runqueue *rq = this_rq();
long ret;
......@@ -3010,7 +3013,7 @@ EXPORT_SYMBOL(__might_sleep);
*
* Called inside preempt_disable().
*/
void __preempt_spin_lock(spinlock_t *lock)
void __sched __preempt_spin_lock(spinlock_t *lock)
{
if (preempt_count() > 1) {
_raw_spin_lock(lock);
......@@ -3026,7 +3029,7 @@ void __preempt_spin_lock(spinlock_t *lock)
EXPORT_SYMBOL(__preempt_spin_lock);
void __preempt_write_lock(rwlock_t *lock)
void __sched __preempt_write_lock(rwlock_t *lock)
{
if (preempt_count() > 1) {
_raw_write_lock(lock);
......
......@@ -996,7 +996,7 @@ static void process_timeout(unsigned long __data)
*
* In all cases the return value is guaranteed to be non-negative.
*/
fastcall signed long schedule_timeout(signed long timeout)
fastcall signed long __sched schedule_timeout(signed long timeout)
{
struct timer_list timer;
unsigned long expire;
......@@ -1056,7 +1056,7 @@ asmlinkage long sys_gettid(void)
return current->pid;
}
static long nanosleep_restart(struct restart_block *restart)
static long __sched nanosleep_restart(struct restart_block *restart)
{
unsigned long expire = restart->arg0, now = jiffies;
struct timespec __user *rmtp = (struct timespec __user *) restart->arg1;
......
......@@ -5,6 +5,7 @@
*/
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
struct rwsem_waiter {
......@@ -162,7 +163,7 @@ static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore
/*
* wait for the read lock to be granted
*/
struct rw_semaphore fastcall *rwsem_down_read_failed(struct rw_semaphore *sem)
struct rw_semaphore fastcall __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
......@@ -178,7 +179,7 @@ struct rw_semaphore fastcall *rwsem_down_read_failed(struct rw_semaphore *sem)
/*
* wait for the write lock to be granted
*/
struct rw_semaphore fastcall *rwsem_down_write_failed(struct rw_semaphore *sem)
struct rw_semaphore fastcall __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment