Commit 20e39386 authored by Andrew Morton, committed by Jaroslav Kysela

[PATCH] fastcall / regparm fixes

From: Gerd Knorr <kraxel@suse.de>

Current gccs error out if a function's declaration and definition disagree
about the register passing convention.

The patch adds a new `fastcall' declaration primitive, and uses it in all
the FASTCALL functions we could find.  A number of inconsistencies were
fixed up along the way.
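For context, a minimal standalone sketch (not part of the patch; `demo_fn' is a
made-up example) of how the two forms are meant to pair up so that a prototype
and its out-of-line definition agree on the regparm(3) convention:

	/* Prototype form: FASTCALL() wraps the whole declarator. */
	#define FASTCALL(x)	x __attribute__((regparm(3)))
	/* Definition form: a bare attribute placed in the declarator. */
	#define fastcall	__attribute__((regparm(3)))

	int FASTCALL(demo_fn(int a, int b));	/* declaration */

	int fastcall demo_fn(int a, int b)	/* definition now matches, so gcc
						   no longer errors out */
	{
		return a + b;
	}

On architectures without register parameter passing, the generic linkage.h hunk
below keeps `fastcall' defined to nothing, so the annotation is a no-op there.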
parent 38ace632
@@ -493,7 +493,7 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
-struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 struct thread_struct *prev = &prev_p->thread,
 *next = &next_p->thread;
...
@@ -551,7 +551,7 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
 {
 siginfo_t info;
 int signr;
...
@@ -150,7 +150,7 @@ inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
 apic_write_around(APIC_ICR, cfg);
 }
-void send_IPI_self(int vector)
+void fastcall send_IPI_self(int vector)
 {
 __send_IPI_shortcut(APIC_DEST_SELF, vector);
 }
...
@@ -95,7 +95,7 @@
 #define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
 struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
-struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
+struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 {
 struct tss_struct *tss;
 struct pt_regs *ret;
...
@@ -598,7 +598,7 @@ static inline int rx_refill(struct net_device *ndev, int gfp)
 }
 static void FASTCALL(rx_refill_atomic(struct net_device *ndev));
-static void rx_refill_atomic(struct net_device *ndev)
+static void fastcall rx_refill_atomic(struct net_device *ndev)
 {
 rx_refill(ndev, GFP_ATOMIC);
 }
@@ -620,7 +620,7 @@ static inline void clear_rx_desc(struct ns83820 *dev, unsigned i)
 }
 static void FASTCALL(phy_intr(struct net_device *ndev));
-static void phy_intr(struct net_device *ndev)
+static void fastcall phy_intr(struct net_device *ndev)
 {
 struct ns83820 *dev = PRIV(ndev);
 static char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
@@ -807,7 +807,7 @@ static void ns83820_cleanup_rx(struct ns83820 *dev)
 }
 static void FASTCALL(ns83820_rx_kick(struct net_device *ndev));
-static void ns83820_rx_kick(struct net_device *ndev)
+static void fastcall ns83820_rx_kick(struct net_device *ndev)
 {
 struct ns83820 *dev = PRIV(ndev);
 /*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ {
@@ -829,7 +829,7 @@ static void ns83820_rx_kick(struct net_device *ndev)
 *
 */
 static void FASTCALL(rx_irq(struct net_device *ndev));
-static void rx_irq(struct net_device *ndev)
+static void fastcall rx_irq(struct net_device *ndev)
 {
 struct ns83820 *dev = PRIV(ndev);
 struct rx_info *info = &dev->rx_info;
...
@@ -312,7 +312,7 @@ void wait_for_all_aios(struct kioctx *ctx)
 /* wait_on_sync_kiocb:
 * Waits on the given sync kiocb to complete.
 */
-ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
+ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
 {
 while (iocb->ki_users) {
 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -331,7 +331,7 @@ ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
-void exit_aio(struct mm_struct *mm)
+void fastcall exit_aio(struct mm_struct *mm)
 {
 struct kioctx *ctx = mm->ioctx_list;
 mm->ioctx_list = NULL;
@@ -356,7 +356,7 @@ void exit_aio(struct mm_struct *mm)
 * Called when the last user of an aio context has gone away,
 * and the struct needs to be freed.
 */
-void __put_ioctx(struct kioctx *ctx)
+void fastcall __put_ioctx(struct kioctx *ctx)
 {
 unsigned nr_events = ctx->max_reqs;
@@ -383,7 +383,7 @@ void __put_ioctx(struct kioctx *ctx)
 * req (after submitting it) and aio_complete() freeing the req.
 */
 static struct kiocb *FASTCALL(__aio_get_req(struct kioctx *ctx));
-static struct kiocb *__aio_get_req(struct kioctx *ctx)
+static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
 {
 struct kiocb *req = NULL;
 struct aio_ring *ring;
@@ -509,7 +509,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 * Returns true if this put was the last user of the kiocb,
 * false if the request is still in use.
 */
-int aio_put_req(struct kiocb *req)
+int fastcall aio_put_req(struct kiocb *req)
 {
 struct kioctx *ctx = req->ki_ctx;
 int ret;
@@ -596,7 +596,7 @@ static void aio_kick_handler(void *data)
 unuse_mm(ctx->mm);
 }
-void kick_iocb(struct kiocb *iocb)
+void fastcall kick_iocb(struct kiocb *iocb)
 {
 struct kioctx *ctx = iocb->ki_ctx;
@@ -622,7 +622,7 @@ void kick_iocb(struct kiocb *iocb)
 * Returns true if this is the last user of the request. The
 * only other user of the request can be the cancellation code.
 */
-int aio_complete(struct kiocb *iocb, long res, long res2)
+int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
 {
 struct kioctx *ctx = iocb->ki_ctx;
 struct aio_ring_info *info;
@@ -985,7 +985,7 @@ asmlinkage long sys_io_destroy(aio_context_t ctx)
 return -EINVAL;
 }
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 struct iocb *iocb)
 {
 struct kiocb *req;
...
@@ -97,7 +97,7 @@ void wake_up_buffer(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(wake_up_buffer);
-void unlock_buffer(struct buffer_head *bh)
+void fastcall unlock_buffer(struct buffer_head *bh)
 {
 /*
 * unlock_buffer against a zero-count bh is a bug, if the page
@@ -1256,7 +1256,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
 * mapping->page_lock and the global inode_lock.
 */
-void mark_buffer_dirty(struct buffer_head *bh)
+void fastcall mark_buffer_dirty(struct buffer_head *bh)
 {
 if (!buffer_uptodate(bh))
 buffer_error();
...
@@ -19,7 +19,7 @@
 #include <asm/siginfo.h>
 #include <asm/uaccess.h>
-void set_close_on_exec(unsigned int fd, int flag)
+void fastcall set_close_on_exec(unsigned int fd, int flag)
 {
 struct files_struct *files = current->files;
 spin_lock(&files->file_lock);
...
@@ -152,7 +152,7 @@ void close_private_file(struct file *file)
 EXPORT_SYMBOL(close_private_file);
-void fput(struct file *file)
+void fastcall fput(struct file *file)
 {
 if (atomic_dec_and_test(&file->f_count))
 __fput(file);
@@ -163,7 +163,7 @@ EXPORT_SYMBOL(fput);
 /* __fput is called from task context when aio completion releases the last
 * last use of a struct file *. Do not use otherwise.
 */
-void __fput(struct file *file)
+void fastcall __fput(struct file *file)
 {
 struct dentry *dentry = file->f_dentry;
 struct vfsmount *mnt = file->f_vfsmnt;
@@ -192,7 +192,7 @@ void __fput(struct file *file)
 mntput(mnt);
 }
-struct file *fget(unsigned int fd)
+struct file fastcall *fget(unsigned int fd)
 {
 struct file *file;
 struct files_struct *files = current->files;
@@ -214,7 +214,7 @@ EXPORT_SYMBOL(fget);
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
-struct file *fget_light(unsigned int fd, int *fput_needed)
+struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
 {
 struct file *file;
 struct files_struct *files = current->files;
...
@@ -571,7 +571,7 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
 *
 * We expect 'base' to be positive and a directory.
 */
-int link_path_walk(const char * name, struct nameidata *nd)
+int fastcall link_path_walk(const char * name, struct nameidata *nd)
 {
 struct path next;
 struct inode *inode;
@@ -771,7 +771,7 @@ int link_path_walk(const char * name, struct nameidata *nd)
 return err;
 }
-int path_walk(const char * name, struct nameidata *nd)
+int fastcall path_walk(const char * name, struct nameidata *nd)
 {
 current->total_link_count = 0;
 return link_path_walk(name, nd);
@@ -858,7 +858,7 @@ walk_init_root(const char *name, struct nameidata *nd)
 return 1;
 }
-int path_lookup(const char *name, unsigned int flags, struct nameidata *nd)
+int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata *nd)
 {
 nd->last_type = LAST_ROOT; /* if there are only slashes... */
 nd->flags = flags;
@@ -971,7 +971,7 @@ struct dentry * lookup_one_len(const char * name, struct dentry * base, int len)
 * that namei follows links, while lnamei does not.
 * SMP-safe
 */
-int __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
+int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
 {
 char *tmp = getname(name);
 int err = PTR_ERR(tmp);
...
@@ -890,7 +890,7 @@ static inline void __put_unused_fd(struct files_struct *files, unsigned int fd)
 files->next_fd = fd;
 }
-void put_unused_fd(unsigned int fd)
+void fastcall put_unused_fd(unsigned int fd)
 {
 struct files_struct *files = current->files;
 spin_lock(&files->file_lock);
@@ -913,7 +913,7 @@ EXPORT_SYMBOL(put_unused_fd);
 * will follow.
 */
-void fd_install(unsigned int fd, struct file * file)
+void fastcall fd_install(unsigned int fd, struct file * file)
 {
 struct files_struct *files = current->files;
 spin_lock(&files->file_lock);
...
@@ -3,6 +3,7 @@
 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
 #define FASTCALL(x) x __attribute__((regparm(3)))
+#define fastcall __attribute__((regparm(3)))
 #ifdef CONFIG_X86_ALIGNMENT_16
 #define __ALIGN .align 16,0x90
...
@@ -38,7 +38,6 @@ extern int cpu_sibling_map[];
 extern void smp_flush_tlb(void);
 extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
-extern void smp_send_reschedule(int cpu);
 extern void smp_invalidate_rcv(void); /* Process an NMI */
 extern void (*mtrr_hook) (void);
 extern void zap_low_mappings (void);
...
@@ -2,5 +2,6 @@
 #define __ASM_LINKAGE_H
 #define FASTCALL(x) x __attribute__((regparm(3)))
+#define fastcall __attribute__((regparm(3)))
 #endif
@@ -37,6 +37,7 @@
 #ifndef FASTCALL
 #define FASTCALL(x) x
+#define fastcall
 #endif
 #endif
@@ -602,7 +602,7 @@ extern void do_timer(struct pt_regs *);
 extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
 extern int FASTCALL(wake_up_process(struct task_struct * tsk));
 #ifdef CONFIG_SMP
-extern void FASTCALL(kick_process(struct task_struct * tsk));
+extern void kick_process(struct task_struct *tsk);
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
...
@@ -30,7 +30,7 @@ extern void smp_send_stop(void);
 /*
 * sends a 'reschedule' event to another CPU:
 */
-extern void FASTCALL(smp_send_reschedule(int cpu));
+extern void smp_send_reschedule(int cpu);
 /*
...
@@ -386,7 +386,7 @@ static inline void close_files(struct files_struct * files)
 }
 }
-void put_files_struct(struct files_struct *files)
+void fastcall put_files_struct(struct files_struct *files)
 {
 if (atomic_dec_and_test(&files->count)) {
 close_files(files);
@@ -810,7 +810,7 @@ asmlinkage long sys_exit(int error_code)
 do_exit((error_code&0xff)<<8);
 }
-task_t *next_thread(task_t *p)
+task_t fastcall *next_thread(task_t *p)
 {
 struct pid_link *link = p->pids + PIDTYPE_TGID;
 struct list_head *tmp, *head = &link->pidptr->task_list;
...
@@ -91,7 +91,7 @@ void __put_task_struct(struct task_struct *tsk)
 free_task(tsk);
 }
-void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
+void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 {
 unsigned long flags;
@@ -103,7 +103,7 @@ void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 EXPORT_SYMBOL(add_wait_queue);
-void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
+void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
 {
 unsigned long flags;
@@ -115,7 +115,7 @@ void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
 EXPORT_SYMBOL(add_wait_queue_exclusive);
-void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
+void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 {
 unsigned long flags;
@@ -139,7 +139,7 @@ EXPORT_SYMBOL(remove_wait_queue);
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the the critical region).
 */
-void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 unsigned long flags;
@@ -153,7 +153,7 @@ void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 EXPORT_SYMBOL(prepare_to_wait);
-void
+void fastcall
 prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 unsigned long flags;
@@ -168,7 +168,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
-void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 {
 unsigned long flags;
@@ -418,7 +418,7 @@ struct mm_struct * mm_alloc(void)
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
-void __mmdrop(struct mm_struct *mm)
+void fastcall __mmdrop(struct mm_struct *mm)
 {
 BUG_ON(mm == &init_mm);
 mm_free_pgd(mm);
...
@@ -57,7 +57,7 @@ static pidmap_t *map_limit = pidmap_array + PIDMAP_ENTRIES;
 static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
-inline void free_pidmap(int pid)
+fastcall void free_pidmap(int pid)
 {
 pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
 int offset = pid & BITS_PER_PAGE_MASK;
@@ -146,7 +146,7 @@ int alloc_pidmap(void)
 return -1;
 }
-inline struct pid *find_pid(enum pid_type type, int nr)
+fastcall struct pid *find_pid(enum pid_type type, int nr)
 {
 struct list_head *elem, *bucket = &pid_hash[type][pid_hashfn(nr)];
 struct pid *pid;
@@ -159,14 +159,14 @@ inline struct pid *find_pid(enum pid_type type, int nr)
 return NULL;
 }
-void link_pid(task_t *task, struct pid_link *link, struct pid *pid)
+void fastcall link_pid(task_t *task, struct pid_link *link, struct pid *pid)
 {
 atomic_inc(&pid->count);
 list_add_tail(&link->pid_chain, &pid->task_list);
 link->pidptr = pid;
 }
-int attach_pid(task_t *task, enum pid_type type, int nr)
+int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
 {
 struct pid *pid = find_pid(type, nr);
@@ -209,7 +209,7 @@ static void _detach_pid(task_t *task, enum pid_type type)
 __detach_pid(task, type);
 }
-void detach_pid(task_t *task, enum pid_type type)
+void fastcall detach_pid(task_t *task, enum pid_type type)
 {
 int nr = __detach_pid(task, type);
...
@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
 * The read-side of critical section that use call_rcu() for updation must
 * be protected by rcu_read_lock()/rcu_read_unlock().
 */
-void call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
+void fastcall call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
 {
 int cpu;
 unsigned long flags;
...
@@ -33,6 +33,7 @@
 #include <linux/suspend.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
+#include <linux/smp.h>
 #include <linux/timer.h>
 #include <linux/rcupdate.h>
 #include <linux/cpu.h>
@@ -700,7 +701,7 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 return success;
 }
-int wake_up_process(task_t * p)
+int fastcall wake_up_process(task_t * p)
 {
 return try_to_wake_up(p, TASK_STOPPED |
 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
@@ -708,7 +709,7 @@ int wake_up_process(task_t * p)
 EXPORT_SYMBOL(wake_up_process);
-int wake_up_state(task_t *p, unsigned int state)
+int fastcall wake_up_state(task_t *p, unsigned int state)
 {
 return try_to_wake_up(p, state, 0);
 }
@@ -717,7 +718,7 @@ int wake_up_state(task_t *p, unsigned int state)
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 */
-void sched_fork(task_t *p)
+void fastcall sched_fork(task_t *p)
 {
 /*
 * We mark the process as running here, but have not actually
@@ -773,7 +774,7 @@ void sched_fork(task_t *p)
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created process.
 */
-void wake_up_forked_process(task_t * p)
+void fastcall wake_up_forked_process(task_t * p)
 {
 unsigned long flags;
 runqueue_t *rq = task_rq_lock(current, &flags);
@@ -817,7 +818,7 @@ void wake_up_forked_process(task_t * p)
 * artificially, because any timeslice recovered here
 * was given away by the parent in the first place.)
 */
-void sched_exit(task_t * p)
+void fastcall sched_exit(task_t * p)
 {
 unsigned long flags;
 runqueue_t *rq;
@@ -1796,7 +1797,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 */
-void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
 unsigned long flags;
@@ -1810,7 +1811,7 @@ EXPORT_SYMBOL(__wake_up);
 /*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
 __wake_up_common(q, mode, 1, 0);
 }
@@ -1828,7 +1829,7 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 *
 * On UP it can prevent extra preemption.
 */
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
 unsigned long flags;
@@ -1845,7 +1846,7 @@ void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
-void complete(struct completion *x)
+void fastcall complete(struct completion *x)
 {
 unsigned long flags;
@@ -1858,7 +1859,7 @@ void complete(struct completion *x)
 EXPORT_SYMBOL(complete);
-void complete_all(struct completion *x)
+void fastcall complete_all(struct completion *x)
 {
 unsigned long flags;
@@ -1869,7 +1870,7 @@ void complete_all(struct completion *x)
 spin_unlock_irqrestore(&x->wait.lock, flags);
 }
-void wait_for_completion(struct completion *x)
+void fastcall wait_for_completion(struct completion *x)
 {
 might_sleep();
 spin_lock_irq(&x->wait.lock);
@@ -1907,7 +1908,7 @@ EXPORT_SYMBOL(wait_for_completion);
 __remove_wait_queue(q, &wait); \
 spin_unlock_irqrestore(&q->lock, flags);
-void interruptible_sleep_on(wait_queue_head_t *q)
+void fastcall interruptible_sleep_on(wait_queue_head_t *q)
 {
 SLEEP_ON_VAR
@@ -1920,7 +1921,7 @@ void interruptible_sleep_on(wait_queue_head_t *q)
 EXPORT_SYMBOL(interruptible_sleep_on);
-long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
 SLEEP_ON_VAR
@@ -1935,7 +1936,7 @@ long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
-void sleep_on(wait_queue_head_t *q)
+void fastcall sleep_on(wait_queue_head_t *q)
 {
 SLEEP_ON_VAR
@@ -1948,7 +1949,7 @@ void sleep_on(wait_queue_head_t *q)
 EXPORT_SYMBOL(sleep_on);
-long sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
 SLEEP_ON_VAR
...
@@ -213,7 +213,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
-inline void recalc_sigpending_tsk(struct task_struct *t)
+fastcall void recalc_sigpending_tsk(struct task_struct *t)
 {
 if (t->signal->group_stop_count > 0 ||
 PENDING(&t->pending, &t->blocked) ||
...
@@ -130,7 +130,7 @@ EXPORT_SYMBOL(local_bh_enable);
 /*
 * This function must run with irqs disabled!
 */
-inline void raise_softirq_irqoff(unsigned int nr)
+inline fastcall void raise_softirq_irqoff(unsigned int nr)
 {
 __raise_softirq_irqoff(nr);
@@ -149,7 +149,7 @@ inline void raise_softirq_irqoff(unsigned int nr)
 EXPORT_SYMBOL(raise_softirq_irqoff);
-void raise_softirq(unsigned int nr)
+void fastcall raise_softirq(unsigned int nr)
 {
 unsigned long flags;
@@ -179,7 +179,7 @@ struct tasklet_head
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
-void __tasklet_schedule(struct tasklet_struct *t)
+void fastcall __tasklet_schedule(struct tasklet_struct *t)
 {
 unsigned long flags;
@@ -192,7 +192,7 @@ void __tasklet_schedule(struct tasklet_struct *t)
 EXPORT_SYMBOL(__tasklet_schedule);
-void __tasklet_hi_schedule(struct tasklet_struct *t)
+void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
 {
 unsigned long flags;
...
@@ -997,7 +997,7 @@ static void process_timeout(unsigned long __data)
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
-signed long schedule_timeout(signed long timeout)
+fastcall signed long schedule_timeout(signed long timeout)
 {
 struct timer_list timer;
 unsigned long expire;
...
@@ -78,7 +78,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 * We queue the work to the CPU it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
-int queue_work(struct workqueue_struct *wq, struct work_struct *work)
+int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 int ret = 0, cpu = get_cpu();
@@ -99,7 +99,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 __queue_work(wq->cpu_wq + smp_processor_id(), work);
 }
-int queue_delayed_work(struct workqueue_struct *wq,
+int fastcall queue_delayed_work(struct workqueue_struct *wq,
 struct work_struct *work, unsigned long delay)
 {
 int ret = 0;
@@ -203,7 +203,7 @@ static int worker_thread(void *__cwq)
 * This function used to run the workqueues itself. Now we just wait for the
 * helper threads to do it.
 */
-void flush_workqueue(struct workqueue_struct *wq)
+void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
 struct cpu_workqueue_struct *cwq;
 int cpu;
@@ -310,12 +310,12 @@ void destroy_workqueue(struct workqueue_struct *wq)
 static struct workqueue_struct *keventd_wq;
-int schedule_work(struct work_struct *work)
+int fastcall schedule_work(struct work_struct *work)
 {
 return queue_work(keventd_wq, work);
 }
-int schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
 {
 return queue_delayed_work(keventd_wq, work, delay);
 }
...
@@ -29,7 +29,7 @@ void rwsemtrace(struct rw_semaphore *sem, const char *str)
 /*
 * initialise the semaphore
 */
-void init_rwsem(struct rw_semaphore *sem)
+void fastcall init_rwsem(struct rw_semaphore *sem)
 {
 sem->activity = 0;
 spin_lock_init(&sem->wait_lock);
@@ -117,7 +117,7 @@ static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *
 /*
 * get a read lock on the semaphore
 */
-void __down_read(struct rw_semaphore *sem)
+void fastcall __down_read(struct rw_semaphore *sem)
 {
 struct rwsem_waiter waiter;
 struct task_struct *tsk;
@@ -162,7 +162,7 @@ void __down_read(struct rw_semaphore *sem)
 /*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
-int __down_read_trylock(struct rw_semaphore *sem)
+int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
 int ret = 0;
 rwsemtrace(sem,"Entering __down_read_trylock");
@@ -185,7 +185,7 @@ int __down_read_trylock(struct rw_semaphore *sem)
 * get a write lock on the semaphore
 * - note that we increment the waiting count anyway to indicate an exclusive lock
 */
-void __down_write(struct rw_semaphore *sem)
+void fastcall __down_write(struct rw_semaphore *sem)
 {
 struct rwsem_waiter waiter;
 struct task_struct *tsk;
@@ -230,7 +230,7 @@ void __down_write(struct rw_semaphore *sem)
 /*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
-int __down_write_trylock(struct rw_semaphore *sem)
+int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
 int ret = 0;
 rwsemtrace(sem,"Entering __down_write_trylock");
@@ -252,7 +252,7 @@ int __down_write_trylock(struct rw_semaphore *sem)
 /*
 * release a read lock on the semaphore
 */
-void __up_read(struct rw_semaphore *sem)
+void fastcall __up_read(struct rw_semaphore *sem)
 {
 rwsemtrace(sem,"Entering __up_read");
@@ -269,7 +269,7 @@ void __up_read(struct rw_semaphore *sem)
 /*
 * release a write lock on the semaphore
 */
-void __up_write(struct rw_semaphore *sem)
+void fastcall __up_write(struct rw_semaphore *sem)
 {
 rwsemtrace(sem,"Entering __up_write");
@@ -288,7 +288,7 @@ void __up_write(struct rw_semaphore *sem)
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
-void __downgrade_write(struct rw_semaphore *sem)
+void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
 rwsemtrace(sem,"Entering __downgrade_write");
...
@@ -162,7 +162,7 @@ static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore
 /*
 * wait for the read lock to be granted
 */
-struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
 struct rwsem_waiter waiter;
@@ -178,7 +178,7 @@ struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem)
 /*
 * wait for the write lock to be granted
 */
-struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_down_write_failed(struct rw_semaphore *sem)
 {
 struct rwsem_waiter waiter;
@@ -195,7 +195,7 @@ struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem)
 * handle waking up a waiter on the semaphore
 * - up_read has decremented the active part of the count if we come here
 */
-struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
 rwsemtrace(sem,"Entering rwsem_wake");
@@ -217,7 +217,7 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 * - caller incremented waiting part of count, and discovered it to be still negative
 * - just wake up any readers at the front of the queue
 */
-struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
 rwsemtrace(sem,"Entering rwsem_downgrade_wake");
...
@@ -292,7 +292,7 @@ static wait_queue_head_t *page_waitqueue(struct page *page)
 return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
 }
-void wait_on_page_bit(struct page *page, int bit_nr)
+void fastcall wait_on_page_bit(struct page *page, int bit_nr)
 {
 wait_queue_head_t *waitqueue = page_waitqueue(page);
 DEFINE_WAIT(wait);
@@ -324,7 +324,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
-void unlock_page(struct page *page)
+void fastcall unlock_page(struct page *page)
 {
 wait_queue_head_t *waitqueue = page_waitqueue(page);
 smp_mb__before_clear_bit();
@@ -365,7 +365,7 @@ EXPORT_SYMBOL(end_page_writeback);
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
 */
-void __lock_page(struct page *page)
+void fastcall __lock_page(struct page *page)
 {
 wait_queue_head_t *wqh = page_waitqueue(page);
 DEFINE_WAIT(wait);
@@ -953,7 +953,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
 * and schedules an I/O to read in its contents from disk.
 */
 static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
-static int page_cache_read(struct file * file, unsigned long offset)
+static int fastcall page_cache_read(struct file * file, unsigned long offset)
 {
 struct address_space *mapping = file->f_mapping;
 struct page *page;
...
@@ -147,7 +147,7 @@ static inline unsigned long map_new_virtual(struct page *page)
 return vaddr;
 }
-void *kmap_high(struct page *page)
+void fastcall *kmap_high(struct page *page)
 {
 unsigned long vaddr;
@@ -170,7 +170,7 @@ void *kmap_high(struct page *page)
 EXPORT_SYMBOL(kmap_high);
-void kunmap_high(struct page *page)
+void fastcall kunmap_high(struct page *page)
 {
 unsigned long vaddr;
 unsigned long nr;
...
@@ -145,7 +145,7 @@ void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr)
 } while (--nr);
 }
-pte_t * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
 if (!pmd_present(*pmd)) {
 struct page *new;
@@ -171,7 +171,7 @@ pte_t * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 return pte_offset_map(pmd, address);
 }
-pte_t * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
 if (!pmd_present(*pmd)) {
 pte_t *new;
@@ -1646,7 +1646,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
 * On a two-level page table, this ends up actually being entirely
 * optimized away.
 */
-pmd_t *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
 pmd_t *new;
...
@@ -443,7 +443,7 @@ void drain_local_pages(void)
 * Free a 0-order page
 */
 static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
-static void free_hot_cold_page(struct page *page, int cold)
+static void fastcall free_hot_cold_page(struct page *page, int cold)
 {
 struct zone *zone = page_zone(page);
 struct per_cpu_pages *pcp;
@@ -462,12 +462,12 @@ static void free_hot_cold_page(struct page *page, int cold)
 put_cpu();
 }
-void free_hot_page(struct page *page)
+void fastcall free_hot_page(struct page *page)
 {
 free_hot_cold_page(page, 0);
 }
-void free_cold_page(struct page *page)
+void fastcall free_cold_page(struct page *page)
 {
 free_hot_cold_page(page, 1);
 }
@@ -532,7 +532,7 @@ static struct page *buffered_rmqueue(struct zone *zone, int order, int cold)
 * sized machine, GFP_HIGHMEM and GFP_KERNEL requests basically leave the DMA
 * zone untouched.
 */
-struct page *
+struct page * fastcall
 __alloc_pages(unsigned int gfp_mask, unsigned int order,
 struct zonelist *zonelist)
 {
@@ -685,7 +685,7 @@ EXPORT_SYMBOL(__alloc_pages);
 /*
 * Common helper functions.
 */
-unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
+fastcall unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
 {
 struct page * page;
@@ -697,7 +697,7 @@ unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
 EXPORT_SYMBOL(__get_free_pages);
-unsigned long get_zeroed_page(unsigned int gfp_mask)
+fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
 {
 struct page * page;
@@ -726,7 +726,7 @@ void __pagevec_free(struct pagevec *pvec)
 free_hot_cold_page(pvec->pages[i], pvec->cold);
 }
-void __free_pages(struct page *page, unsigned int order)
+fastcall void __free_pages(struct page *page, unsigned int order)
 {
 if (!PageReserved(page) && put_page_testzero(page)) {
 if (order == 0)
@@ -738,7 +738,7 @@ void __free_pages(struct page *page, unsigned int order)
 EXPORT_SYMBOL(__free_pages);
-void free_pages(unsigned long addr, unsigned int order)
+fastcall void free_pages(unsigned long addr, unsigned int order)
 {
 if (addr != 0) {
 BUG_ON(!virt_addr_valid(addr));
...
@@ -112,7 +112,7 @@ pte_chain_encode(struct pte_chain *pte_chain, int idx)
 * If the page has a single-entry pte_chain, collapse that back to a PageDirect
 * representation. This way, it's only done under memory pressure.
 */
-int page_referenced(struct page * page)
+int fastcall page_referenced(struct page * page)
 {
 struct pte_chain *pc;
 int referenced = 0;
@@ -165,7 +165,7 @@ int page_referenced(struct page * page)
 * Add a new pte reverse mapping to a page.
 * The caller needs to hold the mm->page_table_lock.
 */
-struct pte_chain *
+struct pte_chain * fastcall
 page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain)
 {
 pte_addr_t pte_paddr = ptep_to_paddr(ptep);
@@ -221,7 +221,7 @@ page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain)
 * the page.
 * Caller needs to hold the mm->page_table_lock.
 */
-void page_remove_rmap(struct page *page, pte_t *ptep)
+void fastcall page_remove_rmap(struct page *page, pte_t *ptep)
 {
 pte_addr_t pte_paddr = ptep_to_paddr(ptep);
 struct pte_chain *pc;
@@ -293,7 +293,7 @@ void page_remove_rmap(struct page *page, pte_t *ptep)
 * mm->page_table_lock try_to_unmap_one(), trylock
 */
 static int FASTCALL(try_to_unmap_one(struct page *, pte_addr_t));
-static int try_to_unmap_one(struct page * page, pte_addr_t paddr)
+static int fastcall try_to_unmap_one(struct page * page, pte_addr_t paddr)
 {
 pte_t *ptep = rmap_ptep_map(paddr);
 unsigned long address = ptep_to_address(ptep);
@@ -382,7 +382,7 @@ static int try_to_unmap_one(struct page * page, pte_addr_t paddr)
 * SWAP_AGAIN - we missed a trylock, try again later
 * SWAP_FAIL - the page is unswappable
 */
-int try_to_unmap(struct page * page)
+int fastcall try_to_unmap(struct page * page)
 {
 struct pte_chain *pc, *next_pc, *start;
 int ret = SWAP_SUCCESS;
...
@@ -2134,7 +2134,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 *
 * Currently only used for dentry validation.
 */
-int kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
+int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
 {
 unsigned long addr = (unsigned long) ptr;
 unsigned long min_addr = PAGE_OFFSET;
...
@@ -76,7 +76,7 @@ int rotate_reclaimable_page(struct page *page)
 /*
 * FIXME: speed this up?
 */
-void activate_page(struct page *page)
+void fastcall activate_page(struct page *page)
 {
 struct zone *zone = page_zone(page);
@@ -97,7 +97,7 @@ void activate_page(struct page *page)
 * inactive,referenced -> active,unreferenced
 * active,unreferenced -> active,referenced
 */
-void mark_page_accessed(struct page *page)
+void fastcall mark_page_accessed(struct page *page)
 {
 if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
 activate_page(page);
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(mark_page_accessed);
 static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
 static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };
-void lru_cache_add(struct page *page)
+void fastcall lru_cache_add(struct page *page)
 {
 struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
@@ -126,7 +126,7 @@ void lru_cache_add(struct page *page)
 put_cpu_var(lru_add_pvecs);
 }
-void lru_cache_add_active(struct page *page)
+void fastcall lru_cache_add_active(struct page *page)
 {
 struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
@@ -152,7 +152,7 @@ void lru_add_drain(void)
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs. But it gets used by networking.
 */
-void __page_cache_release(struct page *page)
+void fastcall __page_cache_release(struct page *page)
 {
 unsigned long flags;
 struct zone *zone = page_zone(page);
...
@@ -409,7 +409,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
 return len;
 }
-void __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
+void fastcall __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
 {
 BT_DBG("dlc %p state %ld", d, d->state);
@@ -420,7 +420,7 @@ void __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
 rfcomm_schedule(RFCOMM_SCHED_TX);
 }
-void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
+void fastcall __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
 {
 BT_DBG("dlc %p state %ld", d, d->state);
...