From: Gerd Knorr <kraxel@suse.de>

This is a bunch of fixes for the regparm stuff: for all fastcall functions
the declaration and the definition must match, both with and without
CONFIG_REGPARM.  Current gcc fails to build due to the mismatches.

Other architectures which have private FASTCALL stuff will probably need
similar adaptation; for now I've done only i386 and uml.
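
For illustration, a minimal sketch of the breakage (touch_page is a
made-up name, not one of the functions touched below; assume i386 with
CONFIG_REGPARM, where both FASTCALL() and the new bare fastcall macro
expand to __attribute__((regparm(3)))):

    #include <linux/linkage.h>

    struct page;

    /* prototype annotated the old way: regparm(3) on i386 */
    extern void FASTCALL(touch_page(struct page *page));

    /*
     * Pre-patch definition: no attribute here, so prototype and
     * definition disagree about the calling convention and current
     * gcc refuses to compile it:
     *
     *     void touch_page(struct page *page) { ... }
     */

    /* Post-patch definition: the bare fastcall macro repeats the
     * regparm(3) attribute, so both sides match. */
    void fastcall touch_page(struct page *page)
    {
    	(void)page;		/* placeholder body */
    }

With !CONFIG_REGPARM both macros expand to nothing, so declaration and
definition still agree, which is the point of adding the second macro.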


---

 arch/i386/kernel/process.c  |    2 +-
 arch/i386/kernel/signal.c   |    2 +-
 arch/i386/kernel/vm86.c     |    2 +-
 drivers/net/ns83820.c       |    8 ++++----
 fs/aio.c                    |   16 ++++++++--------
 fs/buffer.c                 |    4 ++--
 fs/fcntl.c                  |    2 +-
 fs/file_table.c             |    8 ++++----
 fs/namei.c                  |    8 ++++----
 fs/open.c                   |    4 ++--
 include/asm-i386/linkage.h  |    1 +
 include/asm-um/linkage.h    |    1 +
 include/linux/linkage.h     |    1 +
 kernel/exit.c               |    4 ++--
 kernel/fork.c               |   14 +++++++-------
 kernel/pid.c                |   10 +++++-----
 kernel/rcupdate.c           |    2 +-
 kernel/sched.c              |   30 +++++++++++++++---------------
 kernel/signal.c             |    2 +-
 kernel/softirq.c            |    8 ++++----
 kernel/timer.c              |    2 +-
 kernel/workqueue.c          |   10 +++++-----
 lib/rwsem-spinlock.c        |   16 ++++++++--------
 lib/rwsem.c                 |    8 ++++----
 mm/filemap.c                |    8 ++++----
 mm/highmem.c                |    4 ++--
 mm/memory.c                 |    6 +++---
 mm/page_alloc.c             |   16 ++++++++--------
 mm/rmap.c                   |   10 +++++-----
 mm/slab.c                   |    2 +-
 mm/swap.c                   |   10 +++++-----
 net/bluetooth/rfcomm/core.c |    4 ++--
 32 files changed, 114 insertions(+), 111 deletions(-)

diff -puN arch/i386/kernel/process.c~fastcall-warning-fixes arch/i386/kernel/process.c
--- 25/arch/i386/kernel/process.c~fastcall-warning-fixes	2004-02-28 18:13:42.000000000 -0800
+++ 25-akpm/arch/i386/kernel/process.c	2004-02-28 18:13:43.000000000 -0800
@@ -493,7 +493,7 @@ int dump_task_regs(struct task_struct *t
  * the task-switch, and shows up in ret_from_fork in entry.S,
  * for example.
  */
-struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev = &prev_p->thread,
 				 *next = &next_p->thread;
diff -puN arch/i386/kernel/signal.c~fastcall-warning-fixes arch/i386/kernel/signal.c
--- 25/arch/i386/kernel/signal.c~fastcall-warning-fixes	2004-02-28 18:13:42.000000000 -0800
+++ 25-akpm/arch/i386/kernel/signal.c	2004-02-28 18:13:43.000000000 -0800
@@ -551,7 +551,7 @@ handle_signal(unsigned long sig, siginfo
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-int do_signal(struct pt_regs *regs, sigset_t *oldset)
+int fastcall do_signal(struct pt_regs *regs, sigset_t *oldset)
 {
 	siginfo_t info;
 	int signr;
diff -puN arch/i386/kernel/vm86.c~fastcall-warning-fixes arch/i386/kernel/vm86.c
--- 25/arch/i386/kernel/vm86.c~fastcall-warning-fixes	2004-02-28 18:13:42.000000000 -0800
+++ 25-akpm/arch/i386/kernel/vm86.c	2004-02-28 18:13:43.000000000 -0800
@@ -95,7 +95,7 @@
 #define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)
 
 struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
-struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
+struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 {
 	struct tss_struct *tss;
 	struct pt_regs *ret;
diff -puN drivers/net/ns83820.c~fastcall-warning-fixes drivers/net/ns83820.c
--- 25/drivers/net/ns83820.c~fastcall-warning-fixes	2004-02-28 18:13:42.000000000 -0800
+++ 25-akpm/drivers/net/ns83820.c	2004-02-28 18:13:43.000000000 -0800
@@ -598,7 +598,7 @@ static inline int rx_refill(struct net_d
 }
 
 static void FASTCALL(rx_refill_atomic(struct net_device *ndev));
-static void rx_refill_atomic(struct net_device *ndev)
+static void fastcall rx_refill_atomic(struct net_device *ndev)
 {
 	rx_refill(ndev, GFP_ATOMIC);
 }
@@ -620,7 +620,7 @@ static inline void clear_rx_desc(struct 
 }
 
 static void FASTCALL(phy_intr(struct net_device *ndev));
-static void phy_intr(struct net_device *ndev)
+static void fastcall phy_intr(struct net_device *ndev)
 {
 	struct ns83820 *dev = PRIV(ndev);
 	static char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
@@ -807,7 +807,7 @@ static void ns83820_cleanup_rx(struct ns
 }
 
 static void FASTCALL(ns83820_rx_kick(struct net_device *ndev));
-static void ns83820_rx_kick(struct net_device *ndev)
+static void fastcall ns83820_rx_kick(struct net_device *ndev)
 {
 	struct ns83820 *dev = PRIV(ndev);
 	/*if (nr_rx_empty(dev) >= NR_RX_DESC/4)*/ {
@@ -829,7 +829,7 @@ static void ns83820_rx_kick(struct net_d
  *	
  */
 static void FASTCALL(rx_irq(struct net_device *ndev));
-static void rx_irq(struct net_device *ndev)
+static void fastcall rx_irq(struct net_device *ndev)
 {
 	struct ns83820 *dev = PRIV(ndev);
 	struct rx_info *info = &dev->rx_info;
diff -puN fs/aio.c~fastcall-warning-fixes fs/aio.c
--- 25/fs/aio.c~fastcall-warning-fixes	2004-02-28 18:13:42.000000000 -0800
+++ 25-akpm/fs/aio.c	2004-02-28 18:13:43.000000000 -0800
@@ -312,7 +312,7 @@ void wait_for_all_aios(struct kioctx *ct
 /* wait_on_sync_kiocb:
  *	Waits on the given sync kiocb to complete.
  */
-ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
+ssize_t fastcall wait_on_sync_kiocb(struct kiocb *iocb)
 {
 	while (iocb->ki_users) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
@@ -331,7 +331,7 @@ ssize_t wait_on_sync_kiocb(struct kiocb 
  * go away, they will call put_ioctx and release any pinned memory
  * associated with the request (held via struct page * references).
  */
-void exit_aio(struct mm_struct *mm)
+void fastcall exit_aio(struct mm_struct *mm)
 {
 	struct kioctx *ctx = mm->ioctx_list;
 	mm->ioctx_list = NULL;
@@ -356,7 +356,7 @@ void exit_aio(struct mm_struct *mm)
  *	Called when the last user of an aio context has gone away,
  *	and the struct needs to be freed.
  */
-void __put_ioctx(struct kioctx *ctx)
+void fastcall __put_ioctx(struct kioctx *ctx)
 {
 	unsigned nr_events = ctx->max_reqs;
 
@@ -383,7 +383,7 @@ void __put_ioctx(struct kioctx *ctx)
  * req (after submitting it) and aio_complete() freeing the req.
  */
 static struct kiocb *FASTCALL(__aio_get_req(struct kioctx *ctx));
-static struct kiocb *__aio_get_req(struct kioctx *ctx)
+static struct kiocb fastcall *__aio_get_req(struct kioctx *ctx)
 {
 	struct kiocb *req = NULL;
 	struct aio_ring *ring;
@@ -509,7 +509,7 @@ static int __aio_put_req(struct kioctx *
  *	Returns true if this put was the last user of the kiocb,
  *	false if the request is still in use.
  */
-int aio_put_req(struct kiocb *req)
+int fastcall aio_put_req(struct kiocb *req)
 {
 	struct kioctx *ctx = req->ki_ctx;
 	int ret;
@@ -596,7 +596,7 @@ static void aio_kick_handler(void *data)
 	unuse_mm(ctx->mm);
 }
 
-void kick_iocb(struct kiocb *iocb)
+void fastcall kick_iocb(struct kiocb *iocb)
 {
 	struct kioctx	*ctx = iocb->ki_ctx;
 
@@ -622,7 +622,7 @@ void kick_iocb(struct kiocb *iocb)
  *	Returns true if this is the last user of the request.  The 
  *	only other user of the request can be the cancellation code.
  */
-int aio_complete(struct kiocb *iocb, long res, long res2)
+int fastcall aio_complete(struct kiocb *iocb, long res, long res2)
 {
 	struct kioctx	*ctx = iocb->ki_ctx;
 	struct aio_ring_info	*info;
@@ -985,7 +985,7 @@ asmlinkage long sys_io_destroy(aio_conte
 	return -EINVAL;
 }
 
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+int fastcall io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 			 struct iocb *iocb)
 {
 	struct kiocb *req;
diff -puN fs/buffer.c~fastcall-warning-fixes fs/buffer.c
--- 25/fs/buffer.c~fastcall-warning-fixes	2004-02-28 18:13:42.000000000 -0800
+++ 25-akpm/fs/buffer.c	2004-02-28 18:13:43.000000000 -0800
@@ -97,7 +97,7 @@ void wake_up_buffer(struct buffer_head *
 }
 EXPORT_SYMBOL(wake_up_buffer);
 
-void unlock_buffer(struct buffer_head *bh)
+void fastcall unlock_buffer(struct buffer_head *bh)
 {
 	/*
 	 * unlock_buffer against a zero-count bh is a bug, if the page
@@ -1260,7 +1260,7 @@ __getblk_slow(struct block_device *bdev,
  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
  * mapping->page_lock and the global inode_lock.
  */
-void mark_buffer_dirty(struct buffer_head *bh)
+void fastcall mark_buffer_dirty(struct buffer_head *bh)
 {
 	if (!buffer_uptodate(bh))
 		buffer_error();
diff -puN fs/fcntl.c~fastcall-warning-fixes fs/fcntl.c
--- 25/fs/fcntl.c~fastcall-warning-fixes	2004-02-28 18:13:42.000000000 -0800
+++ 25-akpm/fs/fcntl.c	2004-02-28 18:13:43.000000000 -0800
@@ -19,7 +19,7 @@
 #include <asm/siginfo.h>
 #include <asm/uaccess.h>
 
-void set_close_on_exec(unsigned int fd, int flag)
+void fastcall set_close_on_exec(unsigned int fd, int flag)
 {
 	struct files_struct *files = current->files;
 	spin_lock(&files->file_lock);
diff -puN fs/file_table.c~fastcall-warning-fixes fs/file_table.c
--- 25/fs/file_table.c~fastcall-warning-fixes	2004-02-28 18:13:42.000000000 -0800
+++ 25-akpm/fs/file_table.c	2004-02-28 18:13:43.000000000 -0800
@@ -152,7 +152,7 @@ void close_private_file(struct file *fil
 
 EXPORT_SYMBOL(close_private_file);
 
-void fput(struct file *file)
+void fastcall fput(struct file *file)
 {
 	if (atomic_dec_and_test(&file->f_count))
 		__fput(file);
@@ -163,7 +163,7 @@ EXPORT_SYMBOL(fput);
 /* __fput is called from task context when aio completion releases the last
  * last use of a struct file *.  Do not use otherwise.
  */
-void __fput(struct file *file)
+void fastcall __fput(struct file *file)
 {
 	struct dentry *dentry = file->f_dentry;
 	struct vfsmount *mnt = file->f_vfsmnt;
@@ -192,7 +192,7 @@ void __fput(struct file *file)
 	mntput(mnt);
 }
 
-struct file *fget(unsigned int fd)
+struct file fastcall *fget(unsigned int fd)
 {
 	struct file *file;
 	struct files_struct *files = current->files;
@@ -214,7 +214,7 @@ EXPORT_SYMBOL(fget);
  * and a flag is returned to be passed to the corresponding fput_light().
  * There must not be a cloning between an fget_light/fput_light pair.
  */
-struct file *fget_light(unsigned int fd, int *fput_needed)
+struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
 {
 	struct file *file;
 	struct files_struct *files = current->files;
diff -puN fs/namei.c~fastcall-warning-fixes fs/namei.c
--- 25/fs/namei.c~fastcall-warning-fixes	2004-02-28 18:13:42.000000000 -0800
+++ 25-akpm/fs/namei.c	2004-02-28 18:13:43.000000000 -0800
@@ -571,7 +571,7 @@ fail:
  *
  * We expect 'base' to be positive and a directory.
  */
-int link_path_walk(const char * name, struct nameidata *nd)
+int fastcall link_path_walk(const char * name, struct nameidata *nd)
 {
 	struct path next;
 	struct inode *inode;
@@ -771,7 +771,7 @@ return_err:
 	return err;
 }
 
-int path_walk(const char * name, struct nameidata *nd)
+int fastcall path_walk(const char * name, struct nameidata *nd)
 {
 	current->total_link_count = 0;
 	return link_path_walk(name, nd);
@@ -858,7 +858,7 @@ walk_init_root(const char *name, struct 
 	return 1;
 }
 
-int path_lookup(const char *name, unsigned int flags, struct nameidata *nd)
+int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata *nd)
 {
 	nd->last_type = LAST_ROOT; /* if there are only slashes... */
 	nd->flags = flags;
@@ -971,7 +971,7 @@ access:
  * that namei follows links, while lnamei does not.
  * SMP-safe
  */
-int __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
+int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd)
 {
 	char *tmp = getname(name);
 	int err = PTR_ERR(tmp);
diff -puN fs/open.c~fastcall-warning-fixes fs/open.c
--- 25/fs/open.c~fastcall-warning-fixes	2004-02-28 18:13:42.000000000 -0800
+++ 25-akpm/fs/open.c	2004-02-28 18:13:43.000000000 -0800
@@ -890,7 +890,7 @@ static inline void __put_unused_fd(struc
 		files->next_fd = fd;
 }
 
-void put_unused_fd(unsigned int fd)
+void fastcall put_unused_fd(unsigned int fd)
 {
 	struct files_struct *files = current->files;
 	spin_lock(&files->file_lock);
@@ -913,7 +913,7 @@ EXPORT_SYMBOL(put_unused_fd);
  * will follow.
  */
 
-void fd_install(unsigned int fd, struct file * file)
+void fastcall fd_install(unsigned int fd, struct file * file)
 {
 	struct files_struct *files = current->files;
 	spin_lock(&files->file_lock);
diff -puN include/asm-i386/linkage.h~fastcall-warning-fixes include/asm-i386/linkage.h
--- 25/include/asm-i386/linkage.h~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/include/asm-i386/linkage.h	2004-02-28 18:13:43.000000000 -0800
@@ -3,6 +3,7 @@
 
 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
 #define FASTCALL(x)	x __attribute__((regparm(3)))
+#define fastcall	__attribute__((regparm(3)))
 
 #ifdef CONFIG_X86_ALIGNMENT_16
 #define __ALIGN .align 16,0x90
diff -puN include/asm-um/linkage.h~fastcall-warning-fixes include/asm-um/linkage.h
--- 25/include/asm-um/linkage.h~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/include/asm-um/linkage.h	2004-02-28 18:13:43.000000000 -0800
@@ -2,5 +2,6 @@
 #define __ASM_LINKAGE_H
 
 #define FASTCALL(x)	x __attribute__((regparm(3)))
+#define fastcall        __attribute__((regparm(3)))
 
 #endif
diff -puN include/linux/linkage.h~fastcall-warning-fixes include/linux/linkage.h
--- 25/include/linux/linkage.h~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/include/linux/linkage.h	2004-02-28 18:13:43.000000000 -0800
@@ -37,6 +37,7 @@
 
 #ifndef FASTCALL
 #define FASTCALL(x)	x
+#define fastcall
 #endif
 
 #endif
diff -puN kernel/exit.c~fastcall-warning-fixes kernel/exit.c
--- 25/kernel/exit.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/kernel/exit.c	2004-02-28 18:13:43.000000000 -0800
@@ -386,7 +386,7 @@ static inline void close_files(struct fi
 	}
 }
 
-void put_files_struct(struct files_struct *files)
+void fastcall put_files_struct(struct files_struct *files)
 {
 	if (atomic_dec_and_test(&files->count)) {
 		close_files(files);
@@ -810,7 +810,7 @@ asmlinkage long sys_exit(int error_code)
 	do_exit((error_code&0xff)<<8);
 }
 
-task_t *next_thread(task_t *p)
+task_t fastcall *next_thread(task_t *p)
 {
 	struct pid_link *link = p->pids + PIDTYPE_TGID;
 	struct list_head *tmp, *head = &link->pidptr->task_list;
diff -puN kernel/fork.c~fastcall-warning-fixes kernel/fork.c
--- 25/kernel/fork.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/kernel/fork.c	2004-02-28 18:13:43.000000000 -0800
@@ -91,7 +91,7 @@ void __put_task_struct(struct task_struc
 	free_task(tsk);
 }
 
-void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
+void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 {
 	unsigned long flags;
 
@@ -103,7 +103,7 @@ void add_wait_queue(wait_queue_head_t *q
 
 EXPORT_SYMBOL(add_wait_queue);
 
-void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
+void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
 {
 	unsigned long flags;
 
@@ -115,7 +115,7 @@ void add_wait_queue_exclusive(wait_queue
 
 EXPORT_SYMBOL(add_wait_queue_exclusive);
 
-void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
+void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 {
 	unsigned long flags;
 
@@ -139,7 +139,7 @@ EXPORT_SYMBOL(remove_wait_queue);
  * stops them from bleeding out - it would still allow subsequent
  * loads to move into the the critical region).
  */
-void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
+void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;
 
@@ -153,7 +153,7 @@ void prepare_to_wait(wait_queue_head_t *
 
 EXPORT_SYMBOL(prepare_to_wait);
 
-void
+void fastcall
 prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;
@@ -168,7 +168,7 @@ prepare_to_wait_exclusive(wait_queue_hea
 
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
-void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -418,7 +418,7 @@ struct mm_struct * mm_alloc(void)
  * is dropped: either by a lazy thread or by
  * mmput. Free the page directory and the mm.
  */
-void __mmdrop(struct mm_struct *mm)
+void fastcall __mmdrop(struct mm_struct *mm)
 {
 	BUG_ON(mm == &init_mm);
 	mm_free_pgd(mm);
diff -puN kernel/pid.c~fastcall-warning-fixes kernel/pid.c
--- 25/kernel/pid.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/kernel/pid.c	2004-02-28 18:13:43.000000000 -0800
@@ -57,7 +57,7 @@ static pidmap_t *map_limit = pidmap_arra
 
 static spinlock_t pidmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 
-inline void free_pidmap(int pid)
+fastcall void free_pidmap(int pid)
 {
 	pidmap_t *map = pidmap_array + pid / BITS_PER_PAGE;
 	int offset = pid & BITS_PER_PAGE_MASK;
@@ -148,7 +148,7 @@ failure:
 	return -1;
 }
 
-inline struct pid *find_pid(enum pid_type type, int nr)
+fastcall struct pid *find_pid(enum pid_type type, int nr)
 {
 	struct list_head *elem, *bucket = &pid_hash[type][pid_hashfn(nr)];
 	struct pid *pid;
@@ -161,14 +161,14 @@ inline struct pid *find_pid(enum pid_typ
 	return NULL;
 }
 
-void link_pid(task_t *task, struct pid_link *link, struct pid *pid)
+void fastcall link_pid(task_t *task, struct pid_link *link, struct pid *pid)
 {
 	atomic_inc(&pid->count);
 	list_add_tail(&link->pid_chain, &pid->task_list);
 	link->pidptr = pid;
 }
 
-int attach_pid(task_t *task, enum pid_type type, int nr)
+int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
 {
 	struct pid *pid = find_pid(type, nr);
 
@@ -211,7 +211,7 @@ static void _detach_pid(task_t *task, en
 	__detach_pid(task, type);
 }
 
-void detach_pid(task_t *task, enum pid_type type)
+void fastcall detach_pid(task_t *task, enum pid_type type)
 {
 	int nr = __detach_pid(task, type);
 
diff -puN kernel/rcupdate.c~fastcall-warning-fixes kernel/rcupdate.c
--- 25/kernel/rcupdate.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/kernel/rcupdate.c	2004-02-28 18:13:43.000000000 -0800
@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct tasklet_str
  * The read-side of critical section that use call_rcu() for updation must 
  * be protected by rcu_read_lock()/rcu_read_unlock().
  */
-void call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
+void fastcall call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
 {
 	int cpu;
 	unsigned long flags;
diff -puN kernel/sched.c~fastcall-warning-fixes kernel/sched.c
--- 25/kernel/sched.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/kernel/sched.c	2004-02-28 18:13:43.000000000 -0800
@@ -810,7 +810,7 @@ out:
 
 	return success;
 }
-int wake_up_process(task_t * p)
+int fastcall wake_up_process(task_t * p)
 {
 	return try_to_wake_up(p, TASK_STOPPED |
 		       		 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
@@ -818,7 +818,7 @@ int wake_up_process(task_t * p)
 
 EXPORT_SYMBOL(wake_up_process);
 
-int wake_up_state(task_t *p, unsigned int state)
+int fastcall wake_up_state(task_t *p, unsigned int state)
 {
 	return try_to_wake_up(p, state, 0);
 }
@@ -827,7 +827,7 @@ int wake_up_state(task_t *p, unsigned in
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
  */
-void sched_fork(task_t *p)
+void fastcall sched_fork(task_t *p)
 {
 	/*
 	 * We mark the process as running here, but have not actually
@@ -883,7 +883,7 @@ void sched_fork(task_t *p)
  * This function will do some initial scheduler statistics housekeeping
  * that must be done for every newly created process.
  */
-void wake_up_forked_process(task_t * p)
+void fastcall wake_up_forked_process(task_t * p)
 {
 	unsigned long flags;
 	runqueue_t *rq = task_rq_lock(current, &flags);
@@ -927,7 +927,7 @@ void wake_up_forked_process(task_t * p)
  * artificially, because any timeslice recovered here
  * was given away by the parent in the first place.)
  */
-void sched_exit(task_t * p)
+void fastcall sched_exit(task_t * p)
 {
 	unsigned long flags;
 	runqueue_t *rq;
@@ -2134,7 +2134,7 @@ static void __wake_up_common(wait_queue_
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
  */
-void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
 	unsigned long flags;
 
@@ -2148,7 +2148,7 @@ EXPORT_SYMBOL(__wake_up);
 /*
  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  */
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
 	__wake_up_common(q, mode, 1, 0);
 }
@@ -2166,7 +2166,7 @@ void __wake_up_locked(wait_queue_head_t 
  *
  * On UP it can prevent extra preemption.
  */
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
 	unsigned long flags;
 
@@ -2183,7 +2183,7 @@ void __wake_up_sync(wait_queue_head_t *q
 
 EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
 
-void complete(struct completion *x)
+void fastcall complete(struct completion *x)
 {
 	unsigned long flags;
 
@@ -2196,7 +2196,7 @@ void complete(struct completion *x)
 
 EXPORT_SYMBOL(complete);
 
-void complete_all(struct completion *x)
+void fastcall complete_all(struct completion *x)
 {
 	unsigned long flags;
 
@@ -2207,7 +2207,7 @@ void complete_all(struct completion *x)
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 
-void wait_for_completion(struct completion *x)
+void fastcall wait_for_completion(struct completion *x)
 {
 	might_sleep();
 	spin_lock_irq(&x->wait.lock);
@@ -2254,7 +2254,7 @@ EXPORT_SYMBOL(wait_for_completion);
 
 static int sleep_on_bkl_warnings;
 
-void interruptible_sleep_on(wait_queue_head_t *q)
+void fastcall interruptible_sleep_on(wait_queue_head_t *q)
 {
 	SLEEP_ON_VAR
 
@@ -2269,7 +2269,7 @@ void interruptible_sleep_on(wait_queue_h
 
 EXPORT_SYMBOL(interruptible_sleep_on);
 
-long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
 	SLEEP_ON_VAR
 
@@ -2286,7 +2286,7 @@ long interruptible_sleep_on_timeout(wait
 
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
-void sleep_on(wait_queue_head_t *q)
+void fastcall sleep_on(wait_queue_head_t *q)
 {
 	SLEEP_ON_VAR
 
@@ -2301,7 +2301,7 @@ void sleep_on(wait_queue_head_t *q)
 
 EXPORT_SYMBOL(sleep_on);
 
-long sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
 	SLEEP_ON_VAR
 
diff -puN kernel/signal.c~fastcall-warning-fixes kernel/signal.c
--- 25/kernel/signal.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/kernel/signal.c	2004-02-28 18:13:43.000000000 -0800
@@ -214,7 +214,7 @@ static inline int has_pending_signals(si
 
 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 
-inline void recalc_sigpending_tsk(struct task_struct *t)
+fastcall void recalc_sigpending_tsk(struct task_struct *t)
 {
 	if (t->signal->group_stop_count > 0 ||
 	    PENDING(&t->pending, &t->blocked) ||
diff -puN kernel/softirq.c~fastcall-warning-fixes kernel/softirq.c
--- 25/kernel/softirq.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/kernel/softirq.c	2004-02-28 18:13:43.000000000 -0800
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(local_bh_enable);
 /*
  * This function must run with irqs disabled!
  */
-inline void raise_softirq_irqoff(unsigned int nr)
+inline fastcall void raise_softirq_irqoff(unsigned int nr)
 {
 	__raise_softirq_irqoff(nr);
 
@@ -160,7 +160,7 @@ inline void raise_softirq_irqoff(unsigne
 
 EXPORT_SYMBOL(raise_softirq_irqoff);
 
-void raise_softirq(unsigned int nr)
+void fastcall raise_softirq(unsigned int nr)
 {
 	unsigned long flags;
 
@@ -190,7 +190,7 @@ struct tasklet_head
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
 
-void __tasklet_schedule(struct tasklet_struct *t)
+void fastcall __tasklet_schedule(struct tasklet_struct *t)
 {
 	unsigned long flags;
 
@@ -203,7 +203,7 @@ void __tasklet_schedule(struct tasklet_s
 
 EXPORT_SYMBOL(__tasklet_schedule);
 
-void __tasklet_hi_schedule(struct tasklet_struct *t)
+void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
 {
 	unsigned long flags;
 
diff -puN kernel/timer.c~fastcall-warning-fixes kernel/timer.c
--- 25/kernel/timer.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/kernel/timer.c	2004-02-28 18:13:43.000000000 -0800
@@ -1003,7 +1003,7 @@ static void futex_timeout(unsigned long 
  *
  * In all cases the return value is guaranteed to be non-negative.
  */
-signed long schedule_timeout(signed long timeout)
+fastcall signed long schedule_timeout(signed long timeout)
 {
 	struct timer_list timer;
 	unsigned long expire;
diff -puN kernel/workqueue.c~fastcall-warning-fixes kernel/workqueue.c
--- 25/kernel/workqueue.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/kernel/workqueue.c	2004-02-28 18:13:43.000000000 -0800
@@ -94,7 +94,7 @@ static void __queue_work(struct cpu_work
  * We queue the work to the CPU it was submitted, but there is no
  * guarantee that it will be processed by that CPU.
  */
-int queue_work(struct workqueue_struct *wq, struct work_struct *work)
+int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret = 0, cpu = get_cpu();
 
@@ -115,7 +115,7 @@ static void delayed_work_timer_fn(unsign
 	__queue_work(wq->cpu_wq + smp_processor_id(), work);
 }
 
-int queue_delayed_work(struct workqueue_struct *wq,
+int fastcall queue_delayed_work(struct workqueue_struct *wq,
 			struct work_struct *work, unsigned long delay)
 {
 	int ret = 0;
@@ -217,7 +217,7 @@ static int worker_thread(void *__cwq)
  * This function used to run the workqueues itself.  Now we just wait for the
  * helper threads to do it.
  */
-void flush_workqueue(struct workqueue_struct *wq)
+void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
 	struct cpu_workqueue_struct *cwq;
 	int cpu;
@@ -340,12 +340,12 @@ void destroy_workqueue(struct workqueue_
 
 static struct workqueue_struct *keventd_wq;
 
-int schedule_work(struct work_struct *work)
+int fastcall schedule_work(struct work_struct *work)
 {
 	return queue_work(keventd_wq, work);
 }
 
-int schedule_delayed_work(struct work_struct *work, unsigned long delay)
+int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
 {
 	return queue_delayed_work(keventd_wq, work, delay);
 }
diff -puN lib/rwsem.c~fastcall-warning-fixes lib/rwsem.c
--- 25/lib/rwsem.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/lib/rwsem.c	2004-02-28 18:13:43.000000000 -0800
@@ -162,7 +162,7 @@ static inline struct rw_semaphore *rwsem
 /*
  * wait for the read lock to be granted
  */
-struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_down_read_failed(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 
@@ -178,7 +178,7 @@ struct rw_semaphore *rwsem_down_read_fai
 /*
  * wait for the write lock to be granted
  */
-struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_down_write_failed(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 
@@ -195,7 +195,7 @@ struct rw_semaphore *rwsem_down_write_fa
  * handle waking up a waiter on the semaphore
  * - up_read has decremented the active part of the count if we come here
  */
-struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
 	rwsemtrace(sem,"Entering rwsem_wake");
 
@@ -217,7 +217,7 @@ struct rw_semaphore *rwsem_wake(struct r
  * - caller incremented waiting part of count, and discovered it to be still negative
  * - just wake up any readers at the front of the queue
  */
-struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
+struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
 	rwsemtrace(sem,"Entering rwsem_downgrade_wake");
 
diff -puN lib/rwsem-spinlock.c~fastcall-warning-fixes lib/rwsem-spinlock.c
--- 25/lib/rwsem-spinlock.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/lib/rwsem-spinlock.c	2004-02-28 18:13:43.000000000 -0800
@@ -29,7 +29,7 @@ void rwsemtrace(struct rw_semaphore *sem
 /*
  * initialise the semaphore
  */
-void init_rwsem(struct rw_semaphore *sem)
+void fastcall init_rwsem(struct rw_semaphore *sem)
 {
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
@@ -117,7 +117,7 @@ static inline struct rw_semaphore *__rws
 /*
  * get a read lock on the semaphore
  */
-void __down_read(struct rw_semaphore *sem)
+void fastcall __down_read(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
@@ -162,7 +162,7 @@ void __down_read(struct rw_semaphore *se
 /*
  * trylock for reading -- returns 1 if successful, 0 if contention
  */
-int __down_read_trylock(struct rw_semaphore *sem)
+int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
 	int ret = 0;
 	rwsemtrace(sem,"Entering __down_read_trylock");
@@ -185,7 +185,7 @@ int __down_read_trylock(struct rw_semaph
  * get a write lock on the semaphore
  * - note that we increment the waiting count anyway to indicate an exclusive lock
  */
-void __down_write(struct rw_semaphore *sem)
+void fastcall __down_write(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
@@ -230,7 +230,7 @@ void __down_write(struct rw_semaphore *s
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
-int __down_write_trylock(struct rw_semaphore *sem)
+int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
 	int ret = 0;
 	rwsemtrace(sem,"Entering __down_write_trylock");
@@ -252,7 +252,7 @@ int __down_write_trylock(struct rw_semap
 /*
  * release a read lock on the semaphore
  */
-void __up_read(struct rw_semaphore *sem)
+void fastcall __up_read(struct rw_semaphore *sem)
 {
 	rwsemtrace(sem,"Entering __up_read");
 
@@ -269,7 +269,7 @@ void __up_read(struct rw_semaphore *sem)
 /*
  * release a write lock on the semaphore
  */
-void __up_write(struct rw_semaphore *sem)
+void fastcall __up_write(struct rw_semaphore *sem)
 {
 	rwsemtrace(sem,"Entering __up_write");
 
@@ -288,7 +288,7 @@ void __up_write(struct rw_semaphore *sem
  * downgrade a write lock into a read lock
  * - just wake up any readers at the front of the queue
  */
-void __downgrade_write(struct rw_semaphore *sem)
+void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
 	rwsemtrace(sem,"Entering __downgrade_write");
 
diff -puN mm/filemap.c~fastcall-warning-fixes mm/filemap.c
--- 25/mm/filemap.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/mm/filemap.c	2004-02-28 18:13:43.000000000 -0800
@@ -292,7 +292,7 @@ static wait_queue_head_t *page_waitqueue
 	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
 }
 
-void wait_on_page_bit(struct page *page, int bit_nr)
+void fastcall wait_on_page_bit(struct page *page, int bit_nr)
 {
 	wait_queue_head_t *waitqueue = page_waitqueue(page);
 	DEFINE_WAIT(wait);
@@ -324,7 +324,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * the clear_bit and the read of the waitqueue (to avoid SMP races with a
  * parallel wait_on_page_locked()).
  */
-void unlock_page(struct page *page)
+void fastcall unlock_page(struct page *page)
 {
 	wait_queue_head_t *waitqueue = page_waitqueue(page);
 	smp_mb__before_clear_bit();
@@ -365,7 +365,7 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void __lock_page(struct page *page)
+void fastcall __lock_page(struct page *page)
 {
 	wait_queue_head_t *wqh = page_waitqueue(page);
 	DEFINE_WAIT(wait);
@@ -953,7 +953,7 @@ asmlinkage ssize_t sys_readahead(int fd,
  * and schedules an I/O to read in its contents from disk.
  */
 static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
-static int page_cache_read(struct file * file, unsigned long offset)
+static int fastcall page_cache_read(struct file * file, unsigned long offset)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *page; 
diff -puN mm/highmem.c~fastcall-warning-fixes mm/highmem.c
--- 25/mm/highmem.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/mm/highmem.c	2004-02-28 18:13:43.000000000 -0800
@@ -147,7 +147,7 @@ start:
 	return vaddr;
 }
 
-void *kmap_high(struct page *page)
+void fastcall *kmap_high(struct page *page)
 {
 	unsigned long vaddr;
 
@@ -170,7 +170,7 @@ void *kmap_high(struct page *page)
 
 EXPORT_SYMBOL(kmap_high);
 
-void kunmap_high(struct page *page)
+void fastcall kunmap_high(struct page *page)
 {
 	unsigned long vaddr;
 	unsigned long nr;
diff -puN mm/memory.c~fastcall-warning-fixes mm/memory.c
--- 25/mm/memory.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/mm/memory.c	2004-02-28 18:13:43.000000000 -0800
@@ -145,7 +145,7 @@ void clear_page_tables(struct mmu_gather
 	} while (--nr);
 }
 
-pte_t * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
 	if (!pmd_present(*pmd)) {
 		struct page *new;
@@ -171,7 +171,7 @@ out:
 	return pte_offset_map(pmd, address);
 }
 
-pte_t * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
+pte_t fastcall * pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
 	if (!pmd_present(*pmd)) {
 		pte_t *new;
@@ -1669,7 +1669,7 @@ int handle_mm_fault(struct mm_struct *mm
  * On a two-level page table, this ends up actually being entirely
  * optimized away.
  */
-pmd_t *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
 	pmd_t *new;
 
diff -puN mm/page_alloc.c~fastcall-warning-fixes mm/page_alloc.c
--- 25/mm/page_alloc.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/mm/page_alloc.c	2004-02-28 18:13:43.000000000 -0800
@@ -451,7 +451,7 @@ void drain_local_pages(void)
  * Free a 0-order page
  */
 static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
-static void free_hot_cold_page(struct page *page, int cold)
+static void fastcall free_hot_cold_page(struct page *page, int cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
@@ -470,12 +470,12 @@ static void free_hot_cold_page(struct pa
 	put_cpu();
 }
 
-void free_hot_page(struct page *page)
+void fastcall free_hot_page(struct page *page)
 {
 	free_hot_cold_page(page, 0);
 }
 	
-void free_cold_page(struct page *page)
+void fastcall free_cold_page(struct page *page)
 {
 	free_hot_cold_page(page, 1);
 }
@@ -540,7 +540,7 @@ static struct page *buffered_rmqueue(str
  * sized machine, GFP_HIGHMEM and GFP_KERNEL requests basically leave the DMA
  * zone untouched.
  */
-struct page *
+struct page * fastcall
 __alloc_pages(unsigned int gfp_mask, unsigned int order,
 		struct zonelist *zonelist)
 {
@@ -729,7 +729,7 @@ found_node:
 /*
  * Common helper functions.
  */
-unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
+fastcall unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
 {
 	struct page * page;
 
@@ -745,7 +745,7 @@ unsigned long __get_free_pages(unsigned 
 
 EXPORT_SYMBOL(__get_free_pages);
 
-unsigned long get_zeroed_page(unsigned int gfp_mask)
+fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
 {
 	struct page * page;
 
@@ -774,7 +774,7 @@ void __pagevec_free(struct pagevec *pvec
 		free_hot_cold_page(pvec->pages[i], pvec->cold);
 }
 
-void __free_pages(struct page *page, unsigned int order)
+fastcall void __free_pages(struct page *page, unsigned int order)
 {
 	if (!PageReserved(page) && put_page_testzero(page)) {
 		if (order == 0)
@@ -786,7 +786,7 @@ void __free_pages(struct page *page, uns
 
 EXPORT_SYMBOL(__free_pages);
 
-void free_pages(unsigned long addr, unsigned int order)
+fastcall void free_pages(unsigned long addr, unsigned int order)
 {
 	if (addr != 0) {
 		BUG_ON(!virt_addr_valid(addr));
diff -puN mm/rmap.c~fastcall-warning-fixes mm/rmap.c
--- 25/mm/rmap.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/mm/rmap.c	2004-02-28 18:13:43.000000000 -0800
@@ -112,7 +112,7 @@ pte_chain_encode(struct pte_chain *pte_c
  * If the page has a single-entry pte_chain, collapse that back to a PageDirect
  * representation.  This way, it's only done under memory pressure.
  */
-int page_referenced(struct page * page)
+int fastcall page_referenced(struct page * page)
 {
 	struct pte_chain *pc;
 	int referenced = 0;
@@ -165,7 +165,7 @@ int page_referenced(struct page * page)
  * Add a new pte reverse mapping to a page.
  * The caller needs to hold the mm->page_table_lock.
  */
-struct pte_chain *
+struct pte_chain * fastcall
 page_add_rmap(struct page *page, pte_t *ptep, struct pte_chain *pte_chain)
 {
 	pte_addr_t pte_paddr = ptep_to_paddr(ptep);
@@ -221,7 +221,7 @@ out:
  * the page.
  * Caller needs to hold the mm->page_table_lock.
  */
-void page_remove_rmap(struct page *page, pte_t *ptep)
+void fastcall page_remove_rmap(struct page *page, pte_t *ptep)
 {
 	pte_addr_t pte_paddr = ptep_to_paddr(ptep);
 	struct pte_chain *pc;
@@ -293,7 +293,7 @@ out_unlock:
  *		    mm->page_table_lock	try_to_unmap_one(), trylock
  */
 static int FASTCALL(try_to_unmap_one(struct page *, pte_addr_t));
-static int try_to_unmap_one(struct page * page, pte_addr_t paddr)
+static int fastcall try_to_unmap_one(struct page * page, pte_addr_t paddr)
 {
 	pte_t *ptep = rmap_ptep_map(paddr);
 	unsigned long address = ptep_to_address(ptep);
@@ -382,7 +382,7 @@ out_unlock:
  * SWAP_AGAIN	- we missed a trylock, try again later
  * SWAP_FAIL	- the page is unswappable
  */
-int try_to_unmap(struct page * page)
+int fastcall try_to_unmap(struct page * page)
 {
 	struct pte_chain *pc, *next_pc, *start;
 	int ret = SWAP_SUCCESS;
diff -puN mm/slab.c~fastcall-warning-fixes mm/slab.c
--- 25/mm/slab.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/mm/slab.c	2004-02-28 18:13:43.000000000 -0800
@@ -2162,7 +2162,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
  *
  * Currently only used for dentry validation.
  */
-int kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
+int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
 {
 	unsigned long addr = (unsigned long) ptr;
 	unsigned long min_addr = PAGE_OFFSET;
diff -puN mm/swap.c~fastcall-warning-fixes mm/swap.c
--- 25/mm/swap.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/mm/swap.c	2004-02-28 18:13:43.000000000 -0800
@@ -79,7 +79,7 @@ int rotate_reclaimable_page(struct page 
 /*
  * FIXME: speed this up?
  */
-void activate_page(struct page *page)
+void fastcall activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
 
@@ -100,7 +100,7 @@ void activate_page(struct page *page)
  * inactive,referenced		->	active,unreferenced
  * active,unreferenced		->	active,referenced
  */
-void mark_page_accessed(struct page *page)
+void fastcall mark_page_accessed(struct page *page)
 {
 	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
 		activate_page(page);
@@ -119,7 +119,7 @@ EXPORT_SYMBOL(mark_page_accessed);
 static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs) = { 0, };
 static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs) = { 0, };
 
-void lru_cache_add(struct page *page)
+void fastcall lru_cache_add(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
 
@@ -129,7 +129,7 @@ void lru_cache_add(struct page *page)
 	put_cpu_var(lru_add_pvecs);
 }
 
-void lru_cache_add_active(struct page *page)
+void fastcall lru_cache_add_active(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
 
@@ -155,7 +155,7 @@ void lru_add_drain(void)
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs.  But it gets used by networking.
  */
-void __page_cache_release(struct page *page)
+void fastcall __page_cache_release(struct page *page)
 {
 	unsigned long flags;
 	struct zone *zone = page_zone(page);
diff -puN net/bluetooth/rfcomm/core.c~fastcall-warning-fixes net/bluetooth/rfcomm/core.c
--- 25/net/bluetooth/rfcomm/core.c~fastcall-warning-fixes	2004-02-28 18:13:43.000000000 -0800
+++ 25-akpm/net/bluetooth/rfcomm/core.c	2004-02-28 18:13:43.000000000 -0800
@@ -409,7 +409,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d
 	return len;
 }
 
-void __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
+void fastcall __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
 {
 	BT_DBG("dlc %p state %ld", d, d->state);
 
@@ -420,7 +420,7 @@ void __rfcomm_dlc_throttle(struct rfcomm
 	rfcomm_schedule(RFCOMM_SCHED_TX);
 }
 
-void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
+void fastcall __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
 {
 	BT_DBG("dlc %p state %ld", d, d->state);
 

_