author     Harvey Harrison <harvey.harrison@gmail.com>        2008-02-08 04:19:53 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-08 09:22:31 -0800
commit     7ad5b3a505e68cfdc342933d6e0fc0eaa5e0a4f7 (patch)
tree       6715ffd8df509d3d53dea581bb97418a21bc7cbc /kernel/sched.c
parent     fc9b52cd8f5f459b88adcf67c47668425ae31a78 (diff)
kernel: remove fastcall in kernel/*
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
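For context, a minimal sketch of what the removed annotation amounted to. Assumption: this follows the historical x86-32 definition (roughly as in the old i386 <asm/linkage.h>); the exact header and guard may differ by tree, and other architectures defined it to nothing.

/*
 * Sketch only, not part of this patch.  On 32-bit x86, "fastcall"
 * asked gcc to pass the first arguments in registers; once the whole
 * kernel was built with -mregparm=3, the per-function annotation no
 * longer changed code generation, so deleting it is purely cosmetic.
 */
#if defined(__i386__)
#define fastcall __attribute__((regparm(3)))	/* args in registers */
#else
#define fastcall				/* expands to nothing */
#endif

struct task_struct;	/* opaque forward declaration for this sketch */

/* What the annotated prototype expanded to before this patch: */
int fastcall wake_up_process(struct task_struct *p);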
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 9474b23c28b..3eedd526090 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1893,13 +1893,13 @@ out:
 	return success;
 }
 
-int fastcall wake_up_process(struct task_struct *p)
+int wake_up_process(struct task_struct *p)
 {
 	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
-int fastcall wake_up_state(struct task_struct *p, unsigned int state)
+int wake_up_state(struct task_struct *p, unsigned int state)
 {
 	return try_to_wake_up(p, state, 0);
 }
@@ -1986,7 +1986,7 @@ void sched_fork(struct task_struct *p, int clone_flags)
  * that must be done for every newly created context, then puts the task
  * on the runqueue and wakes it.
  */
-void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
+void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
@@ -3753,7 +3753,7 @@ void scheduler_tick(void)
 
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
 
-void fastcall add_preempt_count(int val)
+void add_preempt_count(int val)
 {
 	/*
 	 * Underflow?
@@ -3769,7 +3769,7 @@ void fastcall add_preempt_count(int val)
 }
 EXPORT_SYMBOL(add_preempt_count);
 
-void fastcall sub_preempt_count(int val)
+void sub_preempt_count(int val)
 {
 	/*
 	 * Underflow?
@@ -4067,7 +4067,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
  * @key: is directly passed to the wakeup function
  */
-void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
+void __wake_up(wait_queue_head_t *q, unsigned int mode,
 			int nr_exclusive, void *key)
 {
 	unsigned long flags;
@@ -4081,7 +4081,7 @@ EXPORT_SYMBOL(__wake_up);
 /*
  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  */
-void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
 	__wake_up_common(q, mode, 1, 0, NULL);
 }
@@ -4099,7 +4099,7 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  *
  * On UP it can prevent extra preemption.
 */
-void fastcall
+void
 __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
 	unsigned long flags;
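Callers are untouched by this diff, since only the annotation on the definitions changes. A hedged sketch of a typical wakeup path using the interfaces patched above; the wait-queue and function names other than wake_up_process()/wake_up() are illustrative, and the snippet assumes a normal in-tree kernel build:

#include <linux/sched.h>
#include <linux/wait.h>

/* Illustrative only: a driver-style completion path.  The prototypes of
 * wake_up_process() and the wake_up() family are unchanged by the
 * fastcall removal, so call sites like this continue to build as-is. */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);

static void example_complete(struct task_struct *worker)
{
	/* Wake one specific sleeping task... */
	wake_up_process(worker);
	/* ...or everything sleeping on a wait queue. */
	wake_up(&example_wq);
}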