author    Andrew Morton <akpm@osdl.org>        2006-07-28 22:52:09 -0400
committer Greg Kroah-Hartman <gregkh@suse.de>  2006-08-06 20:52:16 -0700
commit    fed0a6b6fb78cd432863f29e83a458c9fd13a308
tree      7347e3962c19f346cbb9f8ca7190fc51ec1a5eb8 /kernel
parent    accdd0e1f5080ec98f7bf87c5a8b255539e1ab4b
cond_resched() fix
Fix a bug identified by Zou Nan hai <nanhai.zou@intel.com>:
If the system is in state SYSTEM_BOOTING and need_resched() is true,
cond_resched() returns true even though it didn't actually reschedule.
Consequently, need_resched() remains true and JBD locks up.
Fix that by teaching cond_resched() to return true only if it really did
call schedule().
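
To make that contract concrete, here is a minimal user-space sketch of the
old versus fixed return logic. All names here (booting, need_resched_flag,
fake_schedule(), and both cond_resched_* variants) are illustrative
stand-ins, not kernel code:

#include <stdio.h>

/* Illustrative stand-ins for kernel state; none of these are real
 * kernel APIs. */
static int booting = 1;			/* models system_state == SYSTEM_BOOTING */
static int need_resched_flag = 1;	/* models need_resched() */

static void fake_schedule(void)
{
	need_resched_flag = 0;		/* a real reschedule clears the flag */
}

/* Old behavior: returns 1 whenever the flag was set, even when the
 * SYSTEM_BOOTING check inside __cond_resched() skipped schedule(). */
static int cond_resched_old(void)
{
	if (need_resched_flag) {
		if (!booting)
			fake_schedule();
		return 1;		/* may lie: flag can still be set */
	}
	return 0;
}

/* Fixed behavior: legality is checked first, so returning 1 implies
 * fake_schedule() really ran and cleared the flag. */
static int cond_resched_fixed(void)
{
	if (need_resched_flag && !booting) {
		fake_schedule();
		return 1;
	}
	return 0;
}

int main(void)
{
	/* A caller loop like "while (cond_resched());" spins forever
	 * on the old variant during boot: it returns 1 but the flag
	 * never clears. */
	int r = cond_resched_old();
	printf("old:   returned %d, flag still %d\n", r, need_resched_flag);
	r = cond_resched_fixed();
	printf("fixed: returned %d, flag still %d\n", r, need_resched_flag);
	return 0;
}

Run during "boot", the old variant reports 1 with the flag still set, which
is exactly the state that makes such a caller loop spin forever.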
cond_resched_lock() and cond_resched_softirq() have a problem too. If we're
in SYSTEM_BOOTING state and need_resched() is true, these functions drop
the lock and then try to call schedule(), but the SYSTEM_BOOTING state
prevents schedule() from being called. So on return, need_resched() is
still true, yet cond_resched_lock() has to return 1 to tell the caller that
the lock was dropped. The caller will probably lock up.
Bottom line: if these functions dropped the lock, they _must_ call schedule()
to clear need_resched(). Make it so.
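
The invariant for the locked variant can be sketched the same way; again,
need_resched_flag, running, lock_held, and fake_schedule() are user-space
stand-ins rather than kernel primitives. Because legality is decided before
the lock is dropped, a return of 1 now implies both that the lock was
dropped and that the reschedule ran:

#include <assert.h>

static int need_resched_flag = 1;	/* models need_resched() */
static int running = 1;			/* models SYSTEM_RUNNING */
static int lock_held = 1;		/* models the spinlock */

static void fake_schedule(void)
{
	need_resched_flag = 0;
}

/* Mirrors the fixed ordering in cond_resched_lock(): decide whether a
 * reschedule is legal while still holding the lock, so the lock is
 * only ever dropped on a path that is guaranteed to reschedule. */
static int cond_resched_lock_sketch(void)
{
	if (need_resched_flag && running) {
		lock_held = 0;		/* drop the lock */
		fake_schedule();	/* guaranteed to run */
		lock_held = 1;		/* retake the lock */
		return 1;		/* "dropped and rescheduled" */
	}
	return 0;			/* lock was never dropped */
}

int main(void)
{
	if (cond_resched_lock_sketch())
		assert(need_resched_flag == 0);	/* the invariant holds */
	assert(lock_held);
	return 0;
}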
Also, uninline __cond_resched(): it's largish, and it's a slow path.
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  25
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c13f1bd2df7..61d11691a62 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4044,17 +4044,22 @@ asmlinkage long sys_sched_yield(void)
 	return 0;
 }
 
-static inline void __cond_resched(void)
+static inline int __resched_legal(int expected_preempt_count)
+{
+	if (unlikely(preempt_count() != expected_preempt_count))
+		return 0;
+	if (unlikely(system_state != SYSTEM_RUNNING))
+		return 0;
+	return 1;
+}
+
+static void __cond_resched(void)
 {
 	/*
 	 * The BKS might be reacquired before we have dropped
 	 * PREEMPT_ACTIVE, which could trigger a second
 	 * cond_resched() call.
 	 */
-	if (unlikely(preempt_count()))
-		return;
-	if (unlikely(system_state != SYSTEM_RUNNING))
-		return;
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		schedule();
@@ -4064,13 +4069,12 @@ static inline void __cond_resched(void)
 
 int __sched cond_resched(void)
 {
-	if (need_resched()) {
+	if (need_resched() && __resched_legal(0)) {
 		__cond_resched();
 		return 1;
 	}
 	return 0;
 }
-
 EXPORT_SYMBOL(cond_resched);
 
 /*
@@ -4091,7 +4095,7 @@ int cond_resched_lock(spinlock_t *lock)
 		ret = 1;
 		spin_lock(lock);
 	}
-	if (need_resched()) {
+	if (need_resched() && __resched_legal(1)) {
 		_raw_spin_unlock(lock);
 		preempt_enable_no_resched();
 		__cond_resched();
@@ -4100,14 +4104,13 @@ int cond_resched_lock(spinlock_t *lock)
 	}
 	return ret;
 }
-
 EXPORT_SYMBOL(cond_resched_lock);
 
 int __sched cond_resched_softirq(void)
 {
 	BUG_ON(!in_softirq());
 
-	if (need_resched()) {
+	if (need_resched() && __resched_legal(0)) {
 		__local_bh_enable();
 		__cond_resched();
 		local_bh_disable();
@@ -4115,10 +4118,8 @@ int __sched cond_resched_softirq(void)
 	}
 	return 0;
 }
-
 EXPORT_SYMBOL(cond_resched_softirq);
 
-
 /**
  * yield - yield the current processor to other threads.
  *