-rw-r--r--  kernel/sched.c    | 10 +++++-----
-rw-r--r--  lib/kernel_lock.c |  2 +-
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8b92f40c147..e0fa739a441 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1189,10 +1189,10 @@ static void resched_task(struct task_struct *p)
 
 	assert_spin_locked(&task_rq(p)->lock);
 
-	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
+	if (test_tsk_need_resched(p))
 		return;
 
-	set_tsk_thread_flag(p, TIF_NEED_RESCHED);
+	set_tsk_need_resched(p);
 
 	cpu = task_cpu(p);
 	if (cpu == smp_processor_id())
@@ -1248,7 +1248,7 @@ void wake_up_idle_cpu(int cpu)
 	 * lockless. The worst case is that the other CPU runs the
 	 * idle task through an additional NOOP schedule()
 	 */
-	set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
+	set_tsk_need_resched(rq->idle);
 
 	/* NEED_RESCHED must be visible before we test polling */
 	smp_mb();
@@ -4740,7 +4740,7 @@ asmlinkage void __sched preempt_schedule(void)
 		 * between schedule and now.
 		 */
 		barrier();
-	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+	} while (need_resched());
 }
 EXPORT_SYMBOL(preempt_schedule);
 
@@ -4769,7 +4769,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 		 * between schedule and now.
 		 */
 		barrier();
-	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
+	} while (need_resched());
 }
 
 #endif /* CONFIG_PREEMPT */
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 01a3c22c1b5..39f1029e352 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -39,7 +39,7 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
 int __lockfunc __reacquire_kernel_lock(void)
 {
 	while (!_raw_spin_trylock(&kernel_flag)) {
-		if (test_thread_flag(TIF_NEED_RESCHED))
+		if (need_resched())
 			return -EAGAIN;
 		cpu_relax();
 	}
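
The change is a cleanup: open-coded TIF_NEED_RESCHED tests and sets are replaced
by the existing wrappers from include/linux/sched.h. For reference, a sketch of
those wrappers as they looked in kernels of this era is shown below (the exact
definitions in this particular tree may differ slightly):

/*
 * Sketch of the helpers the diff switches to, based on the
 * include/linux/sched.h definitions of this era (assumption:
 * this tree's copies match these).
 */
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	/* the unlikely() hint lives inside the helper */
	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

Because the helpers already carry the unlikely() annotation, dropping it from
the call sites in the hunks above should not change the generated branch hints;
the patch is intended to be purely a readability cleanup with no functional
change.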