From 01f23e1630d944f7085cd8fd5793e31ea91c03d8 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Sun, 27 Nov 2011 21:43:10 +0000
Subject: sched/arch: Introduce the finish_arch_post_lock_switch() scheduler
 callback

This callback is called by the scheduler after rq->lock has been
released and interrupts enabled. It will be used in subsequent patches
on the ARM architecture.

Signed-off-by: Catalin Marinas
Reviewed-by: Will Deacon
Reviewed-by: Frank Rowand
Tested-by: Will Deacon
Tested-by: Marc Zyngier
Acked-by: Peter Zijlstra
Link: http://lkml.kernel.org/n/20120313110840.7b444deb6b1bb902c15f3cdf@canb.auug.org.au
Signed-off-by: Ingo Molnar
---
 kernel/sched/core.c  | 1 +
 kernel/sched/sched.h | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b342f57879e..423f40f32a5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1932,6 +1932,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
+	finish_arch_post_lock_switch();
 
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 98c0c2623db..d72483d07c9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -692,6 +692,9 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
 #ifndef finish_arch_switch
 # define finish_arch_switch(prev)	do { } while (0)
 #endif
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch()	do { } while (0)
+#endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
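
How an architecture consumes the new hook is not shown in this patch; the
sketch below is a hypothetical example of the intended pattern (ARM's real
implementation only arrives in the follow-up patches the changelog
mentions). Defining the macro to its own name in a header the scheduler
core already includes, e.g. the arch's <asm/mmu_context.h>, keeps the
#ifndef fallback in kernel/sched/sched.h from installing the empty stub:

#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	/*
	 * Called from finish_task_switch() after rq->lock has been
	 * released and interrupts re-enabled, so work done here may
	 * take locks or wait in ways that would be unsafe while
	 * holding the runqueue lock -- for instance completing a
	 * deferred MMU context switch.
	 */
}

The self-#define idiom is the scheduler's usual way of letting an
architecture override a hook without a Kconfig symbol; finish_arch_switch()
just above the new fallback uses the same trick.
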
From 6135fc1eb4b1c9ae5f535507ed59591bab51e630 Mon Sep 17 00:00:00 2001
From: Stephen Boyd
Date: Wed, 28 Mar 2012 17:10:47 -0700
Subject: sched: Fix __schedule_bug() output when called from an interrupt

If schedule() is called from an interrupt handler, __schedule_bug()
will call show_regs() with the registers saved during the interrupt
handling done in do_IRQ(). This means we'll see the registers and the
backtrace for the process that was interrupted and not the full
backtrace explaining who called schedule().

This is due to 838225b ("sched: use show_regs() to improve
__schedule_bug() output", 2007-10-24), which improperly assumed that
get_irq_regs() would return the registers for the current stack
because it is being called from within an interrupt handler. Simply
remove the show_regs() code so that we dump a backtrace for the
interrupt handler that called schedule().

[ I ran across this when I was presented with a scheduling while
  atomic log with a stacktrace pointing at spin_unlock_irqrestore().
  It made no sense and I had to guess what interrupt handler could be
  called and poke around for someone calling schedule() in an
  interrupt handler. A simple test of putting an msleep() in an
  interrupt handler works better with this patch because you can
  actually see the msleep() call in the backtrace. ]

Also-reported-by: Chris Metcalf
Signed-off-by: Stephen Boyd
Cc: Satyam Sharma
Cc: Peter Zijlstra
Link: http://lkml.kernel.org/r/1332979847-27102-1-git-send-email-sboyd@codeaurora.org
Signed-off-by: Ingo Molnar
---
 kernel/sched/core.c | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9c1629c90b2..929fd857ef8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3099,8 +3099,6 @@ EXPORT_SYMBOL(sub_preempt_count);
  */
 static noinline void __schedule_bug(struct task_struct *prev)
 {
-	struct pt_regs *regs = get_irq_regs();
-
 	if (oops_in_progress)
 		return;
 
@@ -3111,11 +3109,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
 	print_modules();
 	if (irqs_disabled())
 		print_irqtrace_events(prev);
-
-	if (regs)
-		show_regs(regs);
-	else
-		dump_stack();
+	dump_stack();
 }
 
 /*

From e3831edd59edf57ca11fc289f08961b20baf5146 Mon Sep 17 00:00:00 2001
From: "Srivatsa S. Bhat"
Date: Fri, 30 Mar 2012 19:40:28 +0530
Subject: sched: Fix incorrect usage of for_each_cpu_mask() in
 select_fallback_rq()

The function for_each_cpu_mask() expects a *pointer* to struct
cpumask as its second argument, whereas select_fallback_rq() passes
the value itself.

Moreover, for_each_cpu_mask() has been marked as obsolete in
include/linux/cpumask.h. So move to the more appropriate
for_each_cpu() variant.

Reported-by: Sasha Levin
Signed-off-by: Srivatsa S. Bhat
Acked-by: Peter Zijlstra
Cc: Dave Jones
Cc: Liu Chuansheng
Cc: vapier@gentoo.org
Cc: rusty@rustcorp.com.au
Link: http://lkml.kernel.org/r/4F75BED4.9050005@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/core.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 985f6e59515..8773176b8c7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1268,7 +1268,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	int dest_cpu;
 
 	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_mask(dest_cpu, *nodemask) {
+	for_each_cpu(dest_cpu, nodemask) {
 		if (!cpu_online(dest_cpu))
 			continue;
 		if (!cpu_active(dest_cpu))
@@ -1279,7 +1279,7 @@
 
 	for (;;) {
 		/* Any allowed, online CPU? */
-		for_each_cpu_mask(dest_cpu, *tsk_cpus_allowed(p)) {
+		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
 			if (!cpu_online(dest_cpu))
 				continue;
 			if (!cpu_active(dest_cpu))
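
A hypothetical reproducer for the __schedule_bug() fix above -- none of
this code is in the patch, and the module, its names, and its use of the
timer API of that era are invented for illustration. Timer callbacks run
in softirq context, so the msleep() below schedules while atomic and
reaches __schedule_bug(); with show_regs() removed, the dump_stack()
output names this callback and the msleep() call instead of whatever code
the timer interrupt happened to preempt:

/* Hypothetical throwaway test module -- never do this in real code. */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

static struct timer_list buggy_timer;

static void buggy_timer_fn(unsigned long data)
{
	msleep(10);	/* sleeping in atomic context: triggers the BUG */
}

static int __init buggy_init(void)
{
	/* Arm a one-shot timer one second out. */
	setup_timer(&buggy_timer, buggy_timer_fn, 0);
	mod_timer(&buggy_timer, jiffies + HZ);
	return 0;
}

static void __exit buggy_exit(void)
{
	del_timer_sync(&buggy_timer);
}

module_init(buggy_init);
module_exit(buggy_exit);
MODULE_LICENSE("GPL");
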
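And a small usage sketch for the for_each_cpu() conversion in the last
commit -- the helper and its name are likewise invented for illustration.
The obsolete iterator was written against a struct cpumask value, so
callers holding a pointer had to dereference it, while for_each_cpu()
takes the pointer directly:

#include <linux/cpumask.h>

/* Hypothetical helper: count how many CPUs in @mask are online. */
static int count_online_cpus_in(const struct cpumask *mask)
{
	int cpu, n = 0;

	/* Obsolete spelling was: for_each_cpu_mask(cpu, *mask) -- note the '*'. */
	for_each_cpu(cpu, mask) {
		if (cpu_online(cpu))
			n++;
	}
	return n;
}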