Diffstat (limited to 'kernel/softirq.c'):
 kernel/softirq.c | 305 +++++++++---------------------
 1 file changed, 90 insertions(+), 215 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b2498835345..5918d227730 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,10 +6,10 @@
* Distribute under GPLv2.
*
* Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
- *
- * Remote softirq infrastructure is by Jens Axboe.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
@@ -25,6 +25,7 @@
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
+#include <linux/irq.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -56,7 +57,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
-char *softirq_to_name[NR_SOFTIRQS] = {
+const char * const softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
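
/*
 * Aside on the qualifier change above (general C behaviour, not from
 * this patch): "const char * const" makes both the pointer slots and
 * the strings immutable, so the table can be placed in read-only
 * memory. For example:
 *
 *	const char * const tbl[] = { "HI", "TIMER" };
 *	tbl[0] = "X";	/* compile-time error now; legal with plain char * */
 */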
@@ -91,7 +92,7 @@ static void wakeup_softirqd(void)
* where hardirqs are disabled legitimately:
*/
#ifdef CONFIG_TRACE_IRQFLAGS
-static void __local_bh_disable(unsigned long ip, unsigned int cnt)
+void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
@@ -109,33 +110,21 @@ static void __local_bh_disable(unsigned long ip, unsigned int cnt)
/*
* Were softirqs turned off above:
*/
- if (softirq_count() == cnt)
+ if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_off(ip);
raw_local_irq_restore(flags);
if (preempt_count() == cnt)
trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
-#else /* !CONFIG_TRACE_IRQFLAGS */
-static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
-{
- preempt_count_add(cnt);
- barrier();
-}
+EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */
-void local_bh_disable(void)
-{
- __local_bh_disable(_RET_IP_, SOFTIRQ_DISABLE_OFFSET);
-}
-
-EXPORT_SYMBOL(local_bh_disable);
-
static void __local_bh_enable(unsigned int cnt)
{
WARN_ON_ONCE(!irqs_disabled());
- if (softirq_count() == cnt)
+ if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_on(_RET_IP_);
preempt_count_sub(cnt);
}
@@ -150,10 +139,9 @@ void _local_bh_enable(void)
WARN_ON_ONCE(in_irq());
__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
-
EXPORT_SYMBOL(_local_bh_enable);
-static inline void _local_bh_enable_ip(unsigned long ip)
+void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -167,8 +155,8 @@ static inline void _local_bh_enable_ip(unsigned long ip)
/*
* Keep preemption disabled until we are done with
* softirq processing:
- */
- preempt_count_sub(SOFTIRQ_DISABLE_OFFSET - 1);
+ */
+ preempt_count_sub(cnt - 1);
if (unlikely(!in_interrupt() && local_softirq_pending())) {
/*
@@ -184,18 +172,7 @@ static inline void _local_bh_enable_ip(unsigned long ip)
#endif
preempt_check_resched();
}
-
-void local_bh_enable(void)
-{
- _local_bh_enable_ip(_RET_IP_);
-}
-EXPORT_SYMBOL(local_bh_enable);
-
-void local_bh_enable_ip(unsigned long ip)
-{
- _local_bh_enable_ip(ip);
-}
-EXPORT_SYMBOL(local_bh_enable_ip);
+EXPORT_SYMBOL(__local_bh_enable_ip);
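
/*
 * Sketch of the header-side counterpart this patch implies (assuming
 * the matching <linux/bottom_half.h> change, which is not shown in this
 * diff): the removed out-of-line local_bh_disable()/local_bh_enable()
 * become static inlines around the new *_ip() primitives.
 */
static inline void local_bh_disable(void)
{
	__local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}

static inline void local_bh_enable(void)
{
	__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}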
/*
* We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
@@ -213,14 +190,48 @@ EXPORT_SYMBOL(local_bh_enable_ip);
#define MAX_SOFTIRQ_TIME msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
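
/*
 * How the two caps interact (a sketch; the actual check sits at the
 * bottom of __do_softirq() and is elided from this diff): processing
 * restarts while softirqs are still pending, MAX_SOFTIRQ_TIME has not
 * elapsed, no reschedule is due, and fewer than MAX_SOFTIRQ_RESTART
 * passes have run; otherwise ksoftirqd is woken instead:
 *
 *	if (time_before(jiffies, end) && !need_resched() && --max_restart)
 *		goto restart;
 *	wakeup_softirqd();
 */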
-asmlinkage void __do_softirq(void)
+#ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * When we run softirqs from irq_exit() and thus on the hardirq stack we need
+ * to keep the lockdep irq context tracking as tight as possible in order to
+ * not mis-qualify lock contexts and miss possible deadlocks.
+ */
+
+static inline bool lockdep_softirq_start(void)
+{
+ bool in_hardirq = false;
+
+ if (trace_hardirq_context(current)) {
+ in_hardirq = true;
+ trace_hardirq_exit();
+ }
+
+ lockdep_softirq_enter();
+
+ return in_hardirq;
+}
+
+static inline void lockdep_softirq_end(bool in_hardirq)
+{
+ lockdep_softirq_exit();
+
+ if (in_hardirq)
+ trace_hardirq_enter();
+}
+#else
+static inline bool lockdep_softirq_start(void) { return false; }
+static inline void lockdep_softirq_end(bool in_hardirq) { }
+#endif
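
/*
 * Usage pattern for the helpers above, as wired into __do_softirq()
 * further down in this patch (condensed):
 *
 *	in_hardirq = lockdep_softirq_start();	/* leave hardirq ctx, enter softirq ctx */
 *	... run the pending softirq handlers with irqs enabled ...
 *	lockdep_softirq_end(in_hardirq);	/* re-enter hardirq ctx if we left it */
 */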
+
+asmlinkage __visible void __do_softirq(void)
{
- struct softirq_action *h;
- __u32 pending;
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
- int cpu;
unsigned long old_flags = current->flags;
int max_restart = MAX_SOFTIRQ_RESTART;
+ struct softirq_action *h;
+ bool in_hardirq;
+ __u32 pending;
+ int softirq_bit;
/*
* Mask out PF_MEMALLOC as current task context is borrowed for the
@@ -232,10 +243,9 @@ asmlinkage void __do_softirq(void)
pending = local_softirq_pending();
account_irq_enter_time(current);
- __local_bh_disable(_RET_IP_, SOFTIRQ_OFFSET);
- lockdep_softirq_enter();
+ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
+ in_hardirq = lockdep_softirq_start();
- cpu = smp_processor_id();
restart:
/* Reset the pending bitmask before enabling irqs */
set_softirq_pending(0);
@@ -244,31 +254,31 @@ restart:
h = softirq_vec;
- do {
- if (pending & 1) {
- unsigned int vec_nr = h - softirq_vec;
- int prev_count = preempt_count();
-
- kstat_incr_softirqs_this_cpu(vec_nr);
-
- trace_softirq_entry(vec_nr);
- h->action(h);
- trace_softirq_exit(vec_nr);
- if (unlikely(prev_count != preempt_count())) {
- printk(KERN_ERR "huh, entered softirq %u %s %p"
- "with preempt_count %08x,"
- " exited with %08x?\n", vec_nr,
- softirq_to_name[vec_nr], h->action,
- prev_count, preempt_count());
- preempt_count_set(prev_count);
- }
+ while ((softirq_bit = ffs(pending))) {
+ unsigned int vec_nr;
+ int prev_count;
+
+ h += softirq_bit - 1;
- rcu_bh_qs(cpu);
+ vec_nr = h - softirq_vec;
+ prev_count = preempt_count();
+
+ kstat_incr_softirqs_this_cpu(vec_nr);
+
+ trace_softirq_entry(vec_nr);
+ h->action(h);
+ trace_softirq_exit(vec_nr);
+ if (unlikely(prev_count != preempt_count())) {
+ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+ vec_nr, softirq_to_name[vec_nr], h->action,
+ prev_count, preempt_count());
+ preempt_count_set(prev_count);
}
h++;
- pending >>= 1;
- } while (pending);
+ pending >>= softirq_bit;
+ }
+ rcu_bh_qs(smp_processor_id());
local_irq_disable();
pending = local_softirq_pending();
@@ -280,17 +290,14 @@ restart:
wakeup_softirqd();
}
- lockdep_softirq_exit();
-
+ lockdep_softirq_end(in_hardirq);
account_irq_exit_time(current);
__local_bh_enable(SOFTIRQ_OFFSET);
WARN_ON_ONCE(in_interrupt());
tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}
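
/*
 * The rewritten dispatch loop above skips over clear bits with ffs()
 * instead of shifting one bit at a time. A standalone illustration of
 * the pointer/shift arithmetic (userspace sketch, assuming POSIX ffs()
 * from <strings.h>):
 */
#include <stdio.h>
#include <strings.h>

int main(void)
{
	unsigned int pending = 0x115;	/* bits 0, 2, 4 and 8 set */
	unsigned int vec = 0;		/* plays the role of h - softirq_vec */
	int bit;

	while ((bit = ffs(pending))) {
		vec += bit - 1;		/* h += softirq_bit - 1 */
		printf("servicing vector %u\n", vec);	/* h->action(h) */
		vec++;			/* h++ */
		pending >>= bit;	/* pending >>= softirq_bit */
	}
	return 0;			/* prints vectors 0, 2, 4 and 8 */
}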
-
-
-asmlinkage void do_softirq(void)
+asmlinkage __visible void do_softirq(void)
{
__u32 pending;
unsigned long flags;
@@ -313,8 +320,6 @@ asmlinkage void do_softirq(void)
*/
void irq_enter(void)
{
- int cpu = smp_processor_id();
-
rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
@@ -322,7 +327,7 @@ void irq_enter(void)
* here, as softirq will be serviced on return from interrupt.
*/
local_bh_disable();
- tick_check_idle(cpu);
+ tick_irq_enter();
_local_bh_enable();
}
@@ -377,13 +382,13 @@ void irq_exit(void)
#endif
account_irq_exit_time(current);
- trace_hardirq_exit();
preempt_count_sub(HARDIRQ_OFFSET);
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
tick_irq_exit();
rcu_irq_exit();
+ trace_hardirq_exit(); /* must be last! */
}
/*
@@ -429,8 +434,7 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
/*
* Tasklets
*/
-struct tasklet_head
-{
+struct tasklet_head {
struct tasklet_struct *head;
struct tasklet_struct **tail;
};
@@ -449,7 +453,6 @@ void __tasklet_schedule(struct tasklet_struct *t)
raise_softirq_irqoff(TASKLET_SOFTIRQ);
local_irq_restore(flags);
}
-
EXPORT_SYMBOL(__tasklet_schedule);
void __tasklet_hi_schedule(struct tasklet_struct *t)
@@ -463,7 +466,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
raise_softirq_irqoff(HI_SOFTIRQ);
local_irq_restore(flags);
}
-
EXPORT_SYMBOL(__tasklet_hi_schedule);
void __tasklet_hi_schedule_first(struct tasklet_struct *t)
@@ -474,7 +476,6 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
__this_cpu_write(tasklet_hi_vec.head, t);
__raise_softirq_irqoff(HI_SOFTIRQ);
}
-
EXPORT_SYMBOL(__tasklet_hi_schedule_first);
static void tasklet_action(struct softirq_action *a)
@@ -494,7 +495,8 @@ static void tasklet_action(struct softirq_action *a)
if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+ &t->state))
BUG();
t->func(t->data);
tasklet_unlock(t);
@@ -529,7 +531,8 @@ static void tasklet_hi_action(struct softirq_action *a)
if (tasklet_trylock(t)) {
if (!atomic_read(&t->count)) {
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+ &t->state))
BUG();
t->func(t->data);
tasklet_unlock(t);
@@ -547,7 +550,6 @@ static void tasklet_hi_action(struct softirq_action *a)
}
}
-
void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data)
{
@@ -557,13 +559,12 @@ void tasklet_init(struct tasklet_struct *t,
t->func = func;
t->data = data;
}
-
EXPORT_SYMBOL(tasklet_init);
void tasklet_kill(struct tasklet_struct *t)
{
if (in_interrupt())
- printk("Attempt to kill tasklet from interrupt\n");
+ pr_notice("Attempt to kill tasklet from interrupt\n");
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
do {
@@ -573,7 +574,6 @@ void tasklet_kill(struct tasklet_struct *t)
tasklet_unlock_wait(t);
clear_bit(TASKLET_STATE_SCHED, &t->state);
}
-
EXPORT_SYMBOL(tasklet_kill);
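
/*
 * Typical driver-side use of the tasklet API touched above (sketch;
 * the my_* names are hypothetical, not from this patch):
 */
#include <linux/interrupt.h>

static void my_bh(unsigned long data)
{
	/* deferred bottom-half work; runs in softirq context */
}

static struct tasklet_struct my_tasklet;

static int my_probe(void)
{
	tasklet_init(&my_tasklet, my_bh, 0);
	return 0;
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* ack the hardware, then defer the heavy lifting */
	tasklet_schedule(&my_tasklet);
	return IRQ_HANDLED;
}

static void my_remove(void)
{
	tasklet_kill(&my_tasklet);	/* note the in_interrupt() check above */
}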
/*
@@ -627,146 +627,17 @@ void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
-/*
- * Remote softirq bits
- */
-
-DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
-EXPORT_PER_CPU_SYMBOL(softirq_work_list);
-
-static void __local_trigger(struct call_single_data *cp, int softirq)
-{
- struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
-
- list_add_tail(&cp->list, head);
-
- /* Trigger the softirq only if the list was previously empty. */
- if (head->next == &cp->list)
- raise_softirq_irqoff(softirq);
-}
-
-#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
-static void remote_softirq_receive(void *data)
-{
- struct call_single_data *cp = data;
- unsigned long flags;
- int softirq;
-
- softirq = *(int *)cp->info;
- local_irq_save(flags);
- __local_trigger(cp, softirq);
- local_irq_restore(flags);
-}
-
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
- if (cpu_online(cpu)) {
- cp->func = remote_softirq_receive;
- cp->info = &softirq;
- cp->flags = 0;
-
- __smp_call_function_single(cpu, cp, 0);
- return 0;
- }
- return 1;
-}
-#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
-static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
- return 1;
-}
-#endif
-
-/**
- * __send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @this_cpu: the currently executing cpu
- * @softirq: the softirq for the work
- *
- * Attempt to schedule softirq work on a remote cpu. If this cannot be
- * done, the work is instead queued up on the local cpu.
- *
- * Interrupts must be disabled.
- */
-void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
-{
- if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
- __local_trigger(cp, softirq);
-}
-EXPORT_SYMBOL(__send_remote_softirq);
-
-/**
- * send_remote_softirq - try to schedule softirq work on a remote cpu
- * @cp: private SMP call function data area
- * @cpu: the remote cpu
- * @softirq: the softirq for the work
- *
- * Like __send_remote_softirq except that disabling interrupts and
- * computing the current cpu is done for the caller.
- */
-void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
-{
- unsigned long flags;
- int this_cpu;
-
- local_irq_save(flags);
- this_cpu = smp_processor_id();
- __send_remote_softirq(cp, cpu, this_cpu, softirq);
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL(send_remote_softirq);
-
-static int remote_softirq_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- /*
- * If a CPU goes away, splice its entries to the current CPU
- * and trigger a run of the softirq
- */
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- int cpu = (unsigned long) hcpu;
- int i;
-
- local_irq_disable();
- for (i = 0; i < NR_SOFTIRQS; i++) {
- struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
- struct list_head *local_head;
-
- if (list_empty(head))
- continue;
-
- local_head = &__get_cpu_var(softirq_work_list[i]);
- list_splice_init(head, local_head);
- raise_softirq_irqoff(i);
- }
- local_irq_enable();
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block remote_softirq_cpu_notifier = {
- .notifier_call = remote_softirq_cpu_notify,
-};
-
void __init softirq_init(void)
{
int cpu;
for_each_possible_cpu(cpu) {
- int i;
-
per_cpu(tasklet_vec, cpu).tail =
&per_cpu(tasklet_vec, cpu).head;
per_cpu(tasklet_hi_vec, cpu).tail =
&per_cpu(tasklet_hi_vec, cpu).head;
- for (i = 0; i < NR_SOFTIRQS; i++)
- INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
}
- register_hotcpu_notifier(&remote_softirq_cpu_notifier);
-
open_softirq(TASKLET_SOFTIRQ, tasklet_action);
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
@@ -852,9 +723,8 @@ static void takeover_tasklets(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static int cpu_callback(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
{
switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
@@ -907,3 +777,8 @@ int __init __weak arch_early_irq_init(void)
{
return 0;
}
+
+unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
+{
+ return from;
+}
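
/*
 * A hypothetical architecture override of the weak default above
 * (illustrative sketch only; MY_ARCH_NR_LEGACY_IRQS is a made-up
 * constant, not from this patch):
 */
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
	/* never hand out dynamic IRQs below the legacy range */
	return max(from, (unsigned int)MY_ARCH_NR_LEGACY_IRQS);
}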