Diffstat (limited to 'kernel')
 kernel/auditsc.c          |   2
 kernel/debug/debug_core.c |   5
 kernel/debug/debug_core.h |   2
 kernel/hung_task.c        |   6
 kernel/irq/Kconfig        |   1
 kernel/kexec.c            |   2
 kernel/kmod.c             |   2
 kernel/power/block_io.c   |   2
 kernel/rcu/update.c       |  11
 kernel/sched/core.c       |   6
 kernel/sched/deadline.c   |   3
 kernel/smp.c              |  68
 kernel/softirq.c          |  74
 kernel/sysctl.c           |   7
 kernel/time/tick-sched.c  |  27
 kernel/trace/blktrace.c   |  15
 kernel/trace/trace.c      | 113
 17 files changed, 175 insertions(+), 171 deletions(-)
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 10176cd5956..7aef2f4b6c6 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1719,7 +1719,7 @@ void audit_putname(struct filename *name)
 	struct audit_context *context = current->audit_context;
 
 	BUG_ON(!context);
-	if (!context->in_syscall) {
+	if (!name->aname || !context->in_syscall) {
 #if AUDIT_DEBUG == 2
 		printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
 		       __FILE__, __LINE__, context->serial, name);
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 7d2f35e5df2..334b3980ffc 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -736,7 +736,8 @@ int kgdb_nmicallback(int cpu, void *regs)
 	return 1;
 }
 
-int kgdb_nmicallin(int cpu, int trapnr, void *regs, atomic_t *send_ready)
+int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
+		   atomic_t *send_ready)
 {
 #ifdef CONFIG_SMP
 	if (!kgdb_io_ready(0) || !send_ready)
@@ -750,7 +751,7 @@ int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
 	ks->cpu			= cpu;
 	ks->ex_vector		= trapnr;
 	ks->signo		= SIGTRAP;
-	ks->err_code		= KGDB_KDB_REASON_SYSTEM_NMI;
+	ks->err_code		= err_code;
 	ks->linux_regs		= regs;
 	ks->send_ready		= send_ready;
 	kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
index 572aa4f5677..127d9bc49fb 100644
--- a/kernel/debug/debug_core.h
+++ b/kernel/debug/debug_core.h
@@ -75,13 +75,11 @@ extern int kdb_stub(struct kgdb_state *ks);
 extern int kdb_parse(const char *cmdstr);
 extern int kdb_common_init_state(struct kgdb_state *ks);
 extern int kdb_common_deinit_state(void);
-#define KGDB_KDB_REASON_SYSTEM_NMI KDB_REASON_SYSTEM_NMI
 #else /* ! CONFIG_KGDB_KDB */
 static inline int kdb_stub(struct kgdb_state *ks)
 {
 	return DBG_PASS_EVENT;
 }
-#define KGDB_KDB_REASON_SYSTEM_NMI 0
 #endif /* CONFIG_KGDB_KDB */
 
 #endif /* _DEBUG_CORE_H_ */
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 9328b80eaf1..0b9c169d577 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -37,7 +37,7 @@ int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
  */
 unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;
 
-unsigned long __read_mostly sysctl_hung_task_warnings = 10;
+int __read_mostly sysctl_hung_task_warnings = 10;
 
 static int __read_mostly did_panic;
 
@@ -98,7 +98,9 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 
 	if (!sysctl_hung_task_warnings)
 		return;
-	sysctl_hung_task_warnings--;
+
+	if (sysctl_hung_task_warnings > 0)
+		sysctl_hung_task_warnings--;
 
 	/*
 	 * Ok, the task did not get scheduled for more than 2 minutes,
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 4a1fef09f65..07cbdfea9ae 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -40,6 +40,7 @@ config IRQ_EDGE_EOI_HANDLER
 # Generic configurable interrupt chip implementation
 config GENERIC_IRQ_CHIP
 	bool
+	select IRQ_DOMAIN
 
 # Generic irq_domain hw <--> linux irq number translation
 config IRQ_DOMAIN
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ac738781d35..60bafbed06a 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1537,7 +1537,7 @@ void vmcoreinfo_append_str(const char *fmt, ...)
 	size_t r;
 
 	va_start(args, fmt);
-	r = vsnprintf(buf, sizeof(buf), fmt, args);
+	r = vscnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
 
 	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
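The kexec.c hunk above is subtle: vsnprintf() returns the length the formatted output *would* have needed, which can exceed the buffer, so after truncation the following min() can advance vmcoreinfo_size past the bytes actually stored. vscnprintf() instead returns the count actually written. A minimal userspace sketch of the difference; my_vscnprintf() and append() are illustrative stand-ins, not the kernel implementation:

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's vscnprintf(): returns the number of
 * characters actually stored in buf, not the untruncated length that
 * vsnprintf() reports. */
static size_t my_vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
    int i = vsnprintf(buf, size, fmt, args);

    if (i < 0)
        return 0;
    /* vsnprintf() may claim more than was written; clamp to the buffer. */
    return (size_t)i < size ? (size_t)i : (size ? size - 1 : 0);
}

static size_t append(char *buf, size_t size, const char *fmt, ...)
{
    va_list args;
    size_t r;

    va_start(args, fmt);
    r = my_vscnprintf(buf, size, fmt, args);
    va_end(args);
    return r;
}

int main(void)
{
    char buf[8];
    /* 26 characters do not fit: vsnprintf() would report 26 here, and code
     * that advances a write offset by that value walks past the buffer. */
    size_t written = append(buf, sizeof(buf), "%s", "abcdefghijklmnopqrstuvwxyz");

    printf("stored %zu bytes: \"%s\"\n", written, buf); /* stored 7 bytes */
    return 0;
}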
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b086006c59e..6b375af4958 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -239,7 +239,7 @@ static int ____call_usermodehelper(void *data)
 
 	commit_creds(new);
 
-	retval = do_execve(sub_info->path,
+	retval = do_execve(getname_kernel(sub_info->path),
 			   (const char __user *const __user *)sub_info->argv,
 			   (const char __user *const __user *)sub_info->envp);
 	if (!retval)
diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c
index d09dd10c5a5..9a58bc25881 100644
--- a/kernel/power/block_io.c
+++ b/kernel/power/block_io.c
@@ -32,7 +32,7 @@ static int submit(int rw, struct block_device *bdev, sector_t sector,
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_bdev = bdev;
 	bio->bi_end_io = end_swap_bio_read;
 
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 802365ccd59..c54609faf23 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -200,17 +200,6 @@ void wait_rcu_gp(call_rcu_func_t crf)
 }
 EXPORT_SYMBOL_GPL(wait_rcu_gp);
 
-#ifdef CONFIG_PROVE_RCU
-/*
- * wrapper function to avoid #include problems.
- */
-int rcu_my_thread_group_empty(void)
-{
-	return thread_group_empty(current);
-}
-EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
-#endif /* #ifdef CONFIG_PROVE_RCU */
-
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
 static inline void debug_init_rcu_head(struct rcu_head *head)
 {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7fea865a810..b46131ef6aa 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2476,7 +2476,7 @@ u64 scheduler_tick_max_deferment(void)
 	if (time_before_eq(next, now))
 		return 0;
 
-	return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
+	return jiffies_to_nsecs(next - now);
 }
 #endif
 
@@ -4347,7 +4347,9 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 		goto out_unlock;
 
 	rq = task_rq_lock(p, &flags);
-	time_slice = p->sched_class->get_rr_interval(rq, p);
+	time_slice = 0;
+	if (p->sched_class->get_rr_interval)
+		time_slice = p->sched_class->get_rr_interval(rq, p);
 	task_rq_unlock(rq, p, &flags);
 
 	rcu_read_unlock();
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0de24820287..0dd5e0971a0 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -351,7 +351,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
  * disrupting the schedulability of the system. Otherwise, we should
  * refill the runtime and set the deadline a period in the future,
  * because keeping the current (absolute) deadline of the task would
- * result in breaking guarantees promised to other tasks.
+ * result in breaking guarantees promised to other tasks (refer to
+ * Documentation/scheduler/sched-deadline.txt for more information).
  *
  * This function returns true if:
  *
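The sched/core.c change guards a sched_class hook that not every scheduling class implements (SCHED_DEADLINE provides no round-robin interval), so an unconditional indirect call could dereference a NULL function pointer. A minimal sketch of the same optional-callback pattern; the struct and names here are illustrative, not the kernel's sched_class:

#include <stdio.h>

/* Hypothetical ops table: mirrors how sched_class leaves optional hooks NULL. */
struct sched_ops {
    const char *name;
    unsigned int (*get_rr_interval)(void); /* optional; may be NULL */
};

static unsigned int rt_rr_interval(void) { return 100; }

static const struct sched_ops rt_ops = { "rt", rt_rr_interval };
static const struct sched_ops dl_ops = { "deadline", NULL }; /* hook absent */

static unsigned int query_rr(const struct sched_ops *ops)
{
    unsigned int time_slice = 0;

    /* The fix in the hunk above: call the hook only if the class provides
     * one, otherwise report 0 (no round-robin quantum). */
    if (ops->get_rr_interval)
        time_slice = ops->get_rr_interval();
    return time_slice;
}

int main(void)
{
    printf("%s: %u\n", rt_ops.name, query_rr(&rt_ops));
    printf("%s: %u\n", dl_ops.name, query_rr(&dl_ops)); /* 0, no crash */
    return 0;
}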
diff --git a/kernel/smp.c b/kernel/smp.c
index bd9f9402883..ffee35bef17 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -23,17 +23,11 @@ enum {
 struct call_function_data {
 	struct call_single_data	__percpu *csd;
 	cpumask_var_t		cpumask;
-	cpumask_var_t		cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 
-struct call_single_queue {
-	struct list_head	list;
-	raw_spinlock_t		lock;
-};
-
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -47,14 +41,8 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
-		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
-				cpu_to_node(cpu))) {
-			free_cpumask_var(cfd->cpumask);
-			return notifier_from_errno(-ENOMEM);
-		}
 		cfd->csd = alloc_percpu(struct call_single_data);
 		if (!cfd->csd) {
-			free_cpumask_var(cfd->cpumask_ipi);
 			free_cpumask_var(cfd->cpumask);
 			return notifier_from_errno(-ENOMEM);
 		}
@@ -67,7 +55,6 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
-		free_cpumask_var(cfd->cpumask_ipi);
 		free_percpu(cfd->csd);
 		break;
 #endif
@@ -85,12 +72,8 @@ void __init call_function_init(void)
 	void *cpu = (void *)(long)smp_processor_id();
 	int i;
 
-	for_each_possible_cpu(i) {
-		struct call_single_queue *q = &per_cpu(call_single_queue, i);
-
-		raw_spin_lock_init(&q->lock);
-		INIT_LIST_HEAD(&q->list);
-	}
+	for_each_possible_cpu(i)
+		init_llist_head(&per_cpu(call_single_queue, i));
 
 	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
 	register_cpu_notifier(&hotplug_cfd_notifier);
@@ -141,18 +124,9 @@ static void csd_unlock(struct call_single_data *csd)
  */
 static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
-	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-	unsigned long flags;
-	int ipi;
-
 	if (wait)
 		csd->flags |= CSD_FLAG_WAIT;
 
-	raw_spin_lock_irqsave(&dst->lock, flags);
-	ipi = list_empty(&dst->list);
-	list_add_tail(&csd->list, &dst->list);
-	raw_spin_unlock_irqrestore(&dst->lock, flags);
-
 	/*
 	 * The list addition should be visible before sending the IPI
 	 * handler locks the list to pull the entry off it because of
@@ -164,7 +138,7 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 	 * locking and barrier primitives. Generic code isn't really
 	 * equipped to do the right thing...
 	 */
-	if (ipi)
+	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
 		arch_send_call_function_single_ipi(cpu);
 
 	if (wait)
@@ -177,27 +151,26 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-	LIST_HEAD(list);
+	struct llist_node *entry, *next;
 
 	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-	raw_spin_lock(&q->lock);
-	list_replace_init(&q->list, &list);
-	raw_spin_unlock(&q->lock);
+	entry = llist_del_all(&__get_cpu_var(call_single_queue));
+	entry = llist_reverse_order(entry);
 
-	while (!list_empty(&list)) {
+	while (entry) {
 		struct call_single_data *csd;
 
-		csd = list_entry(list.next, struct call_single_data, list);
-		list_del(&csd->list);
+		next = entry->next;
+		csd = llist_entry(entry, struct call_single_data, llist);
 
 		csd->func(csd->info);
-
 		csd_unlock(csd);
+
+		entry = next;
 	}
 }
@@ -402,30 +375,17 @@ void smp_call_function_many(const struct cpumask *mask,
 	if (unlikely(!cpumask_weight(cfd->cpumask)))
 		return;
 
-	/*
-	 * After we put an entry into the list, cfd->cpumask may be cleared
-	 * again when another CPU sends another IPI for a SMP function call, so
-	 * cfd->cpumask will be zero.
-	 */
-	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
-
 	for_each_cpu(cpu, cfd->cpumask) {
 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
-		struct call_single_queue *dst =
-					&per_cpu(call_single_queue, cpu);
-		unsigned long flags;
 
 		csd_lock(csd);
 		csd->func = func;
 		csd->info = info;
-
-		raw_spin_lock_irqsave(&dst->lock, flags);
-		list_add_tail(&csd->list, &dst->list);
-		raw_spin_unlock_irqrestore(&dst->lock, flags);
+		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
 	}
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
+	arch_send_call_function_ipi_mask(cfd->cpumask);
 
 	if (wait) {
 		for_each_cpu(cpu, cfd->cpumask) {
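The smp.c conversion above replaces a spinlock-protected list with an llist: producers push nodes with a lock-free cmpxchg, and the IPI handler detaches the whole list in one atomic exchange, then reverses it to recover submission order. A userspace analogue using C11 atomics; push() and pop_all_fifo() are stand-ins for llist_add()/llist_del_all()/llist_reverse_order(), not the kernel API:

#include <stdatomic.h>
#include <stdio.h>

struct node {
    struct node *next;
    int val;
};

static _Atomic(struct node *) queue_head;

/* Like llist_add(): returns nonzero if the list was empty, which is what
 * lets generic_exec_single() send the IPI only for the first entry queued. */
static int push(struct node *n)
{
    struct node *first = atomic_load(&queue_head);

    do {
        n->next = first;
    } while (!atomic_compare_exchange_weak(&queue_head, &first, n));
    return first == NULL;
}

/* Like llist_del_all() followed by llist_reverse_order(). */
static struct node *pop_all_fifo(void)
{
    struct node *entry = atomic_exchange(&queue_head, NULL);
    struct node *rev = NULL;

    while (entry) {            /* list comes out LIFO; reverse it */
        struct node *next = entry->next;
        entry->next = rev;
        rev = entry;
        entry = next;
    }
    return rev;
}

int main(void)
{
    struct node n[3] = { { NULL, 1 }, { NULL, 2 }, { NULL, 3 } };

    for (int i = 0; i < 3; i++)
        printf("push %d, was empty: %d\n", n[i].val, push(&n[i]));

    for (struct node *e = pop_all_fifo(); e; e = e->next)
        printf("pop %d\n", e->val);   /* 1, 2, 3: FIFO order restored */
    return 0;
}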
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8a1e6e10489..490fcbb1dc5 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -8,6 +8,8 @@
  *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/export.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
@@ -54,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
-char *softirq_to_name[NR_SOFTIRQS] = {
+const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
 };
@@ -136,7 +138,6 @@ void _local_bh_enable(void)
 	WARN_ON_ONCE(in_irq());
 	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
 }
-
 EXPORT_SYMBOL(_local_bh_enable);
 
 void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
@@ -153,7 +154,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 	/*
 	 * Keep preemption disabled until we are done with
 	 * softirq processing:
-	 */
+	 */
 	preempt_count_sub(cnt - 1);
 
 	if (unlikely(!in_interrupt() && local_softirq_pending())) {
@@ -229,6 +230,7 @@ asmlinkage void __do_softirq(void)
 	struct softirq_action *h;
 	bool in_hardirq;
 	__u32 pending;
+	int softirq_bit;
 	int cpu;
 
 	/*
@@ -253,30 +255,30 @@ restart:
 
 	h = softirq_vec;
 
-	do {
-		if (pending & 1) {
-			unsigned int vec_nr = h - softirq_vec;
-			int prev_count = preempt_count();
-
-			kstat_incr_softirqs_this_cpu(vec_nr);
-
-			trace_softirq_entry(vec_nr);
-			h->action(h);
-			trace_softirq_exit(vec_nr);
-			if (unlikely(prev_count != preempt_count())) {
-				printk(KERN_ERR "huh, entered softirq %u %s %p"
-				       "with preempt_count %08x,"
-				       " exited with %08x?\n", vec_nr,
-				       softirq_to_name[vec_nr], h->action,
-				       prev_count, preempt_count());
-				preempt_count_set(prev_count);
-			}
+	while ((softirq_bit = ffs(pending))) {
+		unsigned int vec_nr;
+		int prev_count;
 
-			rcu_bh_qs(cpu);
+		h += softirq_bit - 1;
+
+		vec_nr = h - softirq_vec;
+		prev_count = preempt_count();
+
+		kstat_incr_softirqs_this_cpu(vec_nr);
+
+		trace_softirq_entry(vec_nr);
+		h->action(h);
+		trace_softirq_exit(vec_nr);
+		if (unlikely(prev_count != preempt_count())) {
+			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+			       vec_nr, softirq_to_name[vec_nr], h->action,
+			       prev_count, preempt_count());
+			preempt_count_set(prev_count);
 		}
+		rcu_bh_qs(cpu);
 		h++;
-		pending >>= 1;
-	} while (pending);
+		pending >>= softirq_bit;
+	}
 
 	local_irq_disable();
@@ -326,7 +328,7 @@ void irq_enter(void)
 		 * here, as softirq will be serviced on return from interrupt.
 		 */
 		local_bh_disable();
-		tick_check_idle();
+		tick_irq_enter();
 		_local_bh_enable();
 	}
 
@@ -433,8 +435,7 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
 /*
 * Tasklets
 */
-struct tasklet_head
-{
+struct tasklet_head {
 	struct tasklet_struct *head;
 	struct tasklet_struct **tail;
 };
@@ -453,7 +454,6 @@ void __tasklet_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(__tasklet_schedule);
 
 void __tasklet_hi_schedule(struct tasklet_struct *t)
@@ -467,7 +467,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(__tasklet_hi_schedule);
 
 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
@@ -478,7 +477,6 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 	__this_cpu_write(tasklet_hi_vec.head, t);
 	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
-
 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
 
 static void tasklet_action(struct softirq_action *a)
@@ -498,7 +496,8 @@ static void tasklet_action(struct softirq_action *a)
 
 		if (tasklet_trylock(t)) {
 			if (!atomic_read(&t->count)) {
-				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+							&t->state))
 					BUG();
 				t->func(t->data);
 				tasklet_unlock(t);
@@ -533,7 +532,8 @@ static void tasklet_hi_action(struct softirq_action *a)
 
 		if (tasklet_trylock(t)) {
 			if (!atomic_read(&t->count)) {
-				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+							&t->state))
 					BUG();
 				t->func(t->data);
 				tasklet_unlock(t);
@@ -551,7 +551,6 @@ static void tasklet_hi_action(struct softirq_action *a)
 	}
 }
 
-
 void tasklet_init(struct tasklet_struct *t,
 		  void (*func)(unsigned long), unsigned long data)
 {
@@ -561,13 +560,12 @@ void tasklet_init(struct tasklet_struct *t,
 	t->func = func;
 	t->data = data;
 }
-
 EXPORT_SYMBOL(tasklet_init);
 
 void tasklet_kill(struct tasklet_struct *t)
 {
 	if (in_interrupt())
-		printk("Attempt to kill tasklet from interrupt\n");
+		pr_notice("Attempt to kill tasklet from interrupt\n");
 
 	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
 		do {
@@ -577,7 +575,6 @@ void tasklet_kill(struct tasklet_struct *t)
 	tasklet_unlock_wait(t);
 	clear_bit(TASKLET_STATE_SCHED, &t->state);
 }
-
 EXPORT_SYMBOL(tasklet_kill);
 
 /*
@@ -727,9 +724,8 @@ static void takeover_tasklets(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int cpu_callback(struct notifier_block *nfb,
-			unsigned long action,
-			void *hcpu)
+static int cpu_callback(struct notifier_block *nfb, unsigned long action,
+			void *hcpu)
 {
 	switch (action) {
 #ifdef CONFIG_HOTPLUG_CPU
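The __do_softirq() rework above walks the pending mask with ffs(), so each iteration jumps straight to the next set bit instead of testing bits one at a time. A small userspace sketch of the same scan; the name table mirrors softirq_to_name, the rest is illustrative:

#include <stdio.h>
#include <strings.h>   /* ffs() */

static const char * const name[] = {
    "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
    "TASKLET", "SCHED", "HRTIMER", "RCU"
};

int main(void)
{
    unsigned int pending = (1u << 1) | (1u << 3) | (1u << 9); /* TIMER, NET_RX, RCU */
    const char * const *h = name;
    int softirq_bit;

    while ((softirq_bit = ffs(pending))) {
        h += softirq_bit - 1;        /* jump straight to the next set bit */
        printf("run softirq %s\n", *h);
        h++;
        pending >>= softirq_bit;     /* consume bits up to and including it */
    }
    return 0;
}

Runs of clear bits cost nothing: for the mask above the loop body executes three times rather than ten.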
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 096db7452cb..49e13e1f8fe 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -121,6 +121,8 @@ extern int blk_iopoll_enabled;
 static int sixty = 60;
 #endif
 
+static int __maybe_unused neg_one = -1;
+
 static int zero;
 static int __maybe_unused one = 1;
 static int __maybe_unused two = 2;
@@ -997,9 +999,10 @@ static struct ctl_table kern_table[] = {
 	{
 		.procname	= "hung_task_warnings",
 		.data		= &sysctl_hung_task_warnings,
-		.maxlen		= sizeof(unsigned long),
+		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_doulongvec_minmax,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &neg_one,
 	},
 #endif
 #ifdef CONFIG_COMPAT
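This sysctl.c hunk pairs with the hung_task.c change earlier in the series: the variable is now an int, so .maxlen and the handler must match its size, and extra1 = &neg_one admits -1 (which, with the guarded decrement, means "warn without limit") while rejecting anything smaller. A minimal userspace sketch of the minimum-bound idea; write_int() is an illustrative stand-in for proc_dointvec_minmax(), which does far more:

#include <stdio.h>

/* Illustrative table entry, not the kernel's struct ctl_table. */
struct entry {
    int *data;
    const int *extra1;   /* minimum allowed value, may be NULL */
};

static int write_int(struct entry *e, int val)
{
    if (e->extra1 && val < *e->extra1)
        return -1;       /* -EINVAL in the kernel */
    *e->data = val;
    return 0;
}

static int neg_one = -1;
static int hung_task_warnings = 10;

int main(void)
{
    struct entry e = { &hung_task_warnings, &neg_one };

    printf("write 5:  rc=%d value=%d\n", write_int(&e, 5), hung_task_warnings);
    printf("write -1: rc=%d value=%d\n", write_int(&e, -1), hung_task_warnings);
    printf("write -7: rc=%d value=%d\n", write_int(&e, -7), hung_task_warnings);
    return 0;
}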
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 08cb0c3b8cc..9f8af69c67e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -533,12 +533,13 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	u64 time_delta;
 
+	time_delta = timekeeping_max_deferment();
+
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
 		seq = read_seqbegin(&jiffies_lock);
 		last_update = last_jiffies_update;
 		last_jiffies = jiffies;
-		time_delta = timekeeping_max_deferment();
 	} while (read_seqretry(&jiffies_lock, seq));
 
 	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
@@ -678,18 +679,18 @@ out:
 static void tick_nohz_full_stop_tick(struct tick_sched *ts)
 {
 #ifdef CONFIG_NO_HZ_FULL
-       int cpu = smp_processor_id();
+	int cpu = smp_processor_id();
 
-       if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
-               return;
+	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
+		return;
 
-       if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
-               return;
+	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
+		return;
 
-       if (!can_stop_full_tick())
-               return;
+	if (!can_stop_full_tick())
+		return;
 
-       tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
 #endif
 }
@@ -1023,7 +1024,7 @@ static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 #endif
 }
 
-static inline void tick_check_nohz_this_cpu(void)
+static inline void tick_nohz_irq_enter(void)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t now;
@@ -1042,17 +1043,17 @@ static inline void tick_check_nohz_this_cpu(void)
 
 #else
 
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz_this_cpu(void) { }
+static inline void tick_nohz_irq_enter(void) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
-void tick_check_idle(void)
+void tick_irq_enter(void)
 {
 	tick_check_oneshot_broadcast_this_cpu();
-	tick_check_nohz_this_cpu();
+	tick_nohz_irq_enter();
 }
 
 /*
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index f785aef6579..b418cb0d724 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -781,8 +781,8 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 	if (!error && !bio_flagged(bio, BIO_UPTODATE))
 		error = EIO;
 
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-			error, 0, NULL);
+	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+			bio->bi_rw, what, error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -885,8 +885,9 @@ static void blk_add_trace_split(void *ignore,
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(pdu);
 
-		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+		__blk_add_trace(bt, bio->bi_iter.bi_sector,
+				bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
+				!bio_flagged(bio, BIO_UPTODATE),
 				sizeof(rpdu), &rpdu);
 	}
 }
@@ -918,9 +919,9 @@ static void blk_add_trace_bio_remap(void *ignore,
 	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
 	r.sector_from = cpu_to_be64(from);
 
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-			BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
-			sizeof(r), &r);
+	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+			bio->bi_rw, BLK_TA_REMAP,
+			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
 /**
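The tick-sched.c hunk above hoists timekeeping_max_deferment() out of the jiffies seqlock retry loop: the body of such a loop may run arbitrarily many times under writer contention, so only reads of the seqcount-protected data belong inside it. A userspace sketch of a seqcount read side making the same point; the helpers are simplified stand-ins, not the kernel's seqlock implementation:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;
static unsigned long jiffies_val, last_update_val;

static unsigned int read_seqbegin_(void)
{
    unsigned int s;
    while ((s = atomic_load(&seq)) & 1)  /* odd: writer in progress */
        ;
    return s;
}

static int read_seqretry_(unsigned int s)
{
    atomic_thread_fence(memory_order_acquire);
    return atomic_load(&seq) != s;       /* changed: retry the read */
}

int main(void)
{
    unsigned long last_jiffies, last_update, time_delta;
    unsigned int s;

    time_delta = 123456;    /* hoisted: loop-invariant, computed once */

    do {
        s = read_seqbegin_();
        last_update = last_update_val;   /* only protected data in here */
        last_jiffies = jiffies_val;
    } while (read_seqretry_(s));

    printf("jiffies=%lu last_update=%lu max_deferment=%lu\n",
           last_jiffies, last_update, time_delta);
    return 0;
}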
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 20c755e018c..815c878f409 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -455,6 +455,9 @@ int __trace_puts(unsigned long ip, const char *str, int size)
 	unsigned long irq_flags;
 	int alloc;
 
+	if (unlikely(tracing_selftest_running || tracing_disabled))
+		return 0;
+
 	alloc = sizeof(*entry) + size + 2; /* possible \n added */
 
 	local_save_flags(irq_flags);
@@ -495,6 +498,9 @@ int __trace_bputs(unsigned long ip, const char *str)
 	unsigned long irq_flags;
 	int size = sizeof(struct bputs_entry);
 
+	if (unlikely(tracing_selftest_running || tracing_disabled))
+		return 0;
+
 	local_save_flags(irq_flags);
 	buffer = global_trace.trace_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
@@ -3519,60 +3525,103 @@ static const char readme_msg[] =
 	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
 	"\t\t\t  Remove sub-buffer with rmdir\n"
 	"  trace_options\t\t- Set format or modify how tracing happens\n"
-	"\t\t\t  Disable an option by adding a suffix 'no' to the option name\n"
+	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
+	"\t\t\t  option name\n"
 #ifdef CONFIG_DYNAMIC_FTRACE
 	"\n  available_filter_functions - list of functions that can be filtered on\n"
-	"  set_ftrace_filter\t- echo function name in here to only trace these functions\n"
-	"            accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
-	"            modules: Can select a group via module\n"
-	"             Format: :mod:<module-name>\n"
-	"             example: echo :mod:ext3 > set_ftrace_filter\n"
-	"            triggers: a command to perform when function is hit\n"
-	"              Format: <function>:<trigger>[:count]\n"
-	"             trigger: traceon, traceoff\n"
-	"                      enable_event:<system>:<event>\n"
-	"                      disable_event:<system>:<event>\n"
+	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
+	"\t\t\t  functions\n"
+	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
+	"\t     modules: Can select a group via module\n"
+	"\t      Format: :mod:<module-name>\n"
+	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
+	"\t    triggers: a command to perform when function is hit\n"
+	"\t      Format: <function>:<trigger>[:count]\n"
+	"\t     trigger: traceon, traceoff\n"
+	"\t\t      enable_event:<system>:<event>\n"
+	"\t\t      disable_event:<system>:<event>\n"
 #ifdef CONFIG_STACKTRACE
-	"                      stacktrace\n"
+	"\t\t      stacktrace\n"
 #endif
 #ifdef CONFIG_TRACER_SNAPSHOT
-	"                      snapshot\n"
+	"\t\t      snapshot\n"
 #endif
-	"            example: echo do_fault:traceoff > set_ftrace_filter\n"
-	"                     echo do_trap:traceoff:3 > set_ftrace_filter\n"
-	"            The first one will disable tracing every time do_fault is hit\n"
-	"            The second will disable tracing at most 3 times when do_trap is hit\n"
-	"            The first time do trap is hit and it disables tracing, the counter\n"
-	"            will decrement to 2. If tracing is already disabled, the counter\n"
-	"            will not decrement. It only decrements when the trigger did work\n"
-	"            To remove trigger without count:\n"
-	"              echo '!<function>:<trigger> > set_ftrace_filter\n"
-	"            To remove trigger with a count:\n"
-	"              echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
+	"\t   example: echo do_fault:traceoff > set_ftrace_filter\n"
+	"\t            echo do_trap:traceoff:3 > set_ftrace_filter\n"
+	"\t   The first one will disable tracing every time do_fault is hit\n"
+	"\t   The second will disable tracing at most 3 times when do_trap is hit\n"
+	"\t   The first time do trap is hit and it disables tracing, the\n"
+	"\t     counter will decrement to 2. If tracing is already disabled,\n"
+	"\t     the counter will not decrement. It only decrements when the\n"
+	"\t     trigger did work\n"
+	"\t   To remove trigger without count:\n"
+	"\t     echo '!<function>:<trigger> > set_ftrace_filter\n"
+	"\t   To remove trigger with a count:\n"
+	"\t     echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
 	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
-	"            accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
-	"            modules: Can select a group via module command :mod:\n"
-	"            Does not accept triggers\n"
+	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
+	"\t     modules: Can select a group via module command :mod:\n"
+	"\t     Does not accept triggers\n"
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #ifdef CONFIG_FUNCTION_TRACER
-	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids (function)\n"
+	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
+	"\t\t    (function)\n"
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
 	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
 #endif
 #ifdef CONFIG_TRACER_SNAPSHOT
-	"\n  snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
-	"\t\t\t  Read the contents for more information\n"
+	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
+	"\t\t\t  snapshot buffer. Read the contents for more\n"
+	"\t\t\t  information\n"
 #endif
 #ifdef CONFIG_STACK_TRACER
 	"  stack_trace\t\t- Shows the max stack trace when active\n"
 	"  stack_max_size\t- Shows current max stack size that was traced\n"
-	"\t\t\t  Write into this file to reset the max size (trigger a new trace)\n"
+	"\t\t\t  Write into this file to reset the max size (trigger a\n"
+	"\t\t\t  new trace)\n"
 #ifdef CONFIG_DYNAMIC_FTRACE
-	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
+	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
+	"\t\t\t  traces\n"
 #endif
 #endif /* CONFIG_STACK_TRACER */
+	"  events/\t\t- Directory containing all trace event subsystems:\n"
+	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
+	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
+	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
+	"\t\t\t  events\n"
+	"      filter\t\t- If set, only events passing filter are traced\n"
+	"  events/<system>/<event>/\t- Directory containing control files for\n"
+	"\t\t\t  <event>:\n"
+	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
+	"      filter\t\t- If set, only events passing filter are traced\n"
+	"      trigger\t\t- If set, a command to perform when event is hit\n"
+	"\t    Format: <trigger>[:count][if <filter>]\n"
+	"\t   trigger: traceon, traceoff\n"
+	"\t            enable_event:<system>:<event>\n"
+	"\t            disable_event:<system>:<event>\n"
+#ifdef CONFIG_STACKTRACE
+	"\t\t    stacktrace\n"
+#endif
+#ifdef CONFIG_TRACER_SNAPSHOT
+	"\t\t    snapshot\n"
+#endif
+	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
+	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
+	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
+	"\t                  events/block/block_unplug/trigger\n"
+	"\t   The first disables tracing every time block_unplug is hit.\n"
+	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
+	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
+	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
+	"\t   Like function triggers, the counter is only decremented if it\n"
+	"\t    enabled or disabled tracing.\n"
+	"\t   To remove a trigger without a count:\n"
+	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
+	"\t   To remove a trigger with a count:\n"
+	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
+	"\t   Filters can be ignored when removing a trigger.\n"
 ;
 
 static ssize_t