Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c            |  3
-rw-r--r--  kernel/exit.c           |  2
-rw-r--r--  kernel/lockdep.c        | 22
-rw-r--r--  kernel/rtmutex-debug.c  | 15
-rw-r--r--  kernel/rtmutex.c        |  2
-rw-r--r--  kernel/sched.c          |  7
-rw-r--r--  kernel/signal.c         |  2
-rw-r--r--  kernel/softlockup.c     |  2
-rw-r--r--  kernel/workqueue.c      |  2
9 files changed, 31 insertions, 26 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a21f71af9d8..ebf6647a2bd 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -98,7 +98,8 @@ static inline void check_for_tasks(int cpu)
 		    !cputime_eq(p->stime, cputime_zero)))
 			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
 				(state = %ld, flags = %x) \n",
-				 p->comm, p->pid, cpu, p->state, p->flags);
+				 p->comm, task_pid_nr(p), cpu,
+				 p->state, p->flags);
 	}
 	write_unlock_irq(&tasklist_lock);
 }
diff --git a/kernel/exit.c b/kernel/exit.c
index 6838d4d77e0..7dab2defec6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -959,7 +959,7 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
-				current->comm, current->pid,
+				current->comm, task_pid_nr(current),
 				preempt_count());
 
 	acct_update_integrals(tsk);
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b5392ff7e6a..55fe0c7cd95 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -511,11 +511,11 @@ static void lockdep_print_held_locks(struct task_struct *curr)
 	int i, depth = curr->lockdep_depth;
 
 	if (!depth) {
-		printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
+		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
 		return;
 	}
 	printk("%d lock%s held by %s/%d:\n",
-		depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
+		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
 
 	for (i = 0; i < depth; i++) {
 		printk(" #%d: ", i);
@@ -904,7 +904,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	print_kernel_version();
 	printk(  "-------------------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
-		curr->comm, curr->pid);
+		curr->comm, task_pid_nr(curr));
 	print_lock(check_source);
 	printk("\nbut task is already holding lock:\n");
 	print_lock(check_target);
@@ -1085,7 +1085,7 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_kernel_version();
 	printk(  "------------------------------------------------------\n");
 	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
-		curr->comm, curr->pid,
+		curr->comm, task_pid_nr(curr),
 		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
 		curr->hardirqs_enabled,
@@ -1237,7 +1237,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 	print_kernel_version();
 	printk(  "---------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
-		curr->comm, curr->pid);
+		curr->comm, task_pid_nr(curr));
 	print_lock(next);
 	printk("\nbut task is already holding lock:\n");
 	print_lock(prev);
@@ -1641,7 +1641,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 		usage_str[prev_bit], usage_str[new_bit]);
 
 	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
-		curr->comm, curr->pid,
+		curr->comm, task_pid_nr(curr),
 		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
 		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
 		trace_hardirqs_enabled(curr),
@@ -1694,7 +1694,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 	print_kernel_version();
 	printk(  "---------------------------------------------------------\n");
 	printk("%s/%d just changed the state of lock:\n",
-		curr->comm, curr->pid);
+		curr->comm, task_pid_nr(curr));
 	print_lock(this);
 	if (forwards)
 		printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
@@ -2487,7 +2487,7 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	printk(  "[ BUG: bad unlock balance detected! ]\n");
 	printk(  "-------------------------------------\n");
 	printk("%s/%d is trying to release lock (",
-		curr->comm, curr->pid);
+		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
 	printk(") at:\n");
 	print_ip_sym(ip);
@@ -2737,7 +2737,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
 	printk(  "[ BUG: bad contention detected! ]\n");
 	printk(  "---------------------------------\n");
 	printk("%s/%d is trying to contend lock (",
-		curr->comm, curr->pid);
+		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
 	printk(") at:\n");
 	print_ip_sym(ip);
@@ -3072,7 +3072,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	printk(  "[ BUG: held lock freed! ]\n");
 	printk(  "-------------------------\n");
 	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
-		curr->comm, curr->pid, mem_from, mem_to-1);
+		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
 	print_lock(hlock);
 	lockdep_print_held_locks(curr);
 
@@ -3125,7 +3125,7 @@ static void print_held_locks_bug(struct task_struct *curr)
 	printk(  "[ BUG: lock held at task exit time! ]\n");
 	printk(  "-------------------------------------\n");
 	printk("%s/%d is exiting with locks still held!\n",
-		curr->comm, curr->pid);
+		curr->comm, task_pid_nr(curr));
 	lockdep_print_held_locks(curr);
 
 	printk("\nstack backtrace:\n");
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 6b0703db152..56d73cb8826 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -87,7 +87,7 @@ static int rt_trace_on = 1;
 static void printk_task(struct task_struct *p)
 {
 	if (p)
-		printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
+		printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio);
 	else
 		printk("<none>");
 }
@@ -152,22 +152,25 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
 	printk(  "[ BUG: circular locking deadlock detected! ]\n");
 	printk(  "--------------------------------------------\n");
 	printk("%s/%d is deadlocking current task %s/%d\n\n",
-	       task->comm, task->pid, current->comm, current->pid);
+	       task->comm, task_pid_nr(task),
+	       current->comm, task_pid_nr(current));
 
 	printk("\n1) %s/%d is trying to acquire this lock:\n",
-	       current->comm, current->pid);
+	       current->comm, task_pid_nr(current));
 	printk_lock(waiter->lock, 1);
 
-	printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
+	printk("\n2) %s/%d is blocked on this lock:\n",
+	       task->comm, task_pid_nr(task));
 	printk_lock(waiter->deadlock_lock, 1);
 
 	debug_show_held_locks(current);
 	debug_show_held_locks(task);
 
-	printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
+	printk("\n%s/%d's [blocked] stackdump:\n\n",
+	       task->comm, task_pid_nr(task));
 	show_stack(task, NULL);
 	printk("\n%s/%d's [current] stackdump:\n\n",
-	       current->comm, current->pid);
+	       current->comm, task_pid_nr(current));
 	dump_stack();
 	debug_show_all_locks();
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 8cd9bd2cdb3..0deef71ff8d 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -185,7 +185,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 			prev_max = max_lock_depth;
 			printk(KERN_WARNING "Maximum lock depth %d reached "
 			       "task: %s (%d)\n", max_lock_depth,
-			       top_task->comm, top_task->pid);
+			       top_task->comm, task_pid_nr(top_task));
 		}
 		put_task_struct(task);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 9d458504e3a..a7e30462600 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3502,7 +3502,7 @@ EXPORT_SYMBOL(sub_preempt_count);
 static noinline void __schedule_bug(struct task_struct *prev)
 {
 	printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
-		prev->comm, preempt_count(), prev->pid);
+		prev->comm, preempt_count(), task_pid_nr(prev));
 	debug_show_held_locks(prev);
 	if (irqs_disabled())
 		print_irqtrace_events(prev);
@@ -4865,7 +4865,8 @@ static void show_task(struct task_struct *p)
 		free = (unsigned long)n - (unsigned long)end_of_stack(p);
 	}
 #endif
-	printk(KERN_CONT "%5lu %5d %6d\n", free, p->pid, p->parent->pid);
+	printk(KERN_CONT "%5lu %5d %6d\n", free,
+		task_pid_nr(p), task_pid_nr(p->parent));
 
 	if (state != TASK_RUNNING)
 		show_stack(p, NULL);
@@ -5172,7 +5173,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 			if (p->mm && printk_ratelimit())
 				printk(KERN_INFO "process %d (%s) no "
 				       "longer affine to cpu%d\n",
-				       p->pid, p->comm, dead_cpu);
+				       task_pid_nr(p), p->comm, dead_cpu);
 		}
 	} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
 }
diff --git a/kernel/signal.c b/kernel/signal.c
index 08364e75bb5..12006308c7e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -730,7 +730,7 @@ int print_fatal_signals;
 static void print_fatal_signal(struct pt_regs *regs, int signr)
 {
 	printk("%s/%d: potentially unexpected fatal signal %d.\n",
-		current->comm, current->pid, signr);
+		current->comm, task_pid_nr(current), signr);
 
 #ifdef __i386__
 	printk("code at %08lx: ", regs->eip);
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index edeeef3a6a3..11df812263c 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -113,7 +113,7 @@ void softlockup_tick(void)
 	spin_lock(&print_lock);
 	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
 			this_cpu, now - touch_timestamp,
-			current->comm, current->pid);
+			current->comm, task_pid_nr(current));
 	if (regs)
 		show_regs(regs);
 	else
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d1916fea710..52d5e7c9a8e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -282,7 +282,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
 					"%s/0x%08x/%d\n",
 					current->comm, preempt_count(),
-					current->pid);
+					task_pid_nr(current));
 			printk(KERN_ERR "    last function: ");
 			print_symbol("%s\n", (unsigned long)f);
 			debug_show_held_locks(current);
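
Every hunk above makes the same substitution: diagnostic printk()s stop reading the pid field of struct task_struct directly and call task_pid_nr() instead. As a minimal sketch (not part of this diff), the accessor at this point in the pid-namespace work still reduces to a plain field read, so the printed numbers do not change; the demo_report() caller below is hypothetical and is only meant to show the before/after pattern at a call site.

/* Sketch only -- not taken from this patch. The accessor below mirrors
 * the plain-field-read form provided by include/linux/sched.h around
 * this series; demo_report() is a hypothetical caller used to
 * illustrate the conversion the hunks above apply everywhere.
 */
static inline pid_t task_pid_nr_sketch(struct task_struct *tsk)
{
	return tsk->pid;	/* global pid number, as seen from the initial namespace */
}

static void demo_report(struct task_struct *p)
{
	/* before: printk("... %d ...", p->pid);
	 * after:  route the lookup through the accessor, so later
	 *         pid-namespace changes need not touch each call site.
	 */
	printk(KERN_INFO "task %s is pid %d\n", p->comm, task_pid_nr(p));
}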