37 files changed, 163 insertions, 157 deletions
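Note: the change is mechanical throughout — every printk()/DPRINT() that formatted current->pid or task->pid now goes through the task_pid_nr() helper, which returns the task's global (init-namespace) pid, so the messages stay meaningful when the task runs inside a pid namespace. A minimal sketch of the pattern, for illustration only (report_task() is not part of the patch):

#include <linux/kernel.h>
#include <linux/sched.h>

/* Print a task's name and pid the way the converted call sites do. */
static void report_task(struct task_struct *tsk)
{
	/* before: printk(KERN_INFO "%s(%d)\n", tsk->comm, tsk->pid); */
	printk(KERN_INFO "%s(%d)\n", tsk->comm, task_pid_nr(tsk));
}

task_pid_vnr() is the namespace-local counterpart for messages that should show the pid as the task itself sees it.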
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c
index 8c8aaa205ea..8d2982aa1b8 100644
--- a/arch/alpha/kernel/semaphore.c
+++ b/arch/alpha/kernel/semaphore.c
@@ -69,7 +69,7 @@ __down_failed(struct semaphore *sem)
 
 #ifdef CONFIG_DEBUG_SEMAPHORE
 	printk("%s(%d): down failed(%p)\n",
-	       tsk->comm, tsk->pid, sem);
+	       tsk->comm, task_pid_nr(tsk), sem);
 #endif
 
 	tsk->state = TASK_UNINTERRUPTIBLE;
@@ -98,7 +98,7 @@ __down_failed(struct semaphore *sem)
 
 #ifdef CONFIG_DEBUG_SEMAPHORE
 	printk("%s(%d): down acquired(%p)\n",
-	       tsk->comm, tsk->pid, sem);
+	       tsk->comm, task_pid_nr(tsk), sem);
 #endif
 }
 
@@ -111,7 +111,7 @@ __down_failed_interruptible(struct semaphore *sem)
 
 #ifdef CONFIG_DEBUG_SEMAPHORE
 	printk("%s(%d): down failed(%p)\n",
-	       tsk->comm, tsk->pid, sem);
+	       tsk->comm, task_pid_nr(tsk), sem);
 #endif
 
 	tsk->state = TASK_INTERRUPTIBLE;
@@ -139,7 +139,7 @@ __down_failed_interruptible(struct semaphore *sem)
 
 #ifdef CONFIG_DEBUG_SEMAPHORE
 	printk("%s(%d): down %s(%p)\n",
-	       current->comm, current->pid,
+	       current->comm, task_pid_nr(current),
 	       (ret < 0 ? "interrupted" : "acquired"), sem);
 #endif
 	return ret;
@@ -168,7 +168,7 @@ down(struct semaphore *sem)
 #endif
 #ifdef CONFIG_DEBUG_SEMAPHORE
 	printk("%s(%d): down(%p) <count=%d> from %p\n",
-	       current->comm, current->pid, sem,
+	       current->comm, task_pid_nr(current), sem,
 	       atomic_read(&sem->count), __builtin_return_address(0));
 #endif
 	__down(sem);
@@ -182,7 +182,7 @@ down_interruptible(struct semaphore *sem)
 #endif
 #ifdef CONFIG_DEBUG_SEMAPHORE
 	printk("%s(%d): down(%p) <count=%d> from %p\n",
-	       current->comm, current->pid, sem,
+	       current->comm, task_pid_nr(current), sem,
 	       atomic_read(&sem->count), __builtin_return_address(0));
 #endif
 	return __down_interruptible(sem);
@@ -201,7 +201,7 @@ down_trylock(struct semaphore *sem)
 
 #ifdef CONFIG_DEBUG_SEMAPHORE
 	printk("%s(%d): down_trylock %s from %p\n",
-	       current->comm, current->pid,
+	       current->comm, task_pid_nr(current),
 	       ret ? "failed" : "acquired",
 	       __builtin_return_address(0));
 #endif
@@ -217,7 +217,7 @@ up(struct semaphore *sem)
 #endif
 #ifdef CONFIG_DEBUG_SEMAPHORE
 	printk("%s(%d): up(%p) <count=%d> from %p\n",
-	       current->comm, current->pid, sem,
+	       current->comm, task_pid_nr(current), sem,
 	       atomic_read(&sem->count), __builtin_return_address(0));
 #endif
 	__up(sem);
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index ec0f05e0d8f..2dc7f9fed21 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -182,7 +182,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
 #ifdef CONFIG_SMP
 	printk("CPU %d ", hard_smp_processor_id());
 #endif
-	printk("%s(%d): %s %ld\n", current->comm, current->pid, str, err);
+	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
 	dik_show_regs(regs, r9_15);
 	add_taint(TAINT_DIE);
 	dik_show_trace((unsigned long *)(regs+1));
@@ -646,7 +646,7 @@ got_exception:
 	lock_kernel();
 
 	printk("%s(%d): unhandled unaligned exception\n",
-	       current->comm, current->pid);
+	       current->comm, task_pid_nr(current));
 
 	printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
 	       pc, una_reg(26), regs->ps);
@@ -786,7 +786,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
 	}
 	if (++cnt < 5) {
 		printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
-		       current->comm, current->pid,
+		       current->comm, task_pid_nr(current),
 		       regs->pc - 4, va, opcode, reg);
 	}
 	last_time = jiffies;
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index e0593e60614..4829f96585b 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -194,7 +194,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 		goto survive;
 	}
 	printk(KERN_ALERT "VM: killing process %s(%d)\n",
-	       current->comm, current->pid);
+	       current->comm, task_pid_nr(current));
 	if (!user_mode(regs))
 		goto no_context;
 	do_group_exit(SIGKILL);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 93b7f8e22dc..4f1a03124a7 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -265,7 +265,7 @@ void __show_regs(struct pt_regs *regs)
 void show_regs(struct pt_regs * regs)
 {
 	printk("\n");
-	printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
+	printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
 	__show_regs(regs);
 	__backtrace();
 }
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 5feee722ea9..4b05dc5c102 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -382,16 +382,16 @@ static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
 
 		if (ret != 2 || old_insn.thumb != BREAKINST_THUMB)
 			printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at "
-				"0x%08lx (0x%04x)\n", task->comm, task->pid,
-				addr, old_insn.thumb);
+				"0x%08lx (0x%04x)\n", task->comm,
+				task_pid_nr(task), addr, old_insn.thumb);
 	} else {
 		ret = swap_insn(task, addr & ~3, &old_insn.arm,
 				&bp->insn.arm, 4);
 
 		if (ret != 4 || old_insn.arm != BREAKINST_ARM)
 			printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
-				"0x%08lx (0x%08x)\n", task->comm, task->pid,
-				addr, old_insn.arm);
+				"0x%08lx (0x%08x)\n", task->comm,
+				task_pid_nr(task), addr, old_insn.arm);
 	}
 }
 
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 8ad47619c07..4764bd9ccee 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -223,7 +223,7 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
 	print_modules();
 	__show_regs(regs);
 	printk("Process %s (pid: %d, stack limit = 0x%p)\n",
-		tsk->comm, tsk->pid, thread + 1);
+		tsk->comm, task_pid_nr(tsk), thread + 1);
 
 	if (!user_mode(regs) || in_interrupt()) {
 		dump_mem("Stack: ", regs->ARM_sp,
@@ -337,7 +337,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #ifdef CONFIG_DEBUG_USER
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
-			current->comm, current->pid, pc);
+			current->comm, task_pid_nr(current), pc);
 		dump_instr(regs);
 	}
 #endif
@@ -388,7 +388,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
 #ifdef CONFIG_DEBUG_USER
 	if (user_debug & UDBG_SYSCALL) {
 		printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
-			current->pid, current->comm, n);
+			task_pid_nr(current), current->comm, n);
 		dump_instr(regs);
 	}
 #endif
@@ -565,7 +565,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		 */
 		if (user_debug & UDBG_SYSCALL) {
 			printk("[%d] %s: arm syscall %d\n",
-				current->pid, current->comm, no);
+				task_pid_nr(current), current->comm, no);
 			dump_instr(regs);
 			if (user_mode(regs)) {
 				__show_regs(regs);
@@ -642,7 +642,7 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
 #ifdef CONFIG_DEBUG_USER
 	if (user_debug & UDBG_BADABORT) {
 		printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
-			current->pid, current->comm, code, instr);
+			task_pid_nr(current), current->comm, code, instr);
 		dump_instr(regs);
 		show_pte(current->mm, addr);
 	}
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 074b7cb0774..e162cca5917 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -757,7 +757,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (ai_usermode & 1)
 		printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
 		       "Address=0x%08lx FSR 0x%03x\n", current->comm,
-			current->pid, instrptr,
+			task_pid_nr(current), instrptr,
 		       thumb_mode(regs) ? 4 : 8,
 		       thumb_mode(regs) ? tinstr : instr,
 		       addr, fsr);
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index a3405b3c1ee..d025a22eb22 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -773,7 +773,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro
 
 		if (flags & MAP_SHARED)
 			printk(KERN_INFO "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
-			       current->comm, current->pid, start);
+			       current->comm, task_pid_nr(current), start);
 		ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end),
 				   prot, flags, off);
 		if (IS_ERR((void *) ret))
@@ -786,7 +786,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro
 
 		if (flags & MAP_SHARED)
 			printk(KERN_INFO "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
-			       current->comm, current->pid, end);
+			       current->comm, task_pid_nr(current), end);
 		ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
 				   (off + len) - offset_in_page(end));
 		if (IS_ERR((void *) ret))
@@ -816,7 +816,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro
 
 	if ((flags & MAP_SHARED) && !is_congruent)
 		printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
-		       "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off);
+		       "(addr=0x%lx,off=0x%llx)\n", current->comm, task_pid_nr(current), start, off);
 
 	DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
 	    is_congruent ? "congruent" : "not congruent", poff);
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f55fa07849c..59169bf7145 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -158,14 +158,14 @@
  */
 #define PROTECT_CTX(c, f) \
 	do {  \
-		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \
+		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
 		spin_lock_irqsave(&(c)->ctx_lock, f); \
-		DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \
+		DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
 	} while(0)
 
 #define UNPROTECT_CTX(c, f) \
 	do { \
-		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \
+		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
 		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
 	} while(0)
 
@@ -227,12 +227,12 @@
 #ifdef PFM_DEBUGGING
 #define DPRINT(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 
 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
 	} while (0)
 #endif
 
@@ -913,7 +913,7 @@ pfm_mask_monitoring(struct task_struct *task)
 	unsigned long mask, val, ovfl_mask;
 	int i;
 
-	DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));
+	DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
 
 	ovfl_mask = pmu_conf->ovfl_val;
 	/*
@@ -992,12 +992,12 @@ pfm_restore_monitoring(struct task_struct *task)
 	ovfl_mask = pmu_conf->ovfl_val;
 
 	if (task != current) {
-		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
+		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
 		return;
 	}
 	if (ctx->ctx_state != PFM_CTX_MASKED) {
 		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
-			task->pid, current->pid, ctx->ctx_state);
+			task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
 		return;
 	}
 	psr = pfm_get_psr();
@@ -1051,7 +1051,8 @@ pfm_restore_monitoring(struct task_struct *task)
 		if ((mask & 0x1) == 0UL) continue;
 		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
 		ia64_set_pmc(i, ctx->th_pmcs[i]);
-		DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i]));
+		DPRINT(("[%d] pmc[%d]=0x%lx\n",
+			task_pid_nr(task), i, ctx->th_pmcs[i]));
 	}
 	ia64_srlz_d();
 
@@ -1370,7 +1371,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 
 error_conflict:
 	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
-		pfm_sessions.pfs_sys_session[cpu]->pid,
+		task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
 		cpu));
 abort:
 	UNLOCK_PFS(flags);
@@ -1442,7 +1443,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
 
 	/* sanity checks */
 	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
-		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
+		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
 		return -EINVAL;
 	}
 
@@ -1459,7 +1460,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
 	up_write(&task->mm->mmap_sem);
 
 	if (r !=0) {
-		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
+		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
 	}
 
 	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
@@ -1501,7 +1502,7 @@ pfm_free_smpl_buffer(pfm_context_t *ctx)
 	return 0;
 
 invalid_free:
-	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
+	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
 	return -EINVAL;
 }
 #endif
@@ -1547,13 +1548,13 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 	unsigned long flags;
 	DECLARE_WAITQUEUE(wait, current);
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
 		return -EINVAL;
 	}
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EINVAL;
 	}
 
@@ -1607,7 +1608,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 		PROTECT_CTX(ctx, flags);
 	}
 
-	DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
+	DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
 
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
@@ -1616,7 +1617,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 	ret = -EINVAL;
 	msg = pfm_get_next_msg(ctx);
 	if (msg == NULL) {
-		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
+		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
 		goto abort_locked;
 	}
 
@@ -1647,13 +1648,13 @@ pfm_poll(struct file *filp, poll_table * wait)
 	unsigned int mask = 0;
 
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
 		return 0;
 	}
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
 		return 0;
 	}
 
@@ -1692,7 +1693,7 @@ pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
 	ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
 
 	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
-		current->pid,
+		task_pid_nr(current),
 		fd,
 		on,
 		ctx->ctx_async_queue, ret));
@@ -1707,13 +1708,13 @@ pfm_fasync(int fd, struct file *filp, int on)
 	int ret;
 
 	if (PFM_IS_FILE(filp) == 0) {
-		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 	/*
@@ -1759,7 +1760,7 @@ pfm_syswide_force_stop(void *info)
 	if (owner != ctx->ctx_task) {
 		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
 			smp_processor_id(),
-			owner->pid, ctx->ctx_task->pid);
+			task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
 		return;
 	}
 	if (GET_PMU_CTX() != ctx) {
@@ -1769,7 +1770,7 @@ pfm_syswide_force_stop(void *info)
 		return;
 	}
 
-	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));
+	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
 	/*
 	 * the context is already protected in pfm_close(), we simply
 	 * need to mask interrupts to avoid a PMU interrupt race on
@@ -1821,7 +1822,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 
@@ -1969,7 +1970,7 @@ pfm_close(struct inode *inode, struct file *filp)
 
 	ctx = (pfm_context_t *)filp->private_data;
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
+		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
 	}
 
@@ -2066,7 +2067,7 @@ pfm_close(struct inode *inode, struct file *filp)
 		 */
 		ctx->ctx_state = PFM_CTX_ZOMBIE;
 
-		DPRINT(("zombie ctx for [%d]\n", task->pid));
+		DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
 		/*
 		 * cannot free the context on the spot. deferred until
 		 * the task notices the ZOMBIE state
@@ -2472,7 +2473,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
 	/* invoke and lock buffer format, if found */
 	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
 	if (fmt == NULL) {
-		DPRINT(("[%d] cannot find buffer format\n", task->pid));
+		DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
 		return -EINVAL;
 	}
 
@@ -2483,7 +2484,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
 
 	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
 
-	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret));
+	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
 
 	if (ret)
 		goto error;
@@ -2605,23 +2606,23 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	 * no kernel task or task not owner by caller
 	 */
 	if (task->mm == NULL) {
-		DPRINT(("task [%d] has not memory context (kernel thread)\n", task->pid));
+		DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
 		return -EPERM;
 	}
 	if (pfm_bad_permissions(task)) {
-		DPRINT(("no permission to attach to [%d]\n", task->pid));
+		DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
 		return -EPERM;
 	}
 	/*
 	 * cannot block in self-monitoring mode
 	 */
 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
-		DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid));
+		DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
 		return -EINVAL;
 	}
 
 	if (task->exit_state == EXIT_ZOMBIE) {
-		DPRINT(("cannot attach to zombie task [%d]\n", task->pid));
+		DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
 		return -EBUSY;
 	}
 
@@ -2631,7 +2632,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	if (task == current) return 0;
 
 	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
-		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state));
+		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
 		return -EBUSY;
 	}
 	/*
@@ -3512,7 +3513,7 @@ pfm_use_debug_registers(struct task_struct *task)
 
 	if (pmu_conf->use_rr_dbregs == 0) return 0;
 
-	DPRINT(("called for [%d]\n", task->pid));
+	DPRINT(("called for [%d]\n", task_pid_nr(task)));
 
 	/*
 	 * do it only once
@@ -3543,7 +3544,7 @@ pfm_use_debug_registers(struct task_struct *task)
 	DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
 		  pfm_sessions.pfs_ptrace_use_dbregs,
 		  pfm_sessions.pfs_sys_use_dbregs,
-		  task->pid, ret));
+		  task_pid_nr(task), ret));
 
 	UNLOCK_PFS(flags);
 
@@ -3568,7 +3569,7 @@ pfm_release_debug_registers(struct task_struct *task)
 
 	LOCK_PFS(flags);
 	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
-		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
+		printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
 		ret = -1;
 	} else {
 		pfm_sessions.pfs_ptrace_use_dbregs--;
@@ -3620,7 +3621,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 	/* sanity check */
 	if (unlikely(task == NULL)) {
-		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid);
+		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
 		return -EINVAL;
 	}
 
@@ -3629,7 +3630,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	fmt = ctx->ctx_buf_fmt;
 
 	DPRINT(("restarting self %d ovfl=0x%lx\n",
-		task->pid,
+		task_pid_nr(task),
 		ctx->ctx_ovfl_regs[0]));
 
 	if (CTX_HAS_SMPL(ctx)) {
@@ -3653,11 +3654,11 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 			pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
 
 		if (rst_ctrl.bits.mask_monitoring == 0) {
-			DPRINT(("resuming monitoring for [%d]\n", task->pid));
+			DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
 
 			if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
 		} else {
-			DPRINT(("keeping monitoring stopped for [%d]\n", task->pid));
+			DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
 
 			// cannot use pfm_stop_monitoring(task, regs);
 		}
@@ -3714,10 +3715,10 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * "self-monitoring".
 	 */
 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
-		DPRINT(("unblocking [%d] \n", task->pid));
+		DPRINT(("unblocking [%d] \n", task_pid_nr(task)));
 		complete(&ctx->ctx_restart_done);
 	} else {
-		DPRINT(("[%d] armed exit trap\n", task->pid));
+		DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
 
 		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
 
@@ -3805,7 +3806,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 	 * don't bother if we are loaded and task is being debugged
 	 */
 	if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
-		DPRINT(("debug registers already in use for [%d]\n", task->pid));
+		DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
 		return -EBUSY;
 	}
 
@@ -3846,7 +3847,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
 	 * is shared by all processes running on it
 	 */
 	if (first_time && can_access_pmu) {
-		DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid));
+		DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
 		for (i=0; i < pmu_conf->num_ibrs; i++) {
 			ia64_set_ibr(i, 0UL);
 			ia64_dv_serialize_instruction();
@@ -4035,7 +4036,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		return -EBUSY;
 	}
 	DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
-		PFM_CTX_TASK(ctx)->pid,
+		task_pid_nr(PFM_CTX_TASK(ctx)),
 		state,
 		is_system));
 	/*
@@ -4093,7 +4094,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 * monitoring disabled in kernel at next reschedule
 		 */
 		ctx->ctx_saved_psr_up = 0;
-		DPRINT(("task=[%d]\n", task->pid));
+		DPRINT(("task=[%d]\n", task_pid_nr(task)));
 	}
 	return 0;
 }
@@ -4298,11 +4299,12 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 		if (is_system) {
 			if (pfm_sessions.pfs_ptrace_use_dbregs) {
-				DPRINT(("cannot load [%d] dbregs in use\n", task->pid));
+				DPRINT(("cannot load [%d] dbregs in use\n",
+							task_pid_nr(task)));
 				ret = -EBUSY;
 			} else {
 				pfm_sessions.pfs_sys_use_dbregs++;
-				DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs));
+				DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
 				set_dbregs = 1;
 			}
 		}
@@ -4394,7 +4396,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 		/* allow user level control */
 		ia64_psr(regs)->sp = 0;
-		DPRINT(("clearing psr.sp for [%d]\n", task->pid));
+		DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
 
 		SET_LAST_CPU(ctx, smp_processor_id());
 		INC_ACTIVATION();
@@ -4429,7 +4431,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 		 */
 		SET_PMU_OWNER(task, ctx);
 
-		DPRINT(("context loaded on PMU for [%d]\n", task->pid));
+		DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
 	} else {
 		/*
 		 * when not current, task MUST be stopped, so this is safe
@@ -4493,7 +4495,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	int prev_state, is_system;
 	int ret;
 
-	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
+	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
 
 	prev_state = ctx->ctx_state;
 	is_system = ctx->ctx_fl_system;
@@ -4568,7 +4570,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 		 */
 		ia64_psr(regs)->sp = 1;
 
-		DPRINT(("setting psr.sp for [%d]\n", task->pid));
+		DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
 	}
 	/*
 	 * save PMDs to context
@@ -4608,7 +4610,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	ctx->ctx_fl_can_restart = 0;
 	ctx->ctx_fl_going_zombie = 0;
 
-	DPRINT(("disconnected [%d] from context\n", task->pid));
+	DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
 
 	return 0;
 }
@@ -4631,7 +4633,7 @@ pfm_exit_thread(struct task_struct *task)
 
 	PROTECT_CTX(ctx, flags);
 
-	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid));
+	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
 
 	state = ctx->ctx_state;
 	switch(state) {
@@ -4640,13 +4642,13 @@
 		 * only comes to this function if pfm_context is not NULL, i.e., cannot
 		 * be in unloaded state
		 */
-		printk(K