Diffstat (limited to 'arch/ia64/kernel/perfmon.c')
-rw-r--r--	arch/ia64/kernel/perfmon.c	490
1 file changed, 211 insertions(+), 279 deletions(-)
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index a2aabfdc80d..5845ffea67c 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -40,6 +40,9 @@
 #include <linux/capability.h>
 #include <linux/rcupdate.h>
 #include <linux/completion.h>
+#include <linux/tracehook.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
 
 #include <asm/errno.h>
 #include <asm/intrinsics.h>
@@ -47,7 +50,6 @@
 #include <asm/perfmon.h>
 #include <asm/processor.h>
 #include <asm/signal.h>
-#include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
@@ -311,7 +313,7 @@ typedef struct pfm_context {
 	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
 	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */
 
-	u64			ctx_saved_psr_up;	/* only contains psr.up value */
+	unsigned long		ctx_saved_psr_up;	/* only contains psr.up value */
 
 	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
 	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
@@ -519,53 +521,47 @@ static pmu_config_t		*pmu_conf;
 pfm_sysctl_t pfm_sysctl;
 EXPORT_SYMBOL(pfm_sysctl);
 
-static ctl_table pfm_ctl_table[]={
+static struct ctl_table pfm_ctl_table[] = {
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "debug",
 		.data		= &pfm_sysctl.debug,
 		.maxlen		= sizeof(int),
 		.mode		= 0666,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= proc_dointvec,
 	},
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "debug_ovfl",
 		.data		= &pfm_sysctl.debug_ovfl,
 		.maxlen		= sizeof(int),
 		.mode		= 0666,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= proc_dointvec,
 	},
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "fastctxsw",
 		.data		= &pfm_sysctl.fastctxsw,
 		.maxlen		= sizeof(int),
 		.mode		= 0600,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= proc_dointvec,
 	},
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "expert_mode",
 		.data		= &pfm_sysctl.expert_mode,
 		.maxlen		= sizeof(int),
 		.mode		= 0600,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= proc_dointvec,
 	},
 	{}
 };
-static ctl_table pfm_sysctl_dir[] = {
+static struct ctl_table pfm_sysctl_dir[] = {
 	{
-		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "perfmon",
 		.mode		= 0555,
 		.child		= pfm_ctl_table,
 	},
 	{}
 };
-static ctl_table pfm_sysctl_root[] = {
+static struct ctl_table pfm_sysctl_root[] = {
 	{
-		.ctl_name	= CTL_KERN,
 		.procname	= "kernel",
 		.mode		= 0555,
 		.child		= pfm_sysctl_dir,
@@ -609,31 +605,22 @@ pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
 	spin_unlock(&(x)->ctx_lock);
 }
 
-static inline unsigned int
-pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
-{
-	return do_munmap(mm, addr, len);
-}
-
-static inline unsigned long
-pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
-{
-	return get_unmapped_area(file, addr, len, pgoff, flags);
-}
-
+/* forward declaration */
+static const struct dentry_operations pfmfs_dentry_operations;
 
-static int
-pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
-	     struct vfsmount *mnt)
+static struct dentry *
+pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
 {
-	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
+	return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
+			PFMFS_MAGIC);
 }
 
 static struct file_system_type pfm_fs_type = {
 	.name     = "pfmfs",
-	.get_sb   = pfmfs_get_sb,
+	.mount    = pfmfs_mount,
 	.kill_sb  = kill_anon_super,
 };
+MODULE_ALIAS_FS("pfmfs");
 
 DEFINE_PER_CPU(unsigned long, pfm_syst_info);
 DEFINE_PER_CPU(struct task_struct *, pmu_owner);
@@ -834,10 +821,9 @@ pfm_rvmalloc(unsigned long size)
 	unsigned long addr;
 
 	size = PAGE_ALIGN(size);
-	mem  = vmalloc(size);
+	mem  = vzalloc(size);
 	if (mem) {
 		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
-		memset(mem, 0, size);
 		addr = (unsigned long)mem;
 		while (size > 0) {
 			pfm_reserve_page(addr);
@@ -867,7 +853,7 @@ pfm_rvfree(void *mem, unsigned long size)
 }
 
 static pfm_context_t *
-pfm_context_alloc(void)
+pfm_context_alloc(int ctx_flags)
 {
 	pfm_context_t *ctx;
 
@@ -878,6 +864,46 @@ pfm_context_alloc(void)
 	ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
 	if (ctx) {
 		DPRINT(("alloc ctx @%p\n", ctx));
+
+		/*
+		 * init context protection lock
+		 */
+		spin_lock_init(&ctx->ctx_lock);
+
+		/*
+		 * context is unloaded
+		 */
+		ctx->ctx_state = PFM_CTX_UNLOADED;
+
+		/*
+		 * initialization of context's flags
+		 */
+		ctx->ctx_fl_block       = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
+		ctx->ctx_fl_system      = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
+		ctx->ctx_fl_no_msg      = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
+		/*
+		 * will move to set properties
+		 * ctx->ctx_fl_excl_idle   = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
+		 */
+
+		/*
+		 * init restart semaphore to locked
+		 */
+		init_completion(&ctx->ctx_restart_done);
+
+		/*
+		 * activation is used in SMP only
+		 */
+		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
+		SET_LAST_CPU(ctx, -1);
+
+		/*
+		 * initialize notification message queue
+		 */
+		ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
+		init_waitqueue_head(&ctx->ctx_msgq_wait);
+		init_waitqueue_head(&ctx->ctx_zombieq);
+
 	}
 	return ctx;
 }
@@ -1297,8 +1323,6 @@ out:
 }
 EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
 
-extern void update_pal_halt_status(int);
-
 static int
 pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 {
@@ -1346,9 +1370,9 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
 		cpu));
 
 	/*
-	 * disable default_idle() to go to PAL_HALT
+	 * Force idle() into poll mode
 	 */
-	update_pal_halt_status(0);
+	cpu_idle_poll_ctrl(true);
 
 	UNLOCK_PFS(flags);
@@ -1405,11 +1429,8 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
 		is_syswide,
 		cpu));
 
-	/*
-	 * if possible, enable default_idle() to go into PAL_HALT
-	 */
-	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
-		update_pal_halt_status(1);
+	/* Undo forced polling. Last session reenables pal_halt */
+	cpu_idle_poll_ctrl(false);
 
 	UNLOCK_PFS(flags);
@@ -1422,8 +1443,9 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
  * a PROTECT_CTX() section.
  */
 static int
-pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
+pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
 {
+	struct task_struct *task = current;
 	int r;
 
 	/* sanity checks */
@@ -1437,13 +1459,8 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
 	/*
 	 * does the actual unmapping
 	 */
-	down_write(&task->mm->mmap_sem);
-
-	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));
+	r = vm_munmap((unsigned long)vaddr, size);
 
-	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);
-
-	up_write(&task->mm->mmap_sem);
 	if (r !=0) {
 		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
 	}
@@ -1507,7 +1524,7 @@ pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
  * any operations on the root directory. However, we need a non-trivial
  * d_name - pfm: will go nicely and kill the special-casing in procfs.
  */
-static struct vfsmount *pfmfs_mnt;
+static struct vfsmount *pfmfs_mnt __read_mostly;
 
 static int __init
 init_pfm_fs(void)
@@ -1537,7 +1554,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 		return -EINVAL;
 	}
 
-	ctx = (pfm_context_t *)filp->private_data;
+	ctx = filp->private_data;
 	if (ctx == NULL) {
 		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EINVAL;
@@ -1637,7 +1654,7 @@ pfm_poll(struct file *filp, poll_table * wait)
 		return 0;
 	}
 
-	ctx = (pfm_context_t *)filp->private_data;
+	ctx = filp->private_data;
 	if (ctx == NULL) {
 		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
 		return 0;
@@ -1660,8 +1677,8 @@ pfm_poll(struct file *filp, poll_table * wait)
 	return mask;
 }
 
-static int
-pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+static long
+pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	DPRINT(("pfm_ioctl called\n"));
 	return -EINVAL;
@@ -1697,7 +1714,7 @@ pfm_fasync(int fd, struct file *filp, int on)
 		return -EBADF;
 	}
 
-	ctx = (pfm_context_t *)filp->private_data;
+	ctx = filp->private_data;
 	if (ctx == NULL) {
 		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
@@ -1780,7 +1797,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
 	int ret;
 
 	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
-	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
 	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */
@@ -1805,7 +1822,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
 		return -EBADF;
 	}
 
-	ctx = (pfm_context_t *)filp->private_data;
+	ctx = filp->private_data;
 	if (ctx == NULL) {
 		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
@@ -1824,11 +1841,6 @@ pfm_flush(struct file *filp, fl_owner_t id)
 	 * invoked after, it will find an empty queue and no
 	 * signal will be sent. In both case, we are safe
 	 */
-	if (filp->f_flags & FASYNC) {
-		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
-		pfm_do_fasync (-1, filp, ctx, 0);
-	}
-
 	PROTECT_CTX(ctx, flags);
 
 	state = ctx->ctx_state;
@@ -1914,7 +1926,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
 	 * because some VM function reenables interrupts.
 	 *
 	 */
-	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
+	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);
 
 	return 0;
 }
@@ -1953,7 +1965,7 @@ pfm_close(struct inode *inode, struct file *filp)
 		return -EBADF;
 	}
 
-	ctx = (pfm_context_t *)filp->private_data;
+	ctx = filp->private_data;
 	if (ctx == NULL) {
 		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
 		return -EBADF;
@@ -2143,115 +2155,72 @@ pfm_no_open(struct inode *irrelevant, struct file *dontcare)
 
 
 static const struct file_operations pfm_file_ops = {
-	.llseek   = no_llseek,
-	.read     = pfm_read,
-	.write    = pfm_write,
-	.poll     = pfm_poll,
-	.ioctl    = pfm_ioctl,
-	.open     = pfm_no_open,	/* special open code to disallow open via /proc */
-	.fasync   = pfm_fasync,
-	.release  = pfm_close,
-	.flush    = pfm_flush
+	.llseek		= no_llseek,
+	.read		= pfm_read,
+	.write		= pfm_write,
+	.poll		= pfm_poll,
+	.unlocked_ioctl	= pfm_ioctl,
+	.open		= pfm_no_open,	/* special open code to disallow open via /proc */
+	.fasync		= pfm_fasync,
+	.release	= pfm_close,
+	.flush		= pfm_flush
 };
 
-static int
-pfmfs_delete_dentry(struct dentry *dentry)
+static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
 {
-	return 1;
+	return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
+			     dentry->d_inode->i_ino);
 }
 
-static struct dentry_operations pfmfs_dentry_operations = {
-	.d_delete = pfmfs_delete_dentry,
+static const struct dentry_operations pfmfs_dentry_operations = {
+	.d_delete = always_delete_dentry,
+	.d_dname = pfmfs_dname,
 };
 
-static int
-pfm_alloc_fd(struct file **cfile)
+static struct file *
+pfm_alloc_file(pfm_context_t *ctx)
 {
-	int fd, ret = 0;
-	struct file *file = NULL;
-	struct inode * inode;
-	char name[32];
-	struct qstr this;
-
-	fd = get_unused_fd();
-	if (fd < 0) return -ENFILE;
-
-	ret = -ENFILE;
-
-	file = get_empty_filp();
-	if (!file) goto out;
+	struct file *file;
+	struct inode *inode;
+	struct path path;
+	struct qstr this = { .name = "" };
 
 	/*
 	 * allocate a new inode
 	 */
 	inode = new_inode(pfmfs_mnt->mnt_sb);
-	if (!inode) goto out;
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
 
 	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
 
 	inode->i_mode = S_IFCHR|S_IRUGO;
-	inode->i_uid  = current->fsuid;
-	inode->i_gid  = current->fsgid;
-
-	sprintf(name, "[%lu]", inode->i_ino);
-	this.name = name;
-	this.len  = strlen(name);
-	this.hash = inode->i_ino;
-
-	ret = -ENOMEM;
+	inode->i_uid  = current_fsuid();
+	inode->i_gid  = current_fsgid();
 
 	/*
 	 * allocate a new dcache entry
 	 */
-	file->f_path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
-	if (!file->f_path.dentry) goto out;
+	path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
+	if (!path.dentry) {
+		iput(inode);
+		return ERR_PTR(-ENOMEM);
+	}
+	path.mnt = mntget(pfmfs_mnt);
 
-	file->f_path.dentry->d_op = &pfmfs_dentry_operations;
+	d_add(path.dentry, inode);
 
-	d_add(file->f_path.dentry, inode);
-	file->f_path.mnt = mntget(pfmfs_mnt);
-	file->f_mapping = inode->i_mapping;
+	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
+	if (IS_ERR(file)) {
+		path_put(&path);
+		return file;
	}
 
-	file->f_op = &pfm_file_ops;
-	file->f_mode = FMODE_READ;
 	file->f_flags = O_RDONLY;
-	file->f_pos  = 0;
+	file->private_data = ctx;
 
-	/*
-	 * may have to delay until context is attached?
-	 */
-	fd_install(fd, file);
-
-	/*
-	 * the file structure we will use
-	 */
-	*cfile = file;
-
-	return fd;
-out:
-	if (file) put_filp(file);
-	put_unused_fd(fd);
-	return ret;
-}
-
-static void
-pfm_free_fd(int fd, struct file *file)
-{
-	struct files_struct *files = current->files;
-	struct fdtable *fdt;
-
-	/*
-	 * there ie no fd_uninstall(), so we do it here
-	 */
-	spin_lock(&files->file_lock);
-	fdt = files_fdtable(files);
-	rcu_assign_pointer(fdt->fd[fd], NULL);
-	spin_unlock(&files->file_lock);
-
-	if (file)
-		put_filp(file);
-	put_unused_fd(fd);
+	return file;
 }
 
 static int
@@ -2300,7 +2269,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 	 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
 	 * 	return -ENOMEM;
 	 */
-	if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
+	if (size > task_rlimit(task, RLIMIT_MEMLOCK))
 		return -ENOMEM;
 
 	/*
@@ -2322,13 +2291,14 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 		DPRINT(("Cannot allocate vma\n"));
 		goto error_kmem;
 	}
+	INIT_LIST_HEAD(&vma->anon_vma_chain);
 
 	/*
 	 * partially initialize the vma for the sampling buffer
 	 */
 	vma->vm_mm	     = mm;
-	vma->vm_file	     = filp;
-	vma->vm_flags	     = VM_READ| VM_MAYREAD |VM_RESERVED;
+	vma->vm_file	     = get_file(filp);
+	vma->vm_flags	     = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
 	vma->vm_page_prot    = PAGE_READONLY; /* XXX may need to change */
 
 	/*
@@ -2348,8 +2318,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 	down_write(&task->mm->mmap_sem);
 
 	/* find some free area in address space, must have mmap sem held */
-	vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
-	if (vma->vm_start == 0UL) {
+	vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
+	if (IS_ERR_VALUE(vma->vm_start)) {
 		DPRINT(("Cannot find unmapped area for size %ld\n", size));
 		up_write(&task->mm->mmap_sem);
 		goto error;
@@ -2366,15 +2336,12 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
 		goto error;
 	}
 
-	get_file(filp);
-
 	/*
 	 * now insert the vma in the vm list for the process, must be
 	 * done with mmap lock held
 	 */
 	insert_vm_struct(mm, vma);
 
-	mm->total_vm  += size >> PAGE_SHIFT;
 	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, vma_pages(vma));
 	up_write(&task->mm->mmap_sem);
@@ -2401,22 +2368,33 @@ error_kmem:
 static int
 pfm_bad_permissions(struct task_struct *task)
 {
+	const struct cred *tcred;
+	kuid_t uid = current_uid();
+	kgid_t gid = current_gid();
+	int ret;
+
+	rcu_read_lock();
+	tcred = __task_cred(task);
+
 	/* inspired by ptrace_attach() */
 	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
-		current->uid,
-		current->gid,
-		task->euid,
-		task->suid,
-		task->uid,
-		task->egid,
-		task->sgid));
-
-	return ((current->uid != task->euid)
-	    || (current->uid != task->suid)
-	    || (current->uid != task->uid)
-	    || (current->gid != task->egid)
-	    || (current->gid != task->sgid)
-	    || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
+		from_kuid(&init_user_ns, uid),
+		from_kgid(&init_user_ns, gid),
+		from_kuid(&init_user_ns, tcred->euid),
+		from_kuid(&init_user_ns, tcred->suid),
+		from_kuid(&init_user_ns, tcred->uid),
+		from_kgid(&init_user_ns, tcred->egid),
+		from_kgid(&init_user_ns, tcred->sgid)));
+
+	ret = ((!uid_eq(uid, tcred->euid))
+	       || (!uid_eq(uid, tcred->suid))
+	       || (!uid_eq(uid, tcred->uid))
+	       || (!gid_eq(gid, tcred->egid))
+	       || (!gid_eq(gid, tcred->sgid))
+	       || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE);
+
+	rcu_read_unlock();
+	return ret;
 }
 
 static int
@@ -2475,6 +2453,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
 
 	/* link buffer format and context */
 	ctx->ctx_buf_fmt = fmt;
+	ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */
 
 	/*
 	 * check if buffer format wants to use perfmon buffer allocation/mapping service
@@ -2623,7 +2602,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
 	/*
 	 * make sure the task is off any CPU
 	 */
-	wait_task_inactive(task);
+	wait_task_inactive(task, 0);
 
 	/* more to come... */
@@ -2669,79 +2648,46 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 {
 	pfarg_context_t *req = (pfarg_context_t *)arg;
 	struct file *filp;
+	struct path path;
 	int ctx_flags;
+	int fd;
 	int ret;
 
 	/* let's check the arguments first */
 	ret = pfarg_is_sane(current, req);
-	if (ret < 0) return ret;
+	if (ret < 0)
+		return ret;
 
 	ctx_flags = req->ctx_flags;
 
 	ret = -ENOMEM;
 
-	ctx = pfm_context_alloc();
-	if (!ctx) goto error;
+	fd = get_unused_fd();
+	if (fd < 0)
+		return fd;
 
-	ret = pfm_alloc_fd(&filp);
-	if (ret < 0) goto error_file;
+	ctx = pfm_context_alloc(ctx_flags);
+	if (!ctx)
+		goto error;
 
-	req->ctx_fd = ctx->ctx_fd = ret;
+	filp = pfm_alloc_file(ctx);
+	if (IS_ERR(filp)) {
+		ret = PTR_ERR(filp);
+		goto error_file;
+	}
 
-	/*
-	 * attach context to file
-	 */
-	filp->private_data = ctx;
+	req->ctx_fd = ctx->ctx_fd = fd;
 
 	/*
 	 * does the user want to sample?
 	 */
 	if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
 		ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
-		if (ret) goto buffer_error;
+		if (ret)
+			goto buffer_error;
 	}
 
-	/*
-	 * init context protection lock
-	 */
-	spin_lock_init(&ctx->ctx_lock);
-
-	/*
-	 * context is unloaded
-	 */
-	ctx->ctx_state = PFM_CTX_UNLOADED;
-
-	/*
-	 * initialization of context's flags
-	 */
-	ctx->ctx_fl_block       = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
-	ctx->ctx_fl_system      = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
-	ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0; /* assume record() is defined */
-	ctx->ctx_fl_no_msg      = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
-	/*
-	 * will move to set properties
-	 * ctx->ctx_fl_excl_idle   = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0;
-	 */
-
-	/*
-	 * init restart semaphore to locked
-	 */
-	init_completion(&ctx->ctx_restart_done);
-
-	/*
-	 * activation is used in SMP only
-	 */
-	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
-	SET_LAST_CPU(ctx, -1);
-
-	/*
-	 * initialize notification message queue
-	 */
-	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
-	init_waitqueue_head(&ctx->ctx_msgq_wait);
-	init_waitqueue_head(&ctx->ctx_zombieq);
-
-	DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
+	DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
 		ctx,
 		ctx_flags,
 		ctx->ctx_fl_system,
@@ -2755,10 +2701,14 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
 	 */
 	pfm_reset_pmu_state(ctx);
 
+	fd_install(fd, filp);
+
 	return 0;
 
 buffer_error:
-	pfm_free_fd(ctx->ctx_fd, filp);
+	path = filp->f_path;
+	put_filp(filp);
+	path_put(&path);
 
 	if (ctx->ctx_buf_fmt) {
 		pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
@@ -2767,6 +2717,7 @@ error_file:
 	pfm_context_free(ctx);
 
 error:
+	put_unused_fd(fd);
 	return ret;
 }
@@ -3541,7 +3492,7 @@ pfm_use_debug_registers(struct task_struct *task)
  * IA64_THREAD_DBG_VALID set. This indicates a task which was
  * able to use the debug registers for debugging purposes via
  * ptrace(). Therefore we know it was not using them for
- * perfmormance monitoring, so we only decrement the number
+ * performance monitoring, so we only decrement the number
  * of "ptraced" debug register users to keep the count up to date
 */
 int
@@ -3700,7 +3651,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 	 * "self-monitoring".
 	 */
 	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
-		DPRINT(("unblocking [%d] \n", task_pid_nr(task)));
+		DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
 		complete(&ctx->ctx_restart_done);
 	} else {
 		DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
@@ -3709,7 +3660,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
 
 		PFM_SET_WORK_PENDING(task, 1);
 
-		tsk_set_notify_resume(task);
+		set_notify_resume(task);
 
 		/*
 		 * XXX: send reschedule if task runs on another CPU
@@ -4204,10 +4155,10 @@ pfm_check_task_exist(pfm_context_t *ctx)
 	do_each_thread (g, t) {
 		if (t->thread.pfm_context == ctx) {
 			ret = 0;
-			break;
+			goto out;
 		}
 	} while_each_thread (g, t);
-
+out:
 	read_unlock(&tasklist_lock);
 
 	DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
@@ -4799,7 +4750,7 @@ recheck:
 
 	UNPROTECT_CTX(ctx, flags);
 
-	wait_task_inactive(task);
+	wait_task_inactive(task, 0);
 
 	PROTECT_CTX(ctx, flags);
@@ -4820,7 +4771,7 @@ recheck:
 asmlinkage long
 sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
 {
-	struct file *file = NULL;
+	struct fd f = {NULL, 0};
 	pfm_context_t *ctx = NULL;
 	unsigned long flags = 0UL;
 	void *args_k = NULL;
@@ -4917,17 +4868,17 @@ restart_args:
 
 	ret = -EBADF;
 
-	file = fget(fd);
-	if (unlikely(file == NULL)) {
+	f = fdget(fd);
+	if (unlikely(f.file == NULL)) {
 		DPRINT(("invalid fd %d\n", fd));
 		goto error_args;
 	}
-	if (unlikely(PFM_IS_FILE(file) == 0)) {
+	if (unlikely(PFM_IS_FILE(f.file) == 0)) {
 		DPRINT(("fd %d not related to perfmon\n", fd));
 		goto error_args;
 	}
 
-	ctx = (pfm_context_t *)file->private_data;
+	ctx = f.file->private_data;
 	if (unlikely(ctx == NULL)) {
 		DPRINT(("no context for fd %d\n", fd));
 		goto error_args;
@@ -4957,8 +4908,8 @@ abort_locked:
 	if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
 
 error_args:
-	if (file)
-		fput(file);
+	if (f.file)
+		fdput(f);
 
 	kfree(args_k);
@@ -5038,12 +4989,13 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 }
 
 static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
+
 /*
  * pfm_handle_work() can be called with interrupts enabled
  * (TIF_NEED_RESCHED) or disabled. The down_interruptible
  * call may sleep, therefore we must re-enable interrupts
  * to avoid deadlocks. It is safe to do so because this function
- * is called ONLY when returning to user level (PUStk=1), in which case
+ * is called ONLY when returning to user level (pUStk=1), in which case
  * there is no risk of kernel stack overflow due to deep
 * interrupt nesting.
  */
@@ -5059,7 +5011,8 @@ pfm_handle_work(void)
 
 	ctx = PFM_GET_CTX(current);
 	if (ctx == NULL) {
-		printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
+		printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
+			task_pid_nr(current));
 		return;
 	}
@@ -5067,8 +5020,6 @@ pfm_handle_work(void)
 
 	PFM_SET_WORK_PENDING(current, 0);
 
-	tsk_clear_notify_resume(current);
-
 	regs = task_pt_regs(current);
 
 	/*
@@ -5083,11 +5034,12 @@ pfm_handle_work(void)
 	/*
 	 * must be done before we check for simple-reset mode
 	 */
-	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;
-
+	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
+		goto do_zombie;
 
 	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
-	if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
+	if (reason == PFM_TRAP_REASON_RESET)
+		goto skip_blocking;
 
 	/*
 	 * restore interrupt mask to what it was on entry.
@@ -5135,7 +5087,8 @@ do_zombie:
 	/*
 	 * in case of interruption of down() we don't restart anything
 	 */
-	if (ret < 0) goto nothing_to_do;
+	if (ret < 0)
+		goto nothing_to_do;
 
 skip_blocking:
 	pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
@@ -5229,8 +5182,8 @@ pfm_end_notify_user(pfm_context_t *ctx)
  * main overflow processing routine.
 * it can be called from the interrupt path or explicitly during the context switch code
  */
-static void
-pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
+static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
+				unsigned long pmc0, struct pt_regs *regs)
 {
 	pfm_ovfl_arg_t *ovfl_arg;
 	unsigned long mask;
@@ -5435,7 +5388,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
 			 * when coming from ctxsw, current still points to the
 			 * previous task, therefore we must work with task and not current.
 			 */
-			tsk_set_notify_resume(task);
+			set_notify_resume(task);
 		}
 		/*
 		 * defer until state is changed (shorten spin window). the context is locked
@@ -5511,7 +5464,7 @@ stop_monitoring:
 }
 
 static int
-pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
+pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
 {
 	struct task_struct *task;
 	pfm_context_t *ctx;
@@ -5591,7 +5544,7 @@ pfm_interrupt_handler(int irq, void *arg)
 
 		start_cycles = ia64_get_itc();
 
-		ret = pfm_do_interrupt_handler(irq, arg, regs);
+		ret = pfm_do_interrupt_handler(arg, regs);
 
 		total_cycles = ia64_get_itc();
@@ -5611,7 +5564,7 @@ pfm_interrupt_handler(int irq, void *arg)
 		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
 	}
 
-	put_cpu_no_resched();
+	put_cpu();
 	return IRQ_HANDLED;
 }
@@ -5619,7 +5572,7 @@ pfm_interrupt_handler(int irq, void *arg)
 * /proc/perfmon interface, for debug only
  */
 
-#define PFM_PROC_SHOW_HEADER	((void *)NR_CPUS+1)
+#define PFM_PROC_SHOW_HEADER	((void *)(long)nr_cpu_ids+1)
 
 static void *
 pfm_proc_start(struct seq_file *m, loff_t *pos)
@@ -5628,7 +5581,7 @@ pfm_proc_start(struct seq_file *m, loff_t *pos)
 		return PFM_PROC_SHOW_HEADER;
 	}
 
-	while (*pos <= NR_CPUS) {
+	while (*pos <= nr_cpu_ids) {
 		if (cpu_online(*pos - 1)) {
 			return (void *)*pos;
 		}
@@ -5688,24 +5641,8 @@ pfm_proc_show_header(struct seq_file *m)
 
 	list_for_each(pos, &pfm_buffer_fmt_list) {
 		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
-		seq_printf(m, "format                    : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
-			entry->fmt_uuid[0],
-			entry->fmt_uuid[1],
-			entry->fmt_uuid[2],
-			entry->fmt_uuid[3],
-			entry->fmt_uuid[4],
-			entry->fmt_uuid[5],
-			entry->fmt_uuid[6],
-			entry->fmt_uuid[7],
-			entry->fmt_uuid[8],
-			entry->fmt_uuid[9],
-			entry->fmt_uuid[10],
-			entry->fmt_uuid[11],
-			entry->fmt_uuid[12],
-			entry->fmt_uuid[13],
-			entry->fmt_uuid[14],
-			entry->fmt_uuid[15],
-			entry->fmt_name);
+		seq_printf(m, "format                    : %16phD %s\n",
+			   entry->fmt_uuid, entry->fmt_name);
 	}
 	spin_unlock(&pfm_buffer_fmt_lock);
@@ -6450,7 +6387,6 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
 
 static struct irqaction perfmon_irqaction = {
 	.handler = pfm_interrupt_handler,
-	.flags   = IRQF_DISABLED,
 	.name    = "perfmon"
 };
@@ -6529,7 +6465,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 	}
 
 	/* save the current system wide pmu states */
-	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 		goto cleanup_reserve;
@@ -6574,7 +6510,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 
 	pfm_alt_intr_handler = NULL;
 
-	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 	}
@@ -6695,16 +6631,12 @@ pfm_init(void)
 	/*
	 * create /proc/perfmon (mostly for debugging purposes)
 	 */
-	perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
+	perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
 	if (perfmon_dir == NULL) {
 		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
 		pmu_conf = NULL;
 		return -1;
 	}
-	/*
-	 * install customized file operations for /proc/perfmon entry
-	 */
-	perfmon_dir->proc_fops = &pfm_proc_fops;
 
 	/*
	 * create /proc/sys/kernel/perfmon (for debugging purposes)
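A note on the pfm_remove_smpl_mapping() hunk above: the patch drops the open-coded mmap_sem locking around do_munmap() in favor of vm_munmap(), which operates on current->mm and takes the semaphore internally. Here is a minimal before/after sketch of that pattern, not taken from the patch itself; old_style_unmap() and new_style_unmap() are hypothetical helper names for illustration, while down_write()/do_munmap()/vm_munmap() are the real kernel APIs of that era.

#include <linux/mm.h>
#include <linux/sched.h>

/* the pattern the patch removes: caller manages mmap_sem explicitly */
static int old_style_unmap(struct mm_struct *mm, unsigned long addr, size_t len)
{
	int ret;

	down_write(&mm->mmap_sem);	/* do_munmap() requires mmap_sem held for write */
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

/* the replacement: vm_munmap() acts on current->mm and locks internally */
static int new_style_unmap(unsigned long addr, size_t len)
{
	return vm_munmap(addr, len);
}

This is also why the function loses its task argument: vm_munmap() only works on the current task's address space, which matches how pfm_remove_smpl_mapping() is actually called.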

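Similarly, the sys_perfmonctl() hunks convert fget()/fput() to the lighter fdget()/fdput() pair. A hedged sketch of that idiom follows; my_fd_op() is a hypothetical caller invented for this example, while struct fd, fdget() and fdput() are the real API from <linux/file.h>.

#include <linux/file.h>
#include <linux/fs.h>

static int my_fd_op(unsigned int fd)
{
	struct fd f = fdget(fd);	/* may avoid refcounting for single-threaded tasks */
	int ret = -EBADF;

	if (!f.file)
		return ret;

	/* ... operate on f.file->private_data, as sys_perfmonctl() does ... */
	ret = 0;

	fdput(f);	/* drops the reference only if fdget() actually took one */
	return ret;
}

The design point is that fdget() can skip the atomic refcount when the file table cannot change underneath the caller, so fdput() must be handed back the same struct fd to know whether a reference was taken.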