path: root/arch/mips/kernel/ptrace.c
Diffstat (limited to 'arch/mips/kernel/ptrace.c')
-rw-r--r--  arch/mips/kernel/ptrace.c | 520
1 file changed, 349 insertions(+), 171 deletions(-)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 35234b92b9a..f639ccd5060 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -15,16 +15,21 @@
* binaries.
*/
#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
+#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
+#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
+#include <linux/ftrace.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
@@ -34,11 +39,14 @@
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
-#include <asm/system.h>
+#include <asm/syscall.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -46,11 +54,12 @@
*/
void ptrace_disable(struct task_struct *child)
{
- /* Nothing to do.. */
+ /* Don't load the watchpoint registers for the ex-child. */
+ clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}
/*
- * Read a general register set. We always use the 64-bit format, even
+ * Read a general register set. We always use the 64-bit format, even
* for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
* Registers are sign extended to fill the available space.
*/
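As an illustration of the sign extension described above: in the 64-bit dump a 32-bit register value such as 0x80001234 comes back as 0xffffffff80001234, so a tracer debugging an o32 task truncates each slot back down. A minimal userspace sketch, assuming the usual 38-doubleword PTRACE_GETREGS layout (32 GPRs, then lo, hi, epc, badvaddr, status, cause):

#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Hedged sketch: fetch $v0 (register 2) of a stopped o32 tracee. */
static int read_v0(pid_t pid, uint32_t *v0)
{
	uint64_t gregs[38];

	if (ptrace(PTRACE_GETREGS, pid, NULL, gregs) == -1)
		return -1;
	*v0 = (uint32_t)gregs[2];	/* drop the sign-extended upper half */
	return 0;
}
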
@@ -105,51 +114,30 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data)
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
int i;
- unsigned int tmp;
if (!access_ok(VERIFY_WRITE, data, 33 * 8))
return -EIO;
if (tsk_used_math(child)) {
- fpureg_t *fregs = get_fpu_regs(child);
+ union fpureg *fregs = get_fpu_regs(child);
for (i = 0; i < 32; i++)
- __put_user(fregs[i], i + (__u64 __user *) data);
+ __put_user(get_fpr64(&fregs[i], 0),
+ i + (__u64 __user *)data);
} else {
for (i = 0; i < 32; i++)
__put_user((__u64) -1, i + (__u64 __user *) data);
}
__put_user(child->thread.fpu.fcr31, data + 64);
-
- preempt_disable();
- if (cpu_has_fpu) {
- unsigned int flags;
-
- if (cpu_has_mipsmt) {
- unsigned int vpflags = dvpe();
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
- write_c0_status(flags);
- evpe(vpflags);
- } else {
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
- write_c0_status(flags);
- }
- } else {
- tmp = 0;
- }
- preempt_enable();
- __put_user(tmp, data + 65);
+ __put_user(current_cpu_data.fpu_id, data + 65);
return 0;
}
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
- fpureg_t *fregs;
+ union fpureg *fregs;
+ u64 fpr_val;
int i;
if (!access_ok(VERIFY_READ, data, 33 * 8))
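The dump produced by ptrace_getfpregs() above is 33 doublewords: the 32 FP registers, then fcr31 and the implementation/version word (now sourced from current_cpu_data.fpu_id) packed into the final doubleword, matching the access_ok(..., 33 * 8) checks. A hedged userspace sketch of reading it back; the struct name is made up here for illustration:

#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Assumed layout mirroring the __put_user() offsets above. */
struct mips_fp_dump {
	uint64_t fp_r[32];	/* FP registers, one doubleword each */
	uint32_t fcr31;		/* FP control/status, data + 64      */
	uint32_t fir;		/* implementation/version, data + 65 */
};

static int read_fcr31(pid_t pid, uint32_t *fcr31)
{
	struct mips_fp_dump d;

	if (ptrace(PTRACE_GETFPREGS, pid, NULL, &d) == -1)
		return -1;
	*fcr31 = d.fcr31;
	return 0;
}
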
@@ -157,8 +145,10 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
fregs = get_fpu_regs(child);
- for (i = 0; i < 32; i++)
- __get_user(fregs[i], i + (__u64 __user *) data);
+ for (i = 0; i < 32; i++) {
+ __get_user(fpr_val, i + (__u64 __user *)data);
+ set_fpr64(&fregs[i], 0, fpr_val);
+ }
__get_user(child->thread.fpu.fcr31, data + 64);
@@ -167,9 +157,261 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
return 0;
}
-long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+int ptrace_get_watch_regs(struct task_struct *child,
+ struct pt_watch_regs __user *addr)
+{
+ enum pt_watch_style style;
+ int i;
+
+ if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
+ return -EIO;
+ if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
+ return -EIO;
+
+#ifdef CONFIG_32BIT
+ style = pt_watch_style_mips32;
+#define WATCH_STYLE mips32
+#else
+ style = pt_watch_style_mips64;
+#define WATCH_STYLE mips64
+#endif
+
+ __put_user(style, &addr->style);
+ __put_user(boot_cpu_data.watch_reg_use_cnt,
+ &addr->WATCH_STYLE.num_valid);
+ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
+ __put_user(child->thread.watch.mips3264.watchlo[i],
+ &addr->WATCH_STYLE.watchlo[i]);
+ __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
+ &addr->WATCH_STYLE.watchhi[i]);
+ __put_user(boot_cpu_data.watch_reg_masks[i],
+ &addr->WATCH_STYLE.watch_masks[i]);
+ }
+ for (; i < 8; i++) {
+ __put_user(0, &addr->WATCH_STYLE.watchlo[i]);
+ __put_user(0, &addr->WATCH_STYLE.watchhi[i]);
+ __put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
+ }
+
+ return 0;
+}
+
+int ptrace_set_watch_regs(struct task_struct *child,
+ struct pt_watch_regs __user *addr)
+{
+ int i;
+ int watch_active = 0;
+ unsigned long lt[NUM_WATCH_REGS];
+ u16 ht[NUM_WATCH_REGS];
+
+ if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
+ return -EIO;
+ if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
+ return -EIO;
+ /* Check the values. */
+ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
+ __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
+#ifdef CONFIG_32BIT
+ if (lt[i] & __UA_LIMIT)
+ return -EINVAL;
+#else
+ if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
+ if (lt[i] & 0xffffffff80000000UL)
+ return -EINVAL;
+ } else {
+ if (lt[i] & __UA_LIMIT)
+ return -EINVAL;
+ }
+#endif
+ __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
+ if (ht[i] & ~0xff8)
+ return -EINVAL;
+ }
+ /* Install them. */
+ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
+ if (lt[i] & 7)
+ watch_active = 1;
+ child->thread.watch.mips3264.watchlo[i] = lt[i];
+ /* Set the G bit. */
+ child->thread.watch.mips3264.watchhi[i] = ht[i];
+ }
+
+ if (watch_active)
+ set_tsk_thread_flag(child, TIF_LOAD_WATCH);
+ else
+ clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
+
+ return 0;
+}
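The two helpers above service the PTRACE_GET_WATCH_REGS and PTRACE_SET_WATCH_REGS requests wired up further down in arch_ptrace(); note the struct pointer travels in the ptrace addr argument. A rough tracer-side sketch of arming a write watchpoint; treating bit 0 of watchlo as the W enable is an assumption read off the `lt[i] & 7` activity check above:

#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* struct pt_watch_regs, PTRACE_*_WATCH_REGS */

static int set_write_watch(pid_t pid, unsigned long addr)
{
	struct pt_watch_regs wr;

	memset(&wr, 0, sizeof(wr));
	if (ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL) == -1)
		return -1;				/* no watch support */
#ifdef __mips64
	wr.mips64.watchlo[0] = (addr & ~7UL) | 1;	/* address + assumed W bit */
	wr.mips64.watchhi[0] = 0;			/* no address mask */
#else
	wr.mips32.watchlo[0] = (addr & ~7UL) | 1;
	wr.mips32.watchhi[0] = 0;
#endif
	return ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);
}
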
+
+/* regset get/set implementations */
+
+static int gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs, 0, sizeof(*regs));
+}
+
+static int gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs newregs;
+ int ret;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &newregs,
+ 0, sizeof(newregs));
+ if (ret)
+ return ret;
+
+ *task_pt_regs(target) = newregs;
+
+ return 0;
+}
+
+static int fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ unsigned i;
+ int err;
+ u64 fpr_val;
+
+ /* XXX fcr31 */
+
+ if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu,
+ 0, sizeof(elf_fpregset_t));
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+ err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned i;
+ int err;
+ u64 fpr_val;
+
+ /* XXX fcr31 */
+
+ if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu,
+ 0, sizeof(elf_fpregset_t));
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
+ if (err)
+ return err;
+ set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+ }
+
+ return 0;
+}
+
+enum mips_regset {
+ REGSET_GPR,
+ REGSET_FPR,
+};
+
+static const struct user_regset mips_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned int),
+ .align = sizeof(unsigned int),
+ .get = gpr_get,
+ .set = gpr_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .get = fpr_get,
+ .set = fpr_set,
+ },
+};
+
+static const struct user_regset_view user_mips_view = {
+ .name = "mips",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips_regsets,
+ .n = ARRAY_SIZE(mips_regsets),
+};
+
+static const struct user_regset mips64_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .get = gpr_get,
+ .set = gpr_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .get = fpr_get,
+ .set = fpr_set,
+ },
+};
+
+static const struct user_regset_view user_mips64_view = {
+ .name = "mips",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips64_regsets,
+	.n		= ARRAY_SIZE(mips64_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+#ifdef CONFIG_32BIT
+ return &user_mips_view;
+#endif
+
+#ifdef CONFIG_MIPS32_O32
+ if (test_thread_flag(TIF_32BIT_REGS))
+ return &user_mips_view;
+#endif
+
+ return &user_mips64_view;
+}
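Exporting a regset view also makes the generic PTRACE_GETREGSET/PTRACE_SETREGSET requests (and regset-driven core dumps) usable on MIPS. A brief sketch of fetching the FP regset; the buffer is deliberately oversized since the kernel trims iov_len to what it actually copied:

#include <elf.h>		/* NT_PRFPREG */
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>		/* struct iovec */

static long dump_fp_regset(pid_t pid, uint64_t *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };

	/* The note type goes in the addr slot, the iovec in data. */
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRFPREG, &iov);
}
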
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
{
int ret;
+ void __user *addrp = (void __user *) addr;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = (void __user *) data;
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
@@ -181,6 +423,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
+ union fpureg *fregs;
unsigned long tmp = 0;
regs = task_pt_regs(child);
@@ -191,26 +434,26 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
tmp = regs->regs[addr];
break;
case FPR_BASE ... FPR_BASE + 31:
- if (tsk_used_math(child)) {
- fpureg_t *fregs = get_fpu_regs(child);
+ if (!tsk_used_math(child)) {
+ /* FP not yet used */
+ tmp = -1;
+ break;
+ }
+ fregs = get_fpu_regs(child);
#ifdef CONFIG_32BIT
+ if (test_thread_flag(TIF_32BIT_FPREGS)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
- if (addr & 1)
- tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
- else
- tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
-#endif
-#ifdef CONFIG_64BIT
- tmp = fregs[addr - FPR_BASE];
-#endif
- } else {
- tmp = -1; /* FP not yet used */
+ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1);
+ break;
}
+#endif
+ tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;
@@ -235,44 +478,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
- case FPC_EIR: { /* implementation / version register */
- unsigned int flags;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned int irqflags;
- unsigned int mtflags;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- preempt_disable();
- if (!cpu_has_fpu) {
- preempt_enable();
- break;
- }
-
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Read-modify-write of Status must be atomic */
- local_irq_save(irqflags);
- mtflags = dmt();
-#endif /* CONFIG_MIPS_MT_SMTC */
- if (cpu_has_mipsmt) {
- unsigned int vpflags = dvpe();
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- evpe(vpflags);
- } else {
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- }
-#ifdef CONFIG_MIPS_MT_SMTC
- emt(mtflags);
- local_irq_restore(irqflags);
-#endif /* CONFIG_MIPS_MT_SMTC */
- preempt_enable();
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = current_cpu_data.fpu_id;
break;
- }
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -298,7 +507,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
ret = -EIO;
goto out;
}
- ret = put_user(tmp, (unsigned long __user *) data);
+ ret = put_user(tmp, datalp);
break;
}
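On the tracer side these USER-area slots (FPR_BASE, PC, FPC_CSR, FPC_EIR and friends from asm/reg.h) are read one word at a time via PTRACE_PEEKUSER, so the FPC_EIR simplification above is directly visible to debuggers. A small sketch, assuming the tracee is already stopped:

#include <asm/reg.h>		/* PC, FPC_EIR, ... */
#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static void show_pc_and_fir(pid_t pid)
{
	long pc, fir;

	errno = 0;
	pc  = ptrace(PTRACE_PEEKUSER, pid, (void *)PC, NULL);
	fir = ptrace(PTRACE_PEEKUSER, pid, (void *)FPC_EIR, NULL);
	if (errno)
		perror("PTRACE_PEEKUSER");
	else
		printf("epc=%#lx fpu_id=%#lx\n", pc, fir);
}
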
@@ -318,7 +527,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
regs->regs[addr] = data;
break;
case FPR_BASE ... FPR_BASE + 31: {
- fpureg_t *fregs = get_fpu_regs(child);
+ union fpureg *fregs = get_fpu_regs(child);
if (!tsk_used_math(child)) {
/* FP not yet used */
@@ -327,22 +536,18 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
child->thread.fpu.fcr31 = 0;
}
#ifdef CONFIG_32BIT
- /*
- * The odd registers are actually the high order bits
- * of the values stored in the even registers - unless
- * we're using r2k_switch.S.
- */
- if (addr & 1) {
- fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
- fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
- } else {
- fregs[addr - FPR_BASE] &= ~0xffffffffLL;
- fregs[addr - FPR_BASE] |= data;
+ if (test_thread_flag(TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers - unless we're using r2k_switch.S.
+ */
+ set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1, data);
+ break;
}
#endif
-#ifdef CONFIG_64BIT
- fregs[addr - FPR_BASE] = data;
-#endif
+ set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
case PC:
@@ -390,54 +595,31 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (__s64 __user *) data);
+ ret = ptrace_getregs(child, datavp);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (__s64 __user *) data);
+ ret = ptrace_setregs(child, datavp);
break;
case PTRACE_GETFPREGS:
- ret = ptrace_getfpregs(child, (__u32 __user *) data);
+ ret = ptrace_getfpregs(child, datavp);
break;
case PTRACE_SETFPREGS:
- ret = ptrace_setfpregs(child, (__u32 __user *) data);
+ ret = ptrace_setfpregs(child, datavp);
break;
- case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
- case PTRACE_CONT: { /* restart after signal. */
- ret = -EIO;
- if (!valid_signal(data))
- break;
- if (request == PTRACE_SYSCALL) {
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- }
- else {
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- }
- child->exit_code = data;
- wake_up_process(child);
- ret = 0;
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(task_thread_info(child)->tp_value, datalp);
break;
- }
- /*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL:
- ret = 0;
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- wake_up_process(child);
+ case PTRACE_GET_WATCH_REGS:
+ ret = ptrace_get_watch_regs(child, addrp);
break;
- case PTRACE_GET_THREAD_AREA:
- ret = put_user(task_thread_info(child)->tp_value,
- (unsigned long __user *) data);
+ case PTRACE_SET_WATCH_REGS:
+ ret = ptrace_set_watch_regs(child, addrp);
break;
default:
@@ -448,56 +630,52 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return ret;
}
-static inline int audit_arch(void)
-{
- int arch = EM_MIPS;
-#ifdef CONFIG_64BIT
- arch |= __AUDIT_ARCH_64BIT;
-#endif
-#if defined(__LITTLE_ENDIAN)
- arch |= __AUDIT_ARCH_LE;
-#endif
- return arch;
-}
-
/*
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
-asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
+asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
- /* do the secure computing check first */
- if (!entryexit)
- secure_computing(regs->regs[0]);
+ long ret = 0;
+ user_exit();
- if (unlikely(current->audit_context) && entryexit)
- audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
- regs->regs[2]);
+ if (secure_computing(syscall) == -1)
+ return -1;
- if (!(current->ptrace & PT_PTRACED))
- goto out;
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ ret = -1;
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- goto out;
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[2]);
- /* The 0x80 provides a way for the tracing parent to distinguish
- between a syscall stop and SIGTRAP delivery */
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
- 0x80 : 0));
+ audit_syscall_entry(syscall_get_arch(),
+ syscall,
+ regs->regs[4], regs->regs[5],
+ regs->regs[6], regs->regs[7]);
+ return syscall;
+}
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
+/*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+{
+ /*
+ * We may come here right after calling schedule_user()
+ * or do_notify_resume(), in which case we can be in RCU
+ * user mode.
*/
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+ user_exit();
+
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->regs[2]);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, 0);
-out:
- if (unlikely(current->audit_context) && !entryexit)
- audit_syscall_entry(audit_arch(), regs->regs[0],
- regs->regs[4], regs->regs[5],
- regs->regs[6], regs->regs[7]);
+ user_enter();
}
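
These entry/exit hooks pair with a PTRACE_SYSCALL loop in the tracer: each stop alternates between syscall entry and exit, and the syscall number/return value travels in v0 (regs[2]) with the first arguments in a0-a3 (regs[4]-regs[7]), which is exactly what the audit and tracepoint calls above consume. A rough sketch that glosses over signals and entry/exit bookkeeping:

#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Hedged sketch of a strace-like loop: stop the child at every syscall
 * boundary and print v0/a0 from the assumed 38-doubleword GETREGS layout. */
static void trace_syscalls(pid_t child)
{
	int status;
	uint64_t gregs[38];

	for (;;) {
		if (ptrace(PTRACE_SYSCALL, child, NULL, NULL) == -1)
			break;
		if (waitpid(child, &status, 0) == -1 || WIFEXITED(status))
			break;
		if (ptrace(PTRACE_GETREGS, child, NULL, gregs) == 0)
			printf("v0=%#llx a0=%#llx\n",
			       (unsigned long long)gregs[2],
			       (unsigned long long)gregs[4]);
	}
}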