Diffstat (limited to 'arch/powerpc/kernel/kprobes.c')
-rw-r--r--	arch/powerpc/kernel/kprobes.c	283
1 file changed, 187 insertions(+), 96 deletions(-)
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index cfab48566db..2f72af82513 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -1,6 +1,5 @@
 /*
  * Kernel Probes (KProbes)
- * arch/ppc64/kernel/kprobes.c
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -27,17 +26,22 @@
  * for PPC64
  */
 
-#include <linux/config.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
 #include <linux/preempt.h>
+#include <linux/module.h>
+#include <linux/kdebug.h>
+#include <linux/slab.h>
+#include <asm/code-patching.h>
 #include <asm/cacheflush.h>
-#include <asm/kdebug.h>
 #include <asm/sstep.h>
+#include <asm/uaccess.h>
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
+struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	int ret = 0;
@@ -46,12 +50,13 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	if ((unsigned long)p->addr & 0x03) {
 		printk("Attempt to register kprobe at an unaligned address\n");
 		ret = -EINVAL;
-	} else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
-		printk("Cannot register a kprobe on rfid or mtmsrd\n");
+	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
+		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
 		ret = -EINVAL;
 	}
 
-	/* insn must be on a special executable page on ppc64 */
+	/* insn must be on a special executable page on ppc64.  This is
+	 * not explicitly required on ppc32 (right now), but it doesn't hurt */
 	if (!ret) {
 		p->ainsn.insn = get_insn_slot();
 		if (!p->ainsn.insn)
@@ -59,10 +64,14 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	}
 
 	if (!ret) {
-		memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+		memcpy(p->ainsn.insn, p->addr,
+				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 		p->opcode = *p->addr;
+		flush_icache_range((unsigned long)p->ainsn.insn,
+			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
 	}
 
+	p->ainsn.boostable = 0;
 	return ret;
 }
@@ -82,65 +91,56 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	down(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn);
-	up(&kprobe_mutex);
+	if (p->ainsn.insn) {
+		free_insn_slot(p->ainsn.insn, 0);
+		p->ainsn.insn = NULL;
+	}
 }
 
-static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
-	kprobe_opcode_t insn = *p->ainsn.insn;
+	enable_single_step(regs);
 
-	regs->msr |= MSR_SE;
-
-	/* single step inline if it is a trap variant */
-	if (is_trap(insn))
-		regs->nip = (unsigned long)p->addr;
-	else
-		regs->nip = (unsigned long)p->ainsn.insn;
+	/*
+	 * On powerpc we should single step on the original
+	 * instruction even if the probed insn is a trap
+	 * variant as values in regs could play a part in
+	 * if the trap is taken or not
+	 */
+	regs->nip = (unsigned long)p->ainsn.insn;
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
 	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
 }
 
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;
 	kcb->kprobe_saved_msr = regs->msr;
 }
 
-/* Called with kretprobe_lock held */
-void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 				      struct pt_regs *regs)
 {
-	struct kretprobe_instance *ri;
-
-	if ((ri = get_free_rp_inst(rp)) != NULL) {
-		ri->rp = rp;
-		ri->task = current;
-		ri->ret_addr = (kprobe_opcode_t *)regs->link;
-
-		/* Replace the return addr with trampoline addr */
-		regs->link = (unsigned long)kretprobe_trampoline;
-		add_rp_inst(ri);
-	} else {
-		rp->nmissed++;
-	}
+	ri->ret_addr = (kprobe_opcode_t *)regs->link;
+
+	/* Replace the return addr with trampoline addr */
+	regs->link = (unsigned long)kretprobe_trampoline;
 }
 
-static inline int kprobe_handler(struct pt_regs *regs)
+static int __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p;
 	int ret = 0;
@@ -161,7 +161,8 @@ static inline int kprobe_handler(struct pt_regs *regs)
 			kprobe_opcode_t insn = *p->ainsn.insn;
 			if (kcb->kprobe_status == KPROBE_HIT_SS &&
 					is_trap(insn)) {
-				regs->msr &= ~MSR_SE;
+				/* Turn off 'trace' bits */
+				regs->msr &= ~MSR_SINGLESTEP;
 				regs->msr |= kcb->kprobe_saved_msr;
 				goto no_kprobe;
 			}
@@ -230,6 +231,38 @@ static inline int kprobe_handler(struct pt_regs *regs)
 	return 1;
 
 ss_probe:
+	if (p->ainsn.boostable >= 0) {
+		unsigned int insn = *p->ainsn.insn;
+
+		/* regs->nip is also adjusted if emulate_step returns 1 */
+		ret = emulate_step(regs, insn);
+		if (ret > 0) {
+			/*
+			 * Once this instruction has been boosted
+			 * successfully, set the boostable flag
+			 */
+			if (unlikely(p->ainsn.boostable == 0))
+				p->ainsn.boostable = 1;
+
+			if (p->post_handler)
+				p->post_handler(p, regs, 0);
+
+			kcb->kprobe_status = KPROBE_HIT_SSDONE;
+			reset_current_kprobe();
+			preempt_enable_no_resched();
+			return 1;
+		} else if (ret < 0) {
+			/*
+			 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
+			 * So, we should never get here... but, its still
+			 * good to catch them, just in case...
+			 */
+			printk("Can't step on instruction %x\n", insn);
+			BUG();
+		} else if (ret == 0)
+			/* This instruction can't be boosted */
+			p->ainsn.boostable = -1;
+	}
 	prepare_singlestep(p, regs);
 	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;
@@ -245,7 +278,7 @@ no_kprobe:
  * 	- When the probed function returns, this probe
  * 		causes the handlers to fire
  */
-void kretprobe_trampoline_holder(void)
+static void __used kretprobe_trampoline_holder(void)
 {
 	asm volatile(".global kretprobe_trampoline\n"
 			"kretprobe_trampoline:\n"
@@ -255,40 +288,41 @@ void kretprobe_trampoline_holder(void)
 /*
  * Called when the probe at kretprobe trampoline is hit
  */
-int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+						struct pt_regs *regs)
 {
-	struct kretprobe_instance *ri = NULL;
-	struct hlist_head *head;
-	struct hlist_node *node, *tmp;
+	struct kretprobe_instance *ri = NULL;
+	struct hlist_head *head, empty_rp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
-	spin_lock_irqsave(&kretprobe_lock, flags);
-	head = kretprobe_inst_table_head(current);
+	INIT_HLIST_HEAD(&empty_rp);
+	kretprobe_hash_lock(current, &head, &flags);
 
 	/*
 	 * It is possible to have multiple instances associated with a given
 	 * task either because an multiple functions in the call path
-	 * have a return probe installed on them, and/or more then one return
+	 * have a return probe installed on them, and/or more than one return
 	 * return probe was registered for a target function.
 	 *
 	 * We can handle this because:
 	 *     - instances are always inserted at the head of the list
 	 *     - when multiple return probes are registered for the same
-	 *       function, the first instance's ret_addr will point to the
+	 *       function, the first instance's ret_addr will point to the
 	 *       real return address, and all the rest will point to
 	 *       kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
-		if (ri->task != current)
-			continue;
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
 
 		if (ri->rp && ri->rp->handler)
 			ri->rp->handler(ri, regs);
 
 		orig_ret_address = (unsigned long)ri->ret_addr;
-		recycle_rp_inst(ri);
+		recycle_rp_inst(ri, &empty_rp);
 
 		if (orig_ret_address != trampoline_address)
 			/*
@@ -299,19 +333,23 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 			break;
 	}
 
-	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 	regs->nip = orig_ret_address;
 
 	reset_current_kprobe();
-	spin_unlock_irqrestore(&kretprobe_lock, flags);
+	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	/*
-	 * By returning a non-zero value, we are telling
-	 * kprobe_handler() that we don't want the post_handler
-	 * to run (and have re-enabled preemption)
-	 */
-	return 1;
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+		hlist_del(&ri->hlist);
+		kfree(ri);
+	}
+	/*
+	 * By returning a non-zero value, we are telling
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
+	 */
+	return 1;
 }
 
 /*
@@ -322,18 +360,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
  * single-stepped a copy of the instruction.  The address of this
  * copy is p->ainsn.insn.
  */
-static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
-{
-	int ret;
-	unsigned int insn = *p->ainsn.insn;
-
-	regs->nip = (unsigned long)p->addr;
-	ret = emulate_step(regs, insn);
-	if (ret == 0)
-		regs->nip = (unsigned long)p->addr + 4;
-}
-
-static inline int post_kprobe_handler(struct pt_regs *regs)
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -341,12 +368,17 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
 	if (!cur)
 		return 0;
 
+	/* make sure we got here for instruction we have a kprobe on */
+	if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
+		return 0;
+
 	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
 		kcb->kprobe_status = KPROBE_HIT_SSDONE;
 		cur->post_handler(cur, regs, 0);
 	}
 
-	resume_execution(cur, regs);
+	/* Adjust nip to after the single-stepped instruction */
+	regs->nip = (unsigned long)cur->addr + 4;
 	regs->msr |= kcb->kprobe_saved_msr;
 
 	/*Restore back the original saved kprobes variables and continue. */
@@ -360,30 +392,75 @@ out:
 
 	/*
 	 * if somebody else is singlestepping across a probe point, msr
-	 * will have SE set, in which case, continue the remaining processing
+	 * will have DE/SE set, in which case, continue the remaining processing
 	 * of do_debug, as if this is not a probe hit.
 	 */
-	if (regs->msr & MSR_SE)
+	if (regs->msr & MSR_SINGLESTEP)
 		return 0;
 
 	return 1;
 }
 
-static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
-		return 1;
-
-	if (kcb->kprobe_status & KPROBE_HIT_SS) {
-		resume_execution(cur, regs);
-		regs->msr &= ~MSR_SE;
+	const struct exception_table_entry *entry;
+
+	switch(kcb->kprobe_status) {
+	case KPROBE_HIT_SS:
+	case KPROBE_REENTER:
+		/*
+		 * We are here because the instruction being single
+		 * stepped caused a page fault. We reset the current
+		 * kprobe and the nip points back to the probe address
+		 * and allow the page fault handler to continue as a
+		 * normal page fault.
+		 */
+		regs->nip = (unsigned long)cur->addr;
+		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
 		regs->msr |= kcb->kprobe_saved_msr;
-
-		reset_current_kprobe();
+		if (kcb->kprobe_status == KPROBE_REENTER)
+			restore_previous_kprobe(kcb);
+		else
+			reset_current_kprobe();
 		preempt_enable_no_resched();
+		break;
+	case KPROBE_HIT_ACTIVE:
+	case KPROBE_HIT_SSDONE:
+		/*
+		 * We increment the nmissed count for accounting,
+		 * we can also use npre/npostfault count for accounting
+		 * these specific fault cases.
+		 */
+		kprobes_inc_nmissed_count(cur);
+
+		/*
+		 * We come here because instructions in the pre/post
+		 * handler caused the page_fault, this could happen
+		 * if handler tries to access user space by
+		 * copy_from_user(), get_user() etc. Let the
+		 * user-specified handler try to fix it first.
+		 */
+		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+			return 1;
+
+		/*
+		 * In case the user-specified fault handler returned
+		 * zero, try to fix up.
+		 */
+		if ((entry = search_exception_tables(regs->nip)) != NULL) {
+			regs->nip = entry->fixup;
+			return 1;
+		}
+
+		/*
+		 * fixup_exception() could not handle it,
+		 * Let do_page_fault() fix it.
+		 */
+		break;
+	default:
+		break;
 	}
 	return 0;
 }
@@ -397,6 +474,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
 
+	if (args->regs && user_mode(args->regs))
+		return ret;
+
 	switch (val) {
 	case DIE_BPT:
 		if (kprobe_handler(args->regs))
@@ -406,20 +486,17 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		if (post_kprobe_handler(args->regs))
 			ret = NOTIFY_STOP;
 		break;
-	case DIE_PAGE_FAULT:
-		/* kprobe_running() needs smp_processor_id() */
-		preempt_disable();
-		if (kprobe_running() &&
-		    kprobe_fault_handler(args->regs, args->trapnr))
-			ret = NOTIFY_STOP;
-		preempt_enable();
-		break;
 	default:
 		break;
 	}
 	return ret;
 }
 
+unsigned long arch_deref_entry_point(void *entry)
+{
+	return ppc_global_function_entry(entry);
+}
+
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
@@ -428,18 +505,24 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
 
 	/* setup return addr to the jprobe handler routine */
-	regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
+	regs->nip = arch_deref_entry_point(jp->entry);
+#ifdef CONFIG_PPC64
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+	regs->gpr[12] = (unsigned long)jp->entry;
+#else
 	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
+#endif
+#endif
 
 	return 1;
 }
 
-void __kprobes jprobe_return(void)
+void __used __kprobes jprobe_return(void)
 {
 	asm volatile("trap" ::: "memory");
 }
 
-void __kprobes jprobe_return_end(void)
+static void __used __kprobes jprobe_return_end(void)
 {
 };
@@ -466,3 +549,11 @@ int __init arch_init_kprobes(void)
 {
 	return register_kprobe(&trampoline_p);
 }
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
+		return 1;
+
+	return 0;
+}
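
The handlers patched above are not called directly; they are driven by probes registered through the generic kprobes API. The following is a minimal sketch (not part of this commit) of a module that exercises kprobe_handler() and post_kprobe_handler(). The module name and the probed symbol "do_fork" are illustrative assumptions; register_kprobe()/unregister_kprobe() and the handler signatures are the standard <linux/kprobes.h> interface.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Runs from kprobe_handler() when the breakpoint at the probed
 * address is hit; returning 0 lets the probed insn be emulated
 * (the "boostable" path) or single-stepped. */
static int pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre: nip=0x%lx msr=0x%lx\n", regs->nip, regs->msr);
	return 0;
}

/* Runs either from post_kprobe_handler() after the single-step
 * trace exception, or directly from the emulate_step() fast path
 * in kprobe_handler() when the instruction was boosted. */
static void post(struct kprobe *p, struct pt_regs *regs,
		 unsigned long flags)
{
	pr_info("post: nip=0x%lx\n", regs->nip);
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",	/* assumption: any probeable symbol */
	.pre_handler	= pre,
	.post_handler	= post,
};

static int __init kp_example_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_example_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_example_init);
module_exit(kp_example_exit);
MODULE_LICENSE("GPL");

Note that arch_prepare_kprobe() above rejects this registration up front if the probed address is unaligned or holds an rfi/rfid or mtmsr[d] instruction.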
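
Likewise, arch_prepare_kretprobe() and trampoline_probe_handler() sit behind register_kretprobe(). A second sketch under the same assumptions (the probed symbol is again hypothetical): on function entry the link register is swapped for kretprobe_trampoline, and when the probed function returns through the trampoline the handler runs with the powerpc return value available in r3 (regs->gpr[3]).

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Invoked from trampoline_probe_handler() once the probed function
 * returns; ri->ret_addr was saved by arch_prepare_kretprobe(). */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("%s returned %ld\n",
		ri->rp->kp.symbol_name, regs->gpr[3]);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= ret_handler,
	.kp.symbol_name	= "do_fork",	/* assumption: any probeable symbol */
	.maxactive	= 16,	/* instances for concurrent/recursive calls */
};

static int __init kret_example_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit kret_example_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
	/* calls that found no free instance are counted, not probed */
	pr_info("missed %d instances\n", my_kretprobe.nmissed);
}

module_init(kret_example_init);
module_exit(kret_example_exit);
MODULE_LICENSE("GPL");

maxactive bounds how many instances may be live at once, which is why trampoline_probe_handler() must cope with several instances of the current task sharing one hash bucket.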
