Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile           |    2
-rw-r--r--  arch/arm/kernel/atags.c            |   86
-rw-r--r--  arch/arm/kernel/atags.h            |    5
-rw-r--r--  arch/arm/kernel/calls.S            |    2
-rw-r--r--  arch/arm/kernel/dma-isa.c          |    2
-rw-r--r--  arch/arm/kernel/entry-armv.S       |  159
-rw-r--r--  arch/arm/kernel/entry-common.S     |    2
-rw-r--r--  arch/arm/kernel/kprobes-decode.c   | 1529
-rw-r--r--  arch/arm/kernel/kprobes.c          |  447
-rw-r--r--  arch/arm/kernel/machine_kexec.c    |    2
-rw-r--r--  arch/arm/kernel/relocate_kernel.S  |   30
-rw-r--r--  arch/arm/kernel/setup.c            |   33
-rw-r--r--  arch/arm/kernel/smp.c              |   41
-rw-r--r--  arch/arm/kernel/time.c             |   17
-rw-r--r--  arch/arm/kernel/traps.c            |   26
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S      |   11
16 files changed, 2253 insertions(+), 141 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 593b56509f4..00d44c6fbfe 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -19,6 +19,8 @@ obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o
+obj-$(CONFIG_ATAGS_PROC) += atags.o
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
diff --git a/arch/arm/kernel/atags.c b/arch/arm/kernel/atags.c
new file mode 100644
index 00000000000..e2e934c3808
--- /dev/null
+++ b/arch/arm/kernel/atags.c
@@ -0,0 +1,86 @@
+#include <linux/slab.h>
+#include <linux/kexec.h>
+#include <linux/proc_fs.h>
+#include <asm/setup.h>
+#include <asm/types.h>
+#include <asm/page.h>
+
+struct buffer {
+ size_t size;
+ char *data;
+};
+static struct buffer tags_buffer;
+
+static int
+read_buffer(char* page, char** start, off_t off, int count,
+ int* eof, void* data)
+{
+ struct buffer *buffer = (struct buffer *)data;
+
+ if (off >= buffer->size) {
+ *eof = 1;
+ return 0;
+ }
+
+ count = min((int) (buffer->size - off), count);
+
+ memcpy(page, &buffer->data[off], count);
+
+ return count;
+}
+
+
+static int
+create_proc_entries(void)
+{
+ struct proc_dir_entry* tags_entry;
+
+ tags_entry = create_proc_read_entry("atags", 0400, &proc_root, read_buffer, &tags_buffer);
+ if (!tags_entry)
+ return -ENOMEM;
+
+ return 0;
+}
+
+
+static char __initdata atags_copy_buf[KEXEC_BOOT_PARAMS_SIZE];
+static char __initdata *atags_copy;
+
+void __init save_atags(const struct tag *tags)
+{
+ atags_copy = atags_copy_buf;
+ memcpy(atags_copy, tags, KEXEC_BOOT_PARAMS_SIZE);
+}
+
+
+static int __init init_atags_procfs(void)
+{
+ struct tag *tag;
+ int error;
+
+ if (!atags_copy) {
+ printk(KERN_WARNING "Exporting ATAGs: No saved tags found\n");
+ return -EIO;
+ }
+
+ for (tag = (struct tag *) atags_copy; tag->hdr.size; tag = tag_next(tag))
+ ;
+
+ tags_buffer.size = ((char *) tag - atags_copy) + sizeof(tag->hdr);
+ tags_buffer.data = kmalloc(tags_buffer.size, GFP_KERNEL);
+ if (tags_buffer.data == NULL)
+ return -ENOMEM;
+ memcpy(tags_buffer.data, atags_copy, tags_buffer.size);
+
+ error = create_proc_entries();
+ if (error) {
+ printk(KERN_ERR "Exporting ATAGs: not enough memory\n");
+ kfree(tags_buffer.data);
+ tags_buffer.size = 0;
+ tags_buffer.data = NULL;
+ }
+
+ return error;
+}
+
+arch_initcall(init_atags_procfs);
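Once this initcall runs, the saved tag block is visible to userspace. As a quick sanity check (a sketch, not part of the patch), the exported file can simply be copied out:

/* Hypothetical userspace example: dump the raw ATAG block
 * exported by /proc/atags to stdout. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/proc/atags", "rb");
	char buf[4096];
	size_t n;

	if (!f) {
		perror("/proc/atags");
		return EXIT_FAILURE;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return EXIT_SUCCESS;
}

Kexec tooling can read the block verbatim this way and hand it to the next kernel.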
diff --git a/arch/arm/kernel/atags.h b/arch/arm/kernel/atags.h
new file mode 100644
index 00000000000..e5f028d214a
--- /dev/null
+++ b/arch/arm/kernel/atags.h
@@ -0,0 +1,5 @@
+#ifdef CONFIG_ATAGS_PROC
+extern void save_atags(struct tag *tags);
+#else
+static inline void save_atags(struct tag *tags) { }
+#endif
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index cecf658e362..283e14fff99 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -359,7 +359,7 @@
CALL(sys_kexec_load)
CALL(sys_utimensat)
CALL(sys_signalfd)
-/* 350 */ CALL(sys_timerfd)
+/* 350 */ CALL(sys_ni_syscall)
CALL(sys_eventfd)
CALL(sys_fallocate)
#ifndef syscalls_counted
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
index 0a3e9ad297d..2f080a35a2d 100644
--- a/arch/arm/kernel/dma-isa.c
+++ b/arch/arm/kernel/dma-isa.c
@@ -216,7 +216,7 @@ void __init isa_init_dma(dma_t *dma)
request_dma(DMA_ISA_CASCADE, "cascade");
- for (i = 0; i < sizeof(dma_resources) / sizeof(dma_resources[0]); i++)
+ for (i = 0; i < ARRAY_SIZE(dma_resources); i++)
request_resource(&ioport_resource, dma_resources + i);
}
}
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d645897652c..a46d5b45676 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -11,8 +11,8 @@
*
* Low-level vector interface routines
*
- * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that causes
- * it to save wrong values... Be aware!
+ * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
+ * that causes it to save wrong values... Be aware!
*/
#include <asm/memory.h>
@@ -58,6 +58,12 @@
.endm
+#ifdef CONFIG_KPROBES
+ .section .kprobes.text,"ax",%progbits
+#else
+ .text
+#endif
+
/*
* Invalid mode handlers
*/
@@ -112,8 +118,8 @@ common_invalid:
#define SPFIX(code...)
#endif
- .macro svc_entry
- sub sp, sp, #S_FRAME_SIZE
+ .macro svc_entry, stack_hole=0
+ sub sp, sp, #(S_FRAME_SIZE + \stack_hole)
SPFIX( tst sp, #4 )
SPFIX( bicne sp, sp, #4 )
stmib sp, {r1 - r12}
@@ -121,7 +127,7 @@ common_invalid:
ldmia r0, {r1 - r3}
add r5, sp, #S_SP @ here for interlock avoidance
mov r4, #-1 @ "" "" "" ""
- add r0, sp, #S_FRAME_SIZE @ "" "" "" ""
+ add r0, sp, #(S_FRAME_SIZE + \stack_hole)
SPFIX( addne r0, r0, #4 )
str r1, [sp] @ save the "real" r0 copied
@ from the exception stack
@@ -242,7 +248,14 @@ svc_preempt:
.align 5
__und_svc:
+#ifdef CONFIG_KPROBES
+ @ If a kprobe is about to simulate a "stmdb sp..." instruction,
+ @ it obviously needs free stack space which then will belong to
+ @ the saved context.
+ svc_entry 64
+#else
svc_entry
+#endif
@
@ call emulation code, which returns using r9 if it has emulated
@@ -339,16 +352,6 @@ __pabt_svc:
str r1, [sp] @ save the "real" r0 copied
@ from the exception stack
-#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
-#ifndef CONFIG_MMU
-#warning "NPTL on non MMU needs fixing"
-#else
- @ make sure our user space atomic helper is aborted
- cmp r2, #TASK_SIZE
- bichs r3, r3, #PSR_Z_BIT
-#endif
-#endif
-
@
@ We are now ready to fill in the remaining blanks on the stack:
@
@@ -372,9 +375,25 @@ __pabt_svc:
zero_fp
.endm
+ .macro kuser_cmpxchg_check
+#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#ifndef CONFIG_MMU
+#warning "NPTL on non MMU needs fixing"
+#else
+ @ Make sure our user space atomic helper is restarted
+ @ if it was interrupted in a critical region. Here we
+ @ perform a quick test inline since it should be false
+ @ 99.9999% of the time. The rest is done out of line.
+ cmp r2, #TASK_SIZE
+ blhs kuser_cmpxchg_fixup
+#endif
+#endif
+ .endm
+
.align 5
__dabt_usr:
usr_entry
+ kuser_cmpxchg_check
@
@ Call the processor-specific abort handler:
@@ -404,6 +423,7 @@ __dabt_usr:
.align 5
__irq_usr:
usr_entry
+ kuser_cmpxchg_check
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
@@ -446,9 +466,9 @@ __und_usr:
@
@ r0 - instruction
@
-1: ldrt r0, [r4]
adr r9, ret_from_exception
adr lr, __und_usr_unknown
+1: ldrt r0, [r4]
@
@ fallthrough to call_fpe
@
@@ -473,6 +493,13 @@ __und_usr:
* co-processor instructions. However, we have to watch out
* for the ARM6/ARM7 SWI bug.
*
+ * NEON is a special case that has to be handled here. Not all
+ * NEON instructions are co-processor instructions, so we have
+ * to make a special case of checking for them. Plus, there are
+ * five groups of them, so we have a table of mask/opcode pairs
+ * to check against, and if any match then we branch off into the
+ * NEON handler code.
+ *
* Emulators may wish to make use of the following registers:
* r0 = instruction opcode.
* r2 = PC+4
@@ -481,6 +508,23 @@ __und_usr:
* lr = unrecognised instruction return address
*/
call_fpe:
+#ifdef CONFIG_NEON
+ adr r6, .LCneon_opcodes
+2:
+ ldr r7, [r6], #4 @ mask value
+ cmp r7, #0 @ end mask?
+ beq 1f
+ and r8, r0, r7
+ ldr r7, [r6], #4 @ opcode bits matching in mask
+ cmp r8, r7 @ NEON instruction?
+ bne 2b
+ get_thread_info r10
+ mov r7, #1
+ strb r7, [r10, #TI_USED_CP + 10] @ mark CP#10 as used
+ strb r7, [r10, #TI_USED_CP + 11] @ mark CP#11 as used
+ b do_vfp @ let VFP handler handle this
+1:
+#endif
tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
and r8, r0, #0x0f000000 @ mask out op-code bits
@@ -530,6 +574,20 @@ call_fpe:
mov pc, lr @ CP#14 (Debug)
mov pc, lr @ CP#15 (Control)
+#ifdef CONFIG_NEON
+ .align 6
+
+.LCneon_opcodes:
+ .word 0xfe000000 @ mask
+ .word 0xf2000000 @ opcode
+
+ .word 0xff100000 @ mask
+ .word 0xf4000000 @ opcode
+
+ .word 0x00000000 @ mask
+ .word 0x00000000 @ opcode
+#endif
+
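The table-driven check above is compact but easy to misread in assembly; here is a C rendition of the same walk (illustrative only; the group labels are inferred from the ARM encodings, not stated in this patch):

/* C equivalent of the assembly walk over .LCneon_opcodes above;
 * a zero mask terminates the table. */
static const unsigned int neon_opcodes[] = {
	0xfe000000, 0xf2000000,	/* mask, opcode: NEON data-processing */
	0xff100000, 0xf4000000,	/* mask, opcode: element/structure load/store */
	0x00000000, 0x00000000,	/* terminator */
};

static int is_neon_insn(unsigned int insn)
{
	const unsigned int *p;

	for (p = neon_opcodes; p[0] != 0; p += 2)
		if ((insn & p[0]) == p[1])
			return 1;
	return 0;
}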
do_fpe:
enable_irq
ldr r4, .LCfp
@@ -548,7 +606,7 @@ do_fpe:
.data
ENTRY(fp_enter)
.word no_fp
- .text
+ .previous
no_fp: mov pc, lr
@@ -669,7 +727,7 @@ __kuser_helper_start:
*
* Clobbered:
*
- * the Z flag might be lost
+ * none
*
* Definition and user space usage example:
*
@@ -730,9 +788,6 @@ __kuser_memory_barrier: @ 0xffff0fa0
*
* - This routine already includes memory barriers as needed.
*
- * - A failure might be transient, i.e. it is possible, although unlikely,
- * that "failure" be returned even if *ptr == oldval.
- *
* For example, a user space atomic_add implementation could look like this:
*
* #define atomic_add(ptr, val) \
@@ -769,46 +824,62 @@ __kuser_cmpxchg: @ 0xffff0fc0
#elif __LINUX_ARM_ARCH__ < 6
+#ifdef CONFIG_MMU
+
/*
- * Theory of operation:
- *
- * We set the Z flag before loading oldval. If ever an exception
- * occurs we can not be sure the loaded value will still be the same
- * when the exception returns, therefore the user exception handler
- * will clear the Z flag whenever the interrupted user code was
- * actually from the kernel address space (see the usr_entry macro).
- *
- * The post-increment on the str is used to prevent a race with an
- * exception happening just after the str instruction which would
- * clear the Z flag although the exchange was done.
+ * The only thing that can break atomicity in this cmpxchg
+ * implementation is either an IRQ or a data abort exception
+ * causing another process/thread to be scheduled in the middle
+ * of the critical sequence. To prevent this, code is added to
+ * the IRQ and data abort exception handlers to set the pc back
+ * to the beginning of the critical section if it is found to be
+ * within that critical section (see kuser_cmpxchg_fixup).
*/
-#ifdef CONFIG_MMU
- teq ip, ip @ set Z flag
- ldr ip, [r2] @ load current val
- add r3, r2, #1 @ prepare store ptr
- teqeq ip, r0 @ compare with oldval if still allowed
- streq r1, [r3, #-1]! @ store newval if still allowed
- subs r0, r2, r3 @ if r2 == r3 the str occured
+1: ldr r3, [r2] @ load current val
+ subs r3, r3, r0 @ compare with oldval
+2: streq r1, [r2] @ store newval if eq
+ rsbs r0, r3, #0 @ set return val and C flag
+ usr_ret lr
+
+ .text
+kuser_cmpxchg_fixup:
+ @ Called from kuser_cmpxchg_check macro.
+ @ r2 = address of interrupted insn (must be preserved).
+ @ sp = saved regs. r7 and r8 are clobbered.
+ @ 1b = first critical insn, 2b = last critical insn.
+ @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
+ mov r7, #0xffff0fff
+ sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
+ subs r8, r2, r7
+ rsbcss r8, r8, #(2b - 1b)
+ strcs r7, [sp, #S_PC]
+ mov pc, lr
+ .previous
+
#else
#warning "NPTL on non MMU needs fixing"
mov r0, #-1
adds r0, r0, #0
-#endif
usr_ret lr
+#endif
#else
#ifdef CONFIG_SMP
mcr p15, 0, r0, c7, c10, 5 @ dmb
#endif
- ldrex r3, [r2]
+1: ldrex r3, [r2]
subs r3, r3, r0
strexeq r3, r1, [r2]
+ teqeq r3, #1
+ beq 1b
rsbs r0, r3, #0
+ /* beware -- each __kuser slot must be 8 instructions max */
#ifdef CONFIG_SMP
- mcr p15, 0, r0, c7, c10, 5 @ dmb
-#endif
+ b __kuser_memory_barrier
+#else
usr_ret lr
+#endif
#endif
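For reference, user space reaches this helper through the fixed address documented earlier in this file; a sketch of the atomic_add shape the comments refer to, assuming only the documented 0xffff0fc0 ABI:

/* Userspace sketch of the kuser cmpxchg ABI: the helper at the fixed
 * address 0xffff0fc0 returns zero when *ptr held oldval and has been
 * atomically replaced with newval, non-zero otherwise. */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)

static void atomic_add(volatile int *ptr, int val)
{
	int old;

	do {
		old = *ptr;
	} while (__kernel_cmpxchg(old, old + val, ptr) != 0);
}

With the fixup in place, an interrupt landing between the load and the store restarts the helper at its first critical instruction, which is why the "failure might be transient" caveat could be deleted above.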
@@ -829,7 +900,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
*
* Clobbered:
*
- * the Z flag might be lost
+ * none
*
* Definition and user space usage example:
*
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 33e6cc2ffd3..6c90c50a9ee 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -72,7 +72,7 @@ no_work_pending:
ldr r1, [sp, #S_PSR] @ get calling cpsr
ldr lr, [sp, #S_PC]! @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
- ldmdb sp, {r0 - lr}^ @ get calling r1 - lr
+ ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
mov r0, r0
add sp, sp, #S_FRAME_SIZE - S_PC
movs pc, lr @ return & move spsr_svc into cpsr
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
new file mode 100644
index 00000000000..d51bc8b6055
--- /dev/null
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -0,0 +1,1529 @@
+/*
+ * arch/arm/kernel/kprobes-decode.c
+ *
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * We do not have hardware single-stepping on ARM. This
+ * effort is further complicated by the ARM not having a
+ * "next PC" register. Instructions that change the PC
+ * can't be safely single-stepped in a MP environment, so
+ * we have a lot of work to do:
+ *
+ * In the prepare phase:
+ * *) If it is an instruction that does anything
+ * with the CPU mode, we reject it for a kprobe.
+ * (This is out of laziness rather than need. The
+ * instructions could be simulated.)
+ *
+ * *) Otherwise, decode the instruction rewriting its
+ * registers to take fixed, ordered registers and
+ * setting a handler for it to run the instruction.
+ *
+ * In the execution phase by an instruction's handler:
+ *
+ * *) If the PC is written to by the instruction, the
+ * instruction must be fully simulated in software.
+ * If it is a conditional instruction, the handler
+ * will use insn[0] to copy its condition code to
+ * set r0 to 1 and insn[1] to "mov pc, lr" to return.
+ *
+ * *) Otherwise, a modified form of the instruction is
+ * directly executed. Its handler calls the
+ * instruction in insn[0]. In insn[1] is a
+ * "mov pc, lr" to return.
+ *
+ * Before calling, load up the reordered registers
+ * from the original instruction's registers. If one
+ * of the original input registers is the PC, compute
+ * and adjust the appropriate input register.
+ *
+ * After call completes, copy the output registers to
+ * the original instruction's original registers.
+ *
+ * We don't use a real breakpoint instruction since that
+ * would have us in the kernel go from SVC mode to SVC
+ * mode, losing the link register. Instead we use an
+ * undefined instruction. To simplify processing, the
+ * undefined instruction used for kprobes must be reserved
+ * exclusively for kprobes use.
+ *
+ * TODO: ifdef out some instruction decoding based on architecture.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
+
+#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)
+
+#define PSR_fs (PSR_f|PSR_s)
+
+#define KPROBE_RETURN_INSTRUCTION 0xe1a0f00e /* mov pc, lr */
+#define SET_R0_TRUE_INSTRUCTION 0xe3a00001 /* mov r0, #1 */
+
+#define truecc_insn(insn) (((insn) & 0xf0000000) | \
+ (SET_R0_TRUE_INSTRUCTION & 0x0fffffff))
+
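To see the displacement macros in action, a hand-checked example (illustrative only):

#include <assert.h>

static void branch_displacement_example(void)
{
	/* "b ." encodes imm24 = 0xfffffe.  (insn & 0xffffff) << 2 is
	 * 0x03fffff8 with bit 25 set, so sign_extend() ORs in
	 * 0xfe000000, yielding -8.  simulate_bbl() below then computes
	 * iaddr + 8 + (-8) == iaddr: a branch to itself, as expected
	 * for the opcode 0xeafffffe. */
	assert(branch_displacement(0xeafffffe) == -8);
}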
+typedef long (insn_0arg_fn_t)(void);
+typedef long (insn_1arg_fn_t)(long);
+typedef long (insn_2arg_fn_t)(long, long);
+typedef long (insn_3arg_fn_t)(long, long, long);
+typedef long (insn_4arg_fn_t)(long, long, long, long);
+typedef long long (insn_llret_0arg_fn_t)(void);
+typedef long long (insn_llret_3arg_fn_t)(long, long, long);
+typedef long long (insn_llret_4arg_fn_t)(long, long, long, long);
+
+union reg_pair {
+ long long dr;
+#ifdef __LITTLE_ENDIAN
+ struct { long r0, r1; };
+#else
+ struct { long r1, r0; };
+#endif
+};
+
+/*
+ * For STR and STM instructions, an ARM core may choose to use either
+ * a +8 or a +12 displacement from the current instruction's address.
+ * Whichever value is chosen for a given core, it must be the same for
+ * both instructions and may not change. This function measures it.
+ */
+
+static int str_pc_offset;
+
+static void __init find_str_pc_offset(void)
+{
+ int addr, scratch, ret;
+
+ __asm__ (
+ "sub %[ret], pc, #4 \n\t"
+ "str pc, %[addr] \n\t"
+ "ldr %[scr], %[addr] \n\t"
+ "sub %[ret], %[scr], %[ret] \n\t"
+ : [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr));
+
+ str_pc_offset = ret;
+}
+
+/*
+ * The insnslot_?arg_r[w]flags() functions below are to keep the
+ * msr -> *fn -> mrs instruction sequences indivisible so that
+ * the state of the CPSR flags aren't inadvertently modified
+ * just before or just after the call.
+ */
+
+static inline long __kprobes
+insnslot_0arg_rflags(long cpsr, insn_0arg_fn_t *fn)
+{
+ register long ret asm("r0");
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[fn] \n\t"
+ : "=r" (ret)
+ : [cpsr] "r" (cpsr), [fn] "r" (fn)
+ : "lr", "cc"
+ );
+ return ret;
+}
+
+static inline long long __kprobes
+insnslot_llret_0arg_rflags(long cpsr, insn_llret_0arg_fn_t *fn)
+{
+ register long ret0 asm("r0");
+ register long ret1 asm("r1");
+ union reg_pair fnr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[fn] \n\t"
+ : "=r" (ret0), "=r" (ret1)
+ : [cpsr] "r" (cpsr), [fn] "r" (fn)
+ : "lr", "cc"
+ );
+ fnr.r0 = ret0;
+ fnr.r1 = ret1;
+ return fnr.dr;
+}
+
+static inline long __kprobes
+insnslot_1arg_rflags(long r0, long cpsr, insn_1arg_fn_t *fn)
+{
+ register long rr0 asm("r0") = r0;
+ register long ret asm("r0");
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[fn] \n\t"
+ : "=r" (ret)
+ : "0" (rr0), [cpsr] "r" (cpsr), [fn] "r" (fn)
+ : "lr", "cc"
+ );
+ return ret;
+}
+
+static inline long __kprobes
+insnslot_2arg_rflags(long r0, long r1, long cpsr, insn_2arg_fn_t *fn)
+{
+ register long rr0 asm("r0") = r0;
+ register long rr1 asm("r1") = r1;
+ register long ret asm("r0");
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[fn] \n\t"
+ : "=r" (ret)
+ : "0" (rr0), "r" (rr1),
+ [cpsr] "r" (cpsr), [fn] "r" (fn)
+ : "lr", "cc"
+ );
+ return ret;
+}
+
+static inline long __kprobes
+insnslot_3arg_rflags(long r0, long r1, long r2, long cpsr, insn_3arg_fn_t *fn)
+{
+ register long rr0 asm("r0") = r0;
+ register long rr1 asm("r1") = r1;
+ register long rr2 asm("r2") = r2;
+ register long ret asm("r0");
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[fn] \n\t"
+ : "=r" (ret)
+ : "0" (rr0), "r" (rr1), "r" (rr2),
+ [cpsr] "r" (cpsr), [fn] "r" (fn)
+ : "lr", "cc"
+ );
+ return ret;
+}
+
+static inline long long __kprobes
+insnslot_llret_3arg_rflags(long r0, long r1, long r2, long cpsr,
+ insn_llret_3arg_fn_t *fn)
+{
+ register long rr0 asm("r0") = r0;
+ register long rr1 asm("r1") = r1;
+ register long rr2 asm("r2") = r2;
+ register long ret0 asm("r0");
+ register long ret1 asm("r1");
+ union reg_pair fnr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[fn] \n\t"
+ : "=r" (ret0), "=r" (ret1)
+ : "0" (rr0), "r" (rr1), "r" (rr2),
+ [cpsr] "r" (cpsr), [fn] "r" (fn)
+ : "lr", "cc"
+ );
+ fnr.r0 = ret0;
+ fnr.r1 = ret1;
+ return fnr.dr;
+}
+
+static inline long __kprobes
+insnslot_4arg_rflags(long r0, long r1, long r2, long r3, long cpsr,
+ insn_4arg_fn_t *fn)
+{
+ register long rr0 asm("r0") = r0;
+ register long rr1 asm("r1") = r1;
+ register long rr2 asm("r2") = r2;
+ register long rr3 asm("r3") = r3;
+ register long ret asm("r0");
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[fn] \n\t"
+ : "=r" (ret)
+ : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3),
+ [cpsr] "r" (cpsr), [fn] "r" (fn)
+ : "lr", "cc"
+ );
+ return ret;
+}
+
+static inline long __kprobes
+insnslot_1arg_rwflags(long r0, long *cpsr, insn_1arg_fn_t *fn)
+{
+ register long rr0 asm("r0") = r0;
+ register long ret asm("r0");
+ long oldcpsr = *cpsr;
+ long newcpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[oldcpsr] \n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[fn] \n\t"
+ "mrs %[newcpsr], cpsr \n\t"
+ : "=r" (ret), [newcpsr] "=r" (newcpsr)
+ : "0" (rr0), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
+ : "lr", "cc"
+ );
+ *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
+ return ret;
+}
+
+static inline long __kprobes
+insnslot_2arg_rwflags(long r0, long r1, long *cpsr, insn_2arg_fn_t *fn)
+{
+ register long rr0 asm("r0") = r0;
+ register long rr1 asm("r1") = r1;
+ register long ret asm("r0");
+ long oldcpsr = *cpsr;
+ long newcpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[oldcpsr] \n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[fn] \n\t"
+ "mrs %[newcpsr], cpsr \n\t"
+ : "=r" (ret), [newcpsr] "=r" (newcpsr)
+ : "0" (rr0), "r" (rr1), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
+ : "lr", "cc"
+ );
+ *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
+ return ret;
+}
+
+static inline long __kprobes
+insnslot_3arg_rwflags(long r0, long r1, long r2, long *cpsr,
+ insn_3arg_fn_t *fn)
+{
+ register long rr0 asm("r0") = r0;
+ register long rr1 asm("r1") = r1;
+ register long rr2 asm("r2") = r2;
+ register long ret asm("r0");
+ long oldcpsr = *cpsr;
+ long newcpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[oldcpsr] \n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[fn] \n\t"
+ "mrs %[newcpsr], cpsr \n\t"
+ : "=r" (ret), [newcpsr] "=r" (newcpsr)
+ : "0" (rr0), "r" (rr1), "r" (rr2),
+ [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
+ : "lr", "cc"
+ );
+ *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
+ return ret;
+}
+
+static inline long __kprobes
+insnslot_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr,
+ insn_4arg_fn_t *fn)
+{
+ register long rr0 asm("r0") = r0;
+ register long rr1 asm("r1") = r1;
+ register long rr2 asm("r2") = r2;
+ register long rr3 asm("r3") = r3;
+ register long ret asm("r0");
+ long oldcpsr = *cpsr;
+ long newcpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[oldcpsr] \n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[fn] \n\t"
+ "mrs %[newcpsr], cpsr \n\t"
+ : "=r" (ret), [newcpsr] "=r" (newcpsr)
+ : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3),
+ [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
+ : "lr", "cc"
+ );
+ *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
+ return ret;
+}
+
+static inline long long __kprobes
+insnslot_llret_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr,
+ insn_llret_4arg_fn_t *fn)
+{
+ register long rr0 asm("r0") = r0;
+ register long rr1 asm("r1") = r1;
+ register long rr2 asm("r2") = r2;
+ register long rr3 asm("r3") = r3;
+ register long ret0 asm("r0");
+ register long ret1 asm("r1");
+ long oldcpsr = *cpsr;
+ long newcpsr;
+ union reg_pair fnr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[oldcpsr] \n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[fn] \n\t"
+ "mrs %[newcpsr], cpsr \n\t"
+ : "=r" (ret0), "=r" (ret1), [newcpsr] "=r" (newcpsr)
+ : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3),
+ [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
+ : "lr", "cc"
+ );
+ *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
+ fnr.r0 = ret0;
+ fnr.r1 = ret1;
+ return fnr.dr;
+}
+
+/*
+ * To avoid the complications of mimicking single-stepping on a
+ * processor without a Next-PC or a single-step mode, and to
+ * avoid having to deal with the side-effects of boosting, we
+ * simulate or emulate (almost) all ARM instructions.
+ *
+ * "Simulation" is where the instruction's behavior is duplicated in
+ * C code. "Emulation" is where the original instruction is rewritten
+ * and executed, often by altering its registers.
+ *
+ * By having all behavior of the kprobe'd instruction completed before
+ * returning from the kprobe_handler(), all locks (scheduler and
+ * interrupt) can safely be released. There is no need for secondary
+ * breakpoints, no race with MP or preemptible kernels, nor having to
+ * clean up resource counts at a later time, impacting overall system
+ * performance. By rewriting the instruction, only the minimum registers
+ * need to be loaded and saved back, optimizing performance.
+ *
+ * Calling the insnslot_*_rwflags version of a function doesn't hurt
+ * anything even when the CPSR flags aren't updated by the
+ * instruction. It's just a little slower in return for saving
+ * a little space by not having a duplicate function that doesn't
+ * update the flags. (The same optimization applies to instructions
+ * that do or don't perform register writeback.) Also, instructions
+ * can either read the flags, only write the flags, or read and
+ * write the flags. To reduce the number of combinations rather than
+ * for sheer performance, the flag functions just assume both a read
+ * and a write of the flags.
+ */
+
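As a concrete instance of the rewriting described above (the example instruction and values are hand-checked, not taken from the patch):

/* Probing "add r5, r7, r9" (0xe0875009): the prep_* routines below
 * renumber its registers so the slot copy only touches r0-r2. */
kprobe_opcode_t insn = 0xe0875009;	/* add  r5, r7, r9 */
insn &= 0xfff00ff0;			/* Rn = r0, Rd = r0 */
insn |= 0x00000001;			/* Rm = r1 */
/* insn is now 0xe0800001, i.e. "add r0, r0, r1".  At probe hit, the
 * handler loads the saved r7 into r0 and r9 into r1, calls the slot,
 * and writes the resulting r0 back into the saved r5. */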
+static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ long iaddr = (long)p->addr;
+ int disp = branch_displacement(insn);
+
+ if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn))
+ return;
+
+ if (insn & (1 << 24))
+ regs->ARM_lr = iaddr + 4;
+
+ regs->ARM_pc = iaddr + 8 + disp;
+}
+
+static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs)
+{
+ kprobe_opcode_t insn = p->opcode;
+ long iaddr = (long)p->addr;
+ int disp = branch_displacement(insn);
+
+ regs->ARM_lr = iaddr + 4;
+ regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2);
+ regs->ARM_cpsr |= PSR_T_BIT;
+}
+
+static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rm = insn & 0xf;
+ long rmv = regs->uregs[rm];
+
+ if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn))
+ return;
+
+ if (insn & (1 << 5))
+ regs->ARM_lr = (long)p->addr + 4;
+
+ regs->ARM_pc = rmv & ~0x1;
+ regs->ARM_cpsr &= ~PSR_T_BIT;
+ if (rmv & 0x1)
+ regs->ARM_cpsr |= PSR_T_BIT;
+}
+
+static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rn = (insn >> 16) & 0xf;
+ int lbit = insn & (1 << 20);
+ int wbit = insn & (1 << 21);
+ int ubit = insn & (1 << 23);
+ int pbit = insn & (1 << 24);
+ long *addr = (long *)regs->uregs[rn];
+ int reg_bit_vector;
+ int reg_count;
+
+ if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn))
+ return;
+
+ reg_count = 0;
+ reg_bit_vector = insn & 0xffff;
+ while (reg_bit_vector) {
+ reg_bit_vector &= (reg_bit_vector - 1);
+ ++reg_count;
+ }
+
+ if (!ubit)
+ addr -= reg_count;
+ addr += (!pbit ^ !ubit);
+
+ reg_bit_vector = insn & 0xffff;
+ while (reg_bit_vector) {
+ int reg = __ffs(reg_bit_vector);
+ reg_bit_vector &= (reg_bit_vector - 1);
+ if (lbit)
+ regs->uregs[reg] = *addr++;
+ else
+ *addr++ = regs->uregs[reg];
+ }
+
+ if (wbit) {
+ if (!ubit)
+ addr -= reg_count;
+ addr -= (!pbit ^ !ubit);
+ regs->uregs[rn] = (long)addr;
+ }
+}
+
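The register-count loop in simulate_ldm1stm1() above uses the classic clear-lowest-set-bit idiom; restated standalone for clarity (illustrative only):

/* Each reg_bit_vector &= (reg_bit_vector - 1) clears the lowest set
 * bit, so the loop runs once per register in the LDM/STM list.  For
 * "ldmia r0, {r1, r4, pc}" the vector is 0x8012 and the count is 3. */
static int count_list_regs(unsigned int reg_bit_vector)
{
	int reg_count = 0;

	while (reg_bit_vector) {
		reg_bit_vector &= (reg_bit_vector - 1);
		++reg_count;
	}
	return reg_count;
}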
+static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+
+ if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn))
+ return;
+
+ regs->ARM_pc = (long)p->addr + str_pc_offset;
+ simulate_ldm1stm1(p, regs);
+ regs->ARM_pc = (long)p->addr + 4;
+}
+
+static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs)
+{
+ regs->uregs[12] = regs->uregs[13];
+}
+
+static void __kprobes emulate_ldcstc(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rn = (insn >> 16) & 0xf;
+ long rnv = regs->uregs[rn];
+
+ /* Save Rn in case of writeback. */
+ regs->uregs[rn] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf; /* rm may be invalid, don't care. */
+
+ /* Not following the C calling convention here, so need asm(). */
+ __asm__ __volatile__ (
+ "ldr r0, %[rn] \n\t"
+ "ldr r1, %[rm] \n\t"
+ "msr cpsr_fs, %[cpsr]\n\t"
+ "mov lr, pc \n\t"
+ "mov pc, %[i_fn] \n\t"
+ "str r0, %[rn] \n\t" /* in case of writeback */
+ "str r2, %[rd0] \n\t"
+ "str r3, %[rd1] \n\t"
+ : [rn] "+m" (regs->uregs[rn]),
+ [rd0] "=m" (regs->uregs[rd]),
+ [rd1] "=m" (regs->uregs[rd+1])
+ : [rm] "m" (regs->uregs[rm]),
+ [cpsr] "r" (regs->ARM_cpsr),
+ [i_fn] "r" (i_fn)
+ : "r0", "r1", "r2", "r3", "lr", "cc"
+ );
+}
+
+static void __kprobes emulate_strd(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_4arg_fn_t *i_fn = (insn_4arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+ long rnv = regs->uregs[rn];
+ long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
+
+ regs->uregs[rn] = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd],
+ regs->uregs[rd+1],
+ regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ union reg_pair fnr;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+ long rdv;
+ long rnv = regs->uregs[rn];
+ long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
+ long cpsr = regs->ARM_cpsr;
+
+ fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
+ regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */
+ rdv = fnr.r1;
+
+ if (rd == 15) {
+#if __LINUX_ARM_ARCH__ >= 5
+ cpsr &= ~PSR_T_BIT;
+ if (rdv & 0x1)
+ cpsr |= PSR_T_BIT;
+ regs->ARM_cpsr = cpsr;
+ rdv &= ~0x1;
+#else
+ rdv &= ~0x2;
+#endif
+ }
+ regs->uregs[rd] = rdv;
+}
+
+static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ long iaddr = (long)p->addr;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+ long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd];
+ long rnv = (rn == 15) ? iaddr + 8 : regs->uregs[rn];
+ long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
+
+ /* Save Rn in case of writeback. */
+ regs->uregs[rn] =
+ insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes emulate_mrrc(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_llret_0arg_fn_t *i_fn = (insn_llret_0arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ union reg_pair fnr;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+
+ fnr.dr = insnslot_llret_0arg_rflags(regs->ARM_cpsr, i_fn);
+ regs->uregs[rn] = fnr.r0;
+ regs->uregs[rd] = fnr.r1;
+}
+
+static void __kprobes emulate_mcrr(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ long rnv = regs->uregs[rn];
+ long rdv = regs->uregs[rd];
+
+ insnslot_2arg_rflags(rnv, rdv, regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes emulate_sat(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+ int rm = insn & 0xf;
+ long rmv = regs->uregs[rm];
+
+ /* Writes Q flag */
+ regs->uregs[rd] = insnslot_1arg_rwflags(rmv, &regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes emulate_sel(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+ long rnv = regs->uregs[rn];
+ long rmv = regs->uregs[rm];
+
+ /* Reads GE bits */
+ regs->uregs[rd] = insnslot_2arg_rflags(rnv, rmv, regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes emulate_none(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0];
+
+ insnslot_0arg_rflags(regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes emulate_rd12(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+
+ regs->uregs[rd] = insnslot_0arg_rflags(regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes emulate_ird12(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int ird = (insn >> 12) & 0xf;
+
+ insnslot_1arg_rflags(regs->uregs[ird], regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes emulate_rn16(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rn = (insn >> 16) & 0xf;
+ long rnv = regs->uregs[rn];
+
+ insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes emulate_rd12rm0(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+ int rm = insn & 0xf;
+ long rmv = regs->uregs[rm];
+
+ regs->uregs[rd] = insnslot_1arg_rflags(rmv, regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes
+emulate_rd12rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+ long rnv = regs->uregs[rn];
+ long rmv = regs->uregs[rm];
+
+ regs->uregs[rd] =
+ insnslot_2arg_rwflags(rnv, rmv, &regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes
+emulate_rd16rn12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 16) & 0xf;
+ int rn = (insn >> 12) & 0xf;
+ int rs = (insn >> 8) & 0xf;
+ int rm = insn & 0xf;
+ long rnv = regs->uregs[rn];
+ long rsv = regs->uregs[rs];
+ long rmv = regs->uregs[rm];
+
+ regs->uregs[rd] =
+ insnslot_3arg_rwflags(rnv, rsv, rmv, &regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes
+emulate_rd16rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 16) & 0xf;
+ int rs = (insn >> 8) & 0xf;
+ int rm = insn & 0xf;
+ long rsv = regs->uregs[rs];
+ long rmv = regs->uregs[rm];
+
+ regs->uregs[rd] =
+ insnslot_2arg_rwflags(rsv, rmv, &regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes
+emulate_rdhi16rdlo12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_llret_4arg_fn_t *i_fn = (insn_llret_4arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ union reg_pair fnr;
+ int rdhi = (insn >> 16) & 0xf;
+ int rdlo = (insn >> 12) & 0xf;
+ int rs = (insn >> 8) & 0xf;
+ int rm = insn & 0xf;
+ long rsv = regs->uregs[rs];
+ long rmv = regs->uregs[rm];
+
+ fnr.dr = insnslot_llret_4arg_rwflags(regs->uregs[rdhi],
+ regs->uregs[rdlo], rsv, rmv,
+ &regs->ARM_cpsr, i_fn);
+ regs->uregs[rdhi] = fnr.r0;
+ regs->uregs[rdlo] = fnr.r1;
+}
+
+static void __kprobes
+emulate_alu_imm_rflags(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn];
+
+ regs->uregs[rd] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes
+emulate_alu_imm_rwflags(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn];
+
+ regs->uregs[rd] = insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes
+emulate_alu_rflags(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ long ppc = (long)p->addr + 8;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf; /* rn/rnv/rs/rsv may be */
+ int rs = (insn >> 8) & 0xf; /* invalid, don't care. */
+ int rm = insn & 0xf;
+ long rnv = (rn == 15) ? ppc : regs->uregs[rn];
+ long rmv = (rm == 15) ? ppc : regs->uregs[rm];
+ long rsv = regs->uregs[rs];
+
+ regs->uregs[rd] =
+ insnslot_3arg_rflags(rnv, rmv, rsv, regs->ARM_cpsr, i_fn);
+}
+
+static void __kprobes
+emulate_alu_rwflags(struct kprobe *p, struct pt_regs *regs)
+{
+ insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
+ kprobe_opcode_t insn = p->opcode;
+ long ppc = (long)p->addr + 8;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf; /* rn/rnv/rs/rsv may be */
+ int rs = (insn >> 8) & 0xf; /* invalid, don't care. */
+ int rm = insn & 0xf;
+ long rnv = (rn == 15) ? ppc : regs->uregs[rn];
+ long rmv = (rm == 15) ? ppc : regs->uregs[rm];
+ long rsv = regs->uregs[rs];
+
+ regs->uregs[rd] =
+ insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn);
+}
+
+static enum kprobe_insn __kprobes
+prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ int ibit = (insn & (1 << 26)) ? 25 : 22;
+
+ insn &= 0xfff00fff;
+ insn |= 0x00001000; /* Rn = r0, Rd = r1 */
+ if (insn & (1 << ibit)) {
+ insn &= ~0xf;
+ insn |= 2; /* Rm = r2 */
+ }
+ asi->insn[0] = insn;
+ asi->insn_handler = (insn & (1 << 20)) ? emulate_ldr : emulate_str;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_rd12rm0;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+prep_emulate_rd12(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ insn &= 0xffff0fff; /* Rd = r0 */
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_rd12;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+prep_emulate_rd12rn16rm0_wflags(kprobe_opcode_t insn,
+ struct arch_specific_insn *asi)
+{
+ insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */
+ insn |= 0x00000001; /* Rm = r1 */
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_rd12rn16rm0_rwflags;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+prep_emulate_rd16rs8rm0_wflags(kprobe_opcode_t insn,
+ struct arch_specific_insn *asi)
+{
+ insn &= 0xfff0f0f0; /* Rd = r0, Rs = r0 */
+ insn |= 0x00000001; /* Rm = r1 */
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_rd16rs8rm0_rwflags;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+prep_emulate_rd16rn12rs8rm0_wflags(kprobe_opcode_t insn,
+ struct arch_specific_insn *asi)
+{
+ insn &= 0xfff000f0; /* Rd = r0, Rn = r0 */
+ insn |= 0x00000102; /* Rs = r1, Rm = r2 */
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_rd16rn12rs8rm0_rwflags;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn,
+ struct arch_specific_insn *asi)
+{
+ insn &= 0xfff000f0; /* RdHi = r0, RdLo = r1 */
+ insn |= 0x00001203; /* Rs = r2, Rm = r3 */
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_rdhi16rdlo12rs8rm0_rwflags;
+ return INSN_GOOD;
+}
+
+/*
+ * For the instruction masking and comparisons in all the "space_*"
+ * functions below, do _not_ rearrange the order of tests unless
+ * you're very, very sure of what you are doing. For the sake of
+ * efficiency, the masks for some tests sometimes assume that other
+ * tests have been done before them, so the pattern matched by a
+ * given test can be as broad as possible while keeping the total
+ * number of tests small.
+ */
+
+static enum kprobe_insn __kprobes
+space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /* CPS mmod == 1 : 1111 0001 0000 xx10 xxxx xxxx xx0x xxxx */
+ /* RFE : 1111 100x x0x1 xxxx xxxx 1010 xxxx xxxx */
+ /* SRS : 1111 100x x1x0 1101 xxxx 0101 xxxx xxxx */
+ if ((insn & 0xfff30020) == 0xf1020000 ||
+ (insn & 0xfe500f00) == 0xf8100a00 ||
+ (insn & 0xfe5f0f00) == 0xf84d0500)
+ return INSN_REJECTED;
+
+ /* PLD : 1111 01x1 x101 xxxx xxxx xxxx xxxx xxxx : */
+ if ((insn & 0xfd700000) == 0xf4500000) {
+ insn &= 0xfff0ffff; /* Rn = r0 */
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_rn16;
+ return INSN_GOOD;
+ }
+
+ /* BLX(1) : 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx : */
+ if ((insn & 0xfe000000) == 0xfa000000) {
+ asi->insn_handler = simulate_blx1;
+ return INSN_GOOD_NO_SLOT;
+ }
+
+ /* SETEND : 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */
+ /* CDP2 : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
+ if ((insn & 0xffff00f0) == 0xf1010000 ||
+ (insn & 0xff000010) == 0xfe000000) {
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_none;
+ return INSN_GOOD;
+ }
+
+ /* MCRR2 : 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */
+ /* MRRC2 : 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */
+ if ((insn & 0xffe00000) == 0xfc400000) {
+ insn &= 0xfff00fff; /* Rn = r0 */
+ insn |= 0x00001000; /* Rd = r1 */
+ asi->insn[0] = insn;
+ asi->insn_handler =
+ (insn & (1 << 20)) ? emulate_mrrc : emulate_mcrr;
+ return INSN_GOOD;
+ }
+
+ /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
+ /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
+ if ((insn & 0xfe000000) == 0xfc000000) {
+ insn &= 0xfff0ffff; /* Rn = r0 */
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_ldcstc;
+ return INSN_GOOD;
+ }
+
+ /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
+ /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
+ insn &= 0xffff0fff; /* Rd = r0 */
+ asi->insn[0] = insn;
+ asi->insn_handler = (insn & (1 << 20)) ? emulate_rd12 : emulate_ird12;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /* cccc 0001 0xx0 xxxx xxxx xxxx xxx0 xxxx */
+ if ((insn & 0x0f900010) == 0x01000000) {
+
+ /* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */
+ /* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */
+ if ((insn & 0x0ff000f0) == 0x01200020 ||
+ (insn & 0x0fb000f0) == 0x01200000)
+ return INSN_REJECTED;
+
+ /* MRS : cccc 0001 0x00 xxxx xxxx xxxx 0000 xxxx */
+ if ((insn & 0x0fb00010) == 0x01000000)
+ return prep_emulate_rd12(insn, asi);
+
+ /* SMLALxy : cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */
+ if ((insn & 0x0ff00090) == 0x01400080)
+ return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi);
+
+ /* SMULWy : cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */
+ /* SMULxy : cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */
+ if ((insn & 0x0ff000b0) == 0x012000a0 ||
+ (insn & 0x0ff00090) == 0x01600080)
+ return prep_emulate_rd16rs8rm0_wflags(insn, asi);
+
+ /* SMLAxy : cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx : Q */
+ /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 0x00 xxxx : Q */
+ return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
+
+ }
+
+ /* cccc 0001 0xx0 xxxx xxxx xxxx 0xx1 xxxx */
+ else if ((insn & 0x0f900090) == 0x01000010) {
+
+ /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
+ if ((insn & 0xfff000f0) == 0xe1200070)
+ return INSN_REJECTED;
+
+ /* BLX(2) : cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */
+ /* BX : cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */
+ if ((insn & 0x0ff000d0) == 0x01200010) {
+ asi->insn[0] = truecc_insn(insn);
+ asi->insn_handler = simulate_blx2bx;
+ return INSN_GOOD;
+ }
+
+ /* CLZ : cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */
+ if ((insn & 0x0ff000f0) == 0x01600010)
+ return prep_emulate_rd12rm0(insn, asi);
+
+ /* QADD : cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx :Q */
+ /* QSUB : cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx :Q */
+ /* QDADD : cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx :Q */
+ /* QDSUB : cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx :Q */
+ return prep_emulate_rd12rn16rm0_wflags(insn, asi);
+ }
+
+ /* cccc 0000 xxxx xxxx xxxx xxxx 1001 xxxx */
+ else if ((insn & 0x0f000090) == 0x00000090) {
+
+ /* MUL : cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx : */
+ /* MULS : cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx :cc */
+ /* MLA : cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx : */
+ /* MLAS : cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx :cc */
+ /* UMAAL : cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx : */
+ /* UMULL : cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx : */
+ /* UMULLS : cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx :cc */
+ /* UMLAL : cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx : */
+ /* UMLALS : cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx :cc */
+ /* SMULL : cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx : */
+ /* SMULLS : cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx :cc */
+ /* SMLAL : cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx : */
+ /* SMLALS : cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx :cc */
+ if ((insn & 0x0fe000f0) == 0x00000090) {
+ return prep_emulate_rd16rs8rm0_wflags(insn, asi);
+ } else if ((insn & 0x0fe000f0) == 0x00200090) {
+ return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
+ } else {
+ return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi);
+ }
+ }
+
+ /* cccc 000x xxxx xxxx xxxx xxxx 1xx1 xxxx */
+ else if ((insn & 0x0e000090) == 0x00000090) {
+
+ /* SWP : cccc 0001 0000 xxxx xxxx xxxx 1001 xxxx */
+ /* SWPB : cccc 0001 0100 xxxx xxxx xxxx 1001 xxxx */
+ /* LDRD : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */
+ /* STRD : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */
+ /* STREX : cccc 0001 1000 xxxx xxxx xxxx 1001 xxxx */
+ /* LDREX : cccc 0001 1001 xxxx xxxx xxxx 1001 xxxx */
+ /* LDRH : cccc 000x xxx1 xxxx xxxx xxxx 1011 xxxx */
+ /* STRH : cccc 000x xxx0 xxxx xxxx xxxx 1011 xxxx */
+ /* LDRSB : cccc 000x xxx1 xxxx xxxx xxxx 1101 xxxx */
+ /* LDRSH : cccc 000x xxx1 xxxx xxxx xxxx 1111 xxxx */
+ if ((insn & 0x0fb000f0) == 0x01000090) {
+ /* SWP/SWPB */
+ return prep_emulate_rd12rn16rm0_wflags(insn, asi);
+ } else if ((insn & 0x0e1000d0) == 0x00000d0) {
+ /* STRD/LDRD */
+ insn &= 0xfff00fff;
+ insn |= 0x00002000; /* Rn = r0, Rd = r2 */
+ if (insn & (1 << 22)) {
+ /* I bit */
+ insn &= ~0xf;
+ insn |= 1; /* Rm = r1 */
+ }
+ asi->insn[0] = insn;
+ asi->insn_handler =
+ (insn & (1 << 5)) ? emulate_strd : emulate_ldrd;
+ return INSN_GOOD;
+ }
+
+ return prep_emulate_ldr_str(insn, asi);
+ }
+
+ /* cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx */
+
+ /*
+ * ALU op with S bit and Rd == 15 :
+ * cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx
+ */
+ if ((insn & 0x0e10f000) == 0x0010f000)
+ return INSN_REJECTED;
+
+ /*
+ * "mov ip, sp" is the most common kprobe'd instruction by far.
+ * Check and optimize for it explicitly.
+ */
+ if (insn == 0xe1a0c00d) {
+ asi->insn_handler = simulate_mov_ipsp;
+ return INSN_GOOD_NO_SLOT;
+ }
+
+ /*
+ * Data processing: Immediate-shift / Register-shift
+ * ALU op : cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx
+ * CPY : cccc 0001 1010 xxxx xxxx 0000 0000 xxxx
+ * MOV : cccc 0001 101x xxxx xxxx xxxx xxxx xxxx
+ * *S (bit 20) updates condition codes
+ * ADC/SBC/RSC reads the C flag
+ */
+ insn &= 0xfff00ff0; /* Rn = r0, Rd = r0 */
+ insn |= 0x00000001; /* Rm = r1 */
+ if (insn & 0x010) {
+ insn &= 0xfffff0ff; /* register shift */
+ insn |= 0x00000200; /* Rs = r2 */
+ }
+ asi->insn[0] = insn;
+ asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */
+ emulate_alu_rwflags : emulate_alu_rflags;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /*
+ * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
+ * Undef : cccc 0011 0x00 xxxx xxxx xxxx xxxx xxxx
+ * ALU op with S bit and Rd == 15 :
+ * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
+ */
+ if ((insn & 0x0f900000) == 0x03200000 || /* MSR & Undef */
+ (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */
+ return INSN_REJECTED;
+
+ /*
+ * Data processing: 32-bit Immediate
+ * ALU op : cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx
+ * MOV : cccc 0011 101x xxxx xxxx xxxx xxxx xxxx
+ * *S (bit 20) updates condition codes
+ * ADC/SBC/RSC reads the C flag
+ */
+ insn &= 0xfff00ff0; /* Rn = r0, Rd = r0 */
+ asi->insn[0] = insn;
+ asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */
+ emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /* SEL : cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx GE: !!! */
+ if ((insn & 0x0ff000f0) == 0x068000b0) {
+ insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */
+ insn |= 0x00000001; /* Rm = r1 */
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_sel;
+ return INSN_GOOD;
+ }
+
+ /* SSAT : cccc 0110 101x xxxx xxxx xxxx xx01 xxxx :Q */
+ /* USAT : cccc 0110 111x xxxx xxxx xxxx xx01 xxxx :Q */
+ /* SSAT16 : cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx :Q */
+ /* USAT16 : cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx :Q */
+ if ((insn & 0x0fa00030) == 0x06a00010 ||
+ (insn & 0x0fb000f0) == 0x06a00030) {
+ insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_sat;
+ return INSN_GOOD;
+ }
+
+ /* REV : cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */
+ /* REV16 : cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */
+ /* REVSH : cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */
+ if ((insn & 0x0ff00070) == 0x06b00030 ||
+ (insn & 0x0ff000f0) == 0x06f000b0)
+ return prep_emulate_rd12rm0(insn, asi);
+
+ /* SADD16 : cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx :GE */
+ /* SADDSUBX : cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx :GE */
+ /* SSUBADDX : cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx :GE */
+ /* SSUB16 : cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx :GE */
+ /* SADD8 : cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx :GE */
+ /* SSUB8 : cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx :GE */
+ /* QADD16 : cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx : */
+ /* QADDSUBX : cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx : */
+ /* QSUBADDX : cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx : */
+ /* QSUB16 : cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx : */
+ /* QADD8 : cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx : */
+ /* QSUB8 : cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx : */
+ /* SHADD16 : cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx : */
+ /* SHADDSUBX : cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx : */
+ /* SHSUBADDX : cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx : */
+ /* SHSUB16 : cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx : */
+ /* SHADD8 : cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx : */
+ /* SHSUB8 : cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx : */
+ /* UADD16 : cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx :GE */
+ /* UADDSUBX : cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx :GE */
+ /* USUBADDX : cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx :GE */
+ /* USUB16 : cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx :GE */
+ /* UADD8 : cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx :GE */
+ /* USUB8 : cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx :GE */
+ /* UQADD16 : cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx : */
+ /* UQADDSUBX : cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx : */
+ /* UQSUBADDX : cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx : */
+ /* UQSUB16 : cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx : */
+ /* UQADD8 : cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx : */
+ /* UQSUB8 : cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx : */
+ /* UHADD16 : cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx : */
+ /* UHADDSUBX : cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx : */
+ /* UHSUBADDX : cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx : */
+ /* UHSUB16 : cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx : */
+ /* UHADD8 : cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx : */
+ /* UHSUB8 : cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx : */
+ /* PKHBT : cccc 0110 1000 xxxx xxxx xxxx x001 xxxx : */
+ /* PKHTB : cccc 0110 1000 xxxx xxxx xxxx x101 xxxx : */
+ /* SXTAB16 : cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx : */
+ /* SXTB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */
+ /* SXTAB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */
+ /* SXTAH : cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx : */
+ /* UXTAB16 : cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx : */
+ /* UXTAB : cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx : */
+ /* UXTAH : cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx : */
+ return prep_emulate_rd12rn16rm0_wflags(insn, asi);
+}
+
+static enum kprobe_insn __kprobes
+space_cccc_0111__1(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /* Undef : cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */
+ if ((insn & 0x0ff000f0) == 0x03f000f0)
+ return INSN_REJECTED;
+
+ /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */
+ /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */
+ if ((insn & 0x0ff000f0) == 0x07800010)
+ return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
+
+ /* SMLALD : cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */
+ /* SMLSLD : cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */
+ if ((insn & 0x0ff00090) == 0x07400010)
+ return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi);
+
+ /* SMLAD : cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx :Q */
+ /* SMLSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx :Q */
+ /* SMMLA : cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx : */
+ /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */
+ if ((insn & 0x0ff00090) == 0x07000010 ||
+ (insn & 0x0ff000d0) == 0x07500010 ||
+ (insn & 0x0ff000d0) == 0x075000d0)
+ return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
+
+ /* SMUSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx : */
+ /* SMUAD : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */
+ /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */
+ return prep_emulate_rd16rs8rm0_wflags(insn, asi);
+}
+
+static enum kprobe_insn __kprobes
+space_cccc_01xx(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /* LDR : cccc 01xx x0x1 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRB : cccc 01xx x1x1 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRBT : cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */
+ /* LDRT : cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */
+ /* STR : cccc 01xx x0x0 xxxx xxxx xxxx xxxx xxxx */
+ /* STRB : cccc 01xx x1x0 xxxx xxxx xxxx xxxx xxxx */
+ /* STRBT : cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */
+ /* STRT : cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */
+ return prep_emulate_ldr_str(insn, asi);
+}
+
+static enum kprobe_insn __kprobes
+space_cccc_100x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /* LDM(2) : cccc 100x x101 xxxx 0xxx xxxx xxxx xxxx */
+ /* LDM(3) : cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */
+ if ((insn & 0x0e708000) == 0x08500000 ||
+ (insn & 0x0e508000) == 0x08508000)
+ return INSN_REJECTED;
+
+ /* LDM(1) : cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
+ /* STM(1) : cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */
+ asi->insn[0] = truecc_insn(insn);
+ asi->insn_handler = ((insn & 0x108000) == 0x008000) ? /* STM & R15 */
+ simulate_stm1_pc : simulate_ldm1stm1;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+space_cccc_101x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /* B : cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */
+ /* BL : cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */
+ asi->insn[0] = truecc_insn(insn);
+ asi->insn_handler = simulate_bbl;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+space_cccc_1100_010x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /* MCRR : cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */
+ /* MRRC : cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */
+ insn &= 0xfff00fff;
+ insn |= 0x00001000; /* Rn = r0, Rd = r1 */
+ asi->insn[0] = insn;
+ asi->insn_handler = (insn & (1 << 20)) ? emulate_mrrc : emulate_mcrr;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+space_cccc_110x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
+ /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
+ insn &= 0xfff0ffff; /* Rn = r0 */
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_ldcstc;
+ return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+space_cccc_111x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
+ /* SWI : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */
+ if ((insn & 0xfff000f0) == 0xe1200070 ||
+ (insn & 0x0f000000) == 0x0f000000)
+ return INSN_REJECTED;
+
+ /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
+ if ((insn & 0x0f000010) == 0x0e000000) {
+ asi->insn[0] = insn;
+ asi->insn_handler = emulate_none;
+ return INSN_GOOD;
+ }
+
+ /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
+ /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
+ insn &= 0xffff0fff; /* Rd = r0 */
+ asi->insn[0] = insn;
+ asi->insn_handler = (insn & (1 << 20)) ? emulate_rd12 : emulate_ird12;
+ return INSN_GOOD;
+}
+
+/* Return:
+ * INSN_REJECTED     If the instruction is one we can't kprobe,
+ * INSN_GOOD         If the instruction is supported and uses its
+ *                   instruction slot,
+ * INSN_GOOD_NO_SLOT If the instruction is supported but doesn't use
+ *                   its slot.
+ *
+ * For instructions we don't want to kprobe (INSN_REJECTED return result):
+ * These are generally instructions that modify the processor state,
+ * such as those that switch processor modes or make accesses in
+ * alternate modes, which makes them "hard" to simulate. Any of these
+ * could be simulated if the work were put into it, but the return
+ * would be low considering they should also be very rare.
+ */
+enum kprobe_insn __kprobes
+arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+ asi->insn[1] = KPROBE_RETURN_INSTRUCTION;
+
+ if ((insn & 0xf0000000) == 0xf0000000) {
+
+ return space_1111(insn, asi);
+
+ } else if ((insn & 0x0e000000) == 0x00000000) {
+
+ return space_cccc_000x(insn, asi);
+
+ } else if ((insn & 0x0e000000) == 0x02000000) {
+
+ return space_cccc_001x(insn, asi);
+
+ } else if ((insn & 0x0f000010) == 0x06000010) {
+
+ return space_cccc_0110__1(insn, asi);
+
+ } else if ((insn & 0x0f000010) == 0x07000010) {
+
+ return space_cccc_0111__1(insn, asi);
+
+ } else if ((insn & 0x0c000000) == 0x04000000) {
+
+ return space_cccc_01xx(insn, asi);
+
+ } else if ((insn & 0x0e000000) == 0x08000000) {
+
+ return space_cccc_100x(insn, asi);
+
+ } else if ((insn & 0x0e000000) == 0x0a000000) {
+
+ return space_cccc_101x(insn, asi);
+
+ } else if ((insn & 0x0fe00000) == 0x0c400000) {
+
+ return space_cccc_1100_010x(insn, asi);
+
+	} else if ((insn & 0x0e000000) == 0x0c000000) {
+
+ return space_cccc_110x(insn, asi);
+
+ }
+
+ return space_cccc_111x(insn, asi);
+}
+
+void __init arm_kprobe_decode_init(void)
+{
+ find_str_pc_offset();
+}
+
+
+/*
+ * All ARM instructions listed below.
+ *
+ * Instructions and their general purpose registers are given.
+ * If a particular register may not use R15, it is prefixed with a "!".
+ * A "*" means that the value returned by reading R15 is
+ * implementation defined.
+ *
+ * ADC/ADD/AND/BIC/CMN/CMP/EOR/MOV/MVN/ORR/RSB/RSC/SBC/SUB/TEQ
+ * TST: Rd, Rn, Rm, !Rs
+ * BLX(2): !Rm
+ * BX: Rm (R15 legal, but discouraged)
+ * BXJ: !Rm
+ * CLZ: !Rd, !Rm
+ * CPY: Rd, Rm
+ * LDC/2,STC/2 immediate offset & unindexed: Rn
+ * LDC/2,STC/2 immediate pre/post-indexed: !Rn
+ * LDM(1/3): !Rn, register_list
+ * LDM(2): !Rn, !register_list
+ * LDR,STR,PLD immediate offset: Rd, Rn
+ * LDR,STR,PLD register offset: Rd, Rn, !Rm
+ * LDR,STR,PLD scaled register offset: Rd, !Rn, !Rm
+ * LDR,STR immediate pre/post-indexed: Rd, !Rn
+ * LDR,STR register pre/post-indexed: Rd, !Rn, !Rm
+ * LDR,STR scaled register pre/post-indexed: Rd, !Rn, !Rm
+ * LDRB,STRB immediate offset: !Rd, Rn
+ * LDRB,STRB register offset: !Rd, Rn, !Rm
+ * LDRB,STRB scaled register offset: !Rd, !Rn, !Rm
+ * LDRB,STRB immediate pre/post-indexed: !Rd, !Rn
+ * LDRB,STRB register pre/post-indexed: !Rd, !Rn, !Rm
+ * LDRB,STRB scaled register pre/post-indexed: !Rd, !Rn, !Rm
+ * LDRT,LDRBT,STRBT immediate pre/post-indexed: !Rd, !Rn
+ * LDRT,LDRBT,STRBT register pre/post-indexed: !Rd, !Rn, !Rm
+ * LDRT,LDRBT,STRBT scaled register pre/post-indexed: !Rd, !Rn, !Rm
+ * LDRH/SH/SB/D,STRH/SH/SB/D immediate offset: !Rd, Rn
+ * LDRH/SH/SB/D,STRH/SH/SB/D register offset: !Rd, Rn, !Rm
+ * LDRH/SH/SB/D,STRH/SH/SB/D immediate pre/post-indexed: !Rd, !Rn
+ * LDRH/SH/SB/D,STRH/SH/SB/D register pre/post-indexed: !Rd, !Rn, !Rm
+ * LDREX: !Rd, !Rn
+ * MCR/2: !Rd
+ * MCRR/2,MRRC/2: !Rd, !Rn
+ * MLA: !Rd, !Rn, !Rm, !Rs
+ * MOV: Rd
+ * MRC/2: !Rd (if Rd==15, only changes cond codes, not the register)
+ * MRS,MSR: !Rd
+ * MUL: !Rd, !Rm, !Rs
+ * PKH{BT,TB}: !Rd, !Rn, !Rm
+ * QDADD,[U]QADD/16/8/SUBX: !Rd, !Rm, !Rn
+ * QDSUB,[U]QSUB/16/8/ADDX: !Rd, !Rm, !Rn
+ * REV/16/SH: !Rd, !Rm
+ * RFE: !Rn
+ * {S,U}[H]ADD{16,8,SUBX},{S,U}[H]SUB{16,8,ADDX}: !Rd, !Rn, !Rm
+ * SEL: !Rd, !Rn, !Rm
+ * SMLA<x><y>,SMLA{D,W<y>},SMLSD,SMML{A,S}: !Rd, !Rn, !Rm, !Rs
+ * SMLAL<x><y>,SMLA{D,LD},SMLSLD,SMMULL,SMULW<y>: !RdHi, !RdLo, !Rm, !Rs
+ * SMMUL,SMUAD,SMUL<x><y>,SMUSD: !Rd, !Rm, !Rs
+ * SSAT/16: !Rd, !Rm
+ * STM(1/2): !Rn, register_list* (R15 in reg list not recommended)
+ * STRT immediate pre/post-indexed: Rd*, !Rn
+ * STRT register pre/post-indexed: Rd*, !Rn, !Rm
+ * STRT scaled register pre/post-indexed: Rd*, !Rn, !Rm
+ * STREX: !Rd, !Rn, !Rm
+ * SWP/B: !Rd, !Rn, !Rm
+ * {S,U}XTA{B,B16,H}: !Rd, !Rn, !Rm
+ * {S,U}XT{B,B16,H}: !Rd, !Rm
+ * UM{AA,LA,UL}L: !RdHi, !RdLo, !Rm, !Rs
+ * USA{D8,A8,T,T16}: !Rd, !Rm, !Rs
+ *
+ * May transfer control by writing R15 (possible mode changes or alternate
+ * mode accesses marked by "*"):
+ * ALU op (* with s-bit), B, BL, BKPT, BLX(1/2), BX, BXJ, CPS*, CPY,
+ * LDM(1), LDM(2/3)*, LDR, MOV, RFE*, SWI*
+ *
+ * Instructions that do not take general registers, nor transfer control:
+ * CDP/2, SETEND, SRS*
+ */
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
new file mode 100644
index 00000000000..a22a98c43ca
--- /dev/null
+++ b/arch/arm/kernel/kprobes.c
@@ -0,0 +1,447 @@
+/*
+ * arch/arm/kernel/kprobes.c
+ *
+ * Kprobes on ARM
+ *
+ * Abhishek Sagar <sagar.abhishek@gmail.com>
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * Nicolas Pitre <nico@marvell.com>
+ * Copyright (C) 2007 Marvell Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include <asm/traps.h>
+#include <asm/cacheflush.h>
+
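+/*
+ * Bytes of live stack between addr and the top of the kernel stack,
+ * capped at MAX_STACK_SIZE: the amount the jprobe code below saves
+ * before, and restores after, running a jprobe handler.
+ */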
+#define MIN_STACK_SIZE(addr) \
+ min((unsigned long)MAX_STACK_SIZE, \
+ (unsigned long)current_thread_info() + THREAD_START_SP - (addr))
+
+#define flush_insns(addr, cnt) \
+ flush_icache_range((unsigned long)(addr), \
+ (unsigned long)(addr) + \
+ sizeof(kprobe_opcode_t) * (cnt))
+
+/* Used as a marker in ARM_pc to note when we're in a jprobe. */
+#define JPROBE_MAGIC_ADDR 0xffffffff
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ kprobe_opcode_t insn;
+ kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
+ unsigned long addr = (unsigned long)p->addr;
+ int is;
+
+ if (addr & 0x3 || in_exception_text(addr))
+ return -EINVAL;
+
+ insn = *p->addr;
+ p->opcode = insn;
+ p->ainsn.insn = tmp_insn;
+
+ switch (arm_kprobe_decode_insn(insn, &p->ainsn)) {
+ case INSN_REJECTED: /* not supported */
+ return -EINVAL;
+
+ case INSN_GOOD: /* instruction uses slot */
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn)
+ return -ENOMEM;
+ for (is = 0; is < MAX_INSN_SIZE; ++is)
+ p->ainsn.insn[is] = tmp_insn[is];
+		flush_insns(p->ainsn.insn, MAX_INSN_SIZE);
+ break;
+
+ case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */
+ p->ainsn.insn = NULL;
+ break;
+ }
+
+ return 0;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = KPROBE_BREAKPOINT_INSTRUCTION;
+ flush_insns(p->addr, 1);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_insns(p->addr, 1);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.insn) {
+ mutex_lock(&kprobe_mutex);
+ free_insn_slot(p->ainsn.insn, 0);
+ mutex_unlock(&kprobe_mutex);
+ p->ainsn.insn = NULL;
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __get_cpu_var(current_kprobe) = p;
+}
+
+static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
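+	/*
+	 * The breakpoint leaves ARM_pc at the probed instruction;
+	 * step past it, then invoke the handler that simulates or
+	 * emulates the original instruction.
+	 */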
+ regs->ARM_pc += 4;
+ p->ainsn.insn_handler(p, regs);
+}
+
+/*
+ * Called with IRQs disabled. IRQs must remain disabled from that point
+ * all the way until processing this kprobe is complete. The current
+ * kprobes implementation cannot process more than one nested level of
+ * kprobe, and that level is reserved for user kprobe handlers, so we can't
+ * risk encountering a new kprobe in an interrupt handler.
+ */
+void __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p, *cur;
+ struct kprobe_ctlblk *kcb;
+ kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->ARM_pc;
+
+ kcb = get_kprobe_ctlblk();
+ cur = kprobe_running();
+ p = get_kprobe(addr);
+
+ if (p) {
+ if (cur) {
+ /* Kprobe is pending, so we're recursing. */
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /* A pre- or post-handler probe got us here. */
+ kprobes_inc_nmissed_count(p);
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ singlestep(p, regs, kcb);
+ restore_previous_kprobe(kcb);
+ break;
+ default:
+ /* impossible cases */
+ BUG();
+ }
+ } else {
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /*
+ * If we have no pre-handler or it returned 0, we
+ * continue with normal processing. If we have a
+ * pre-handler and it returned non-zero, it prepped
+ * for calling the break_handler below on re-entry,
+ * so get out doing nothing more here.
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ singlestep(p, regs, kcb);
+ if (p->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ reset_current_kprobe();
+ }
+ }
+ } else if (cur) {
+ /* We probably hit a jprobe. Call its break handler. */
+ if (cur->break_handler && cur->break_handler(cur, regs)) {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ singlestep(cur, regs, kcb);
+ if (cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+ }
+ reset_current_kprobe();
+ } else {
+ /*
+ * The probe was removed and a race is in progress.
+ * There is nothing we can do about it. Let's restart
+ * the instruction. By the time we can restart, the
+ * real instruction will be there.
+ */
+ }
+}
+
+int __kprobes kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
+{
+ kprobe_handler(regs);
+ return 0;
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe and the PC to point back to the probe address
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->ARM_pc = (long)cur->addr;
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ } else {
+ reset_current_kprobe();
+ }
+ break;
+
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+	 * We increment the nmissed count for accounting; the
+	 * npre/npostfault counts could also be used to account for
+	 * these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page_fault, this could happen
+ * if handler tries to access user space by
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
+ return 1;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ /*
+ * notify_die() is currently never called on ARM,
+	 * so this callback is empty.
+ */
+ return NOTIFY_DONE;
+}
+
+/*
+ * When a retprobed function returns, trampoline_handler() is called,
+ * calling the kretprobe's handler. We construct a struct pt_regs to
+ * give a view of registers r0-r11 to the user return-handler. This is
+ * not a complete pt_regs structure, but it should be sufficient for
+ * kretprobe handlers, which are normally interested in r0 only
+ * anyway.
+ */
+static void __attribute__((naked)) __kprobes kretprobe_trampoline(void)
+{
+ __asm__ __volatile__ (
+ "stmdb sp!, {r0 - r11} \n\t"
+ "mov r0, sp \n\t"
+ "bl trampoline_handler \n\t"
+ "mov lr, r0 \n\t"
+ "ldmia sp!, {r0 - r11} \n\t"
+ "mov pc, lr \n\t"
+ : : : "memory");
+}
+
+/* Called from kretprobe_trampoline */
+static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *node, *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ spin_lock_irqsave(&kretprobe_lock, flags);
+ head = kretprobe_inst_table_head(current);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because multiple functions in the call path have
+ * a return probe installed on them, and/or more than one return
+ * probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ if (ri->rp && ri->rp->handler) {
+ __get_cpu_var(current_kprobe) = &ri->rp->kp;
+ get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ ri->rp->handler(ri, regs);
+ __get_cpu_var(current_kprobe) = NULL;
+ }
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+
+ hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+
+ return (void *)orig_ret_address;
+}
+
+/* Called with kretprobe_lock held. */
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;
+
+ /* Replace the return addr with trampoline addr. */
+ regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ long sp_addr = regs->ARM_sp;
+
+ kcb->jprobe_saved_regs = *regs;
+ memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
+ regs->ARM_pc = (long)jp->entry;
+ regs->ARM_cpsr |= PSR_I_BIT;
+ preempt_disable();
+ return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ __asm__ __volatile__ (
+ /*
+	 * Set up an empty pt_regs. Fill the SP and PC fields as
+	 * they are needed by longjmp_break_handler.
+ */
+ "sub sp, %0, %1 \n\t"
+ "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
+ "str %0, [sp, %2] \n\t"
+ "str r0, [sp, %3] \n\t"
+ "mov r0, sp \n\t"
+ "bl kprobe_handler \n\t"
+
+ /*
+ * Return to the context saved by setjmp_pre_handler
+ * and restored by longjmp_break_handler.
+ */
+ "ldr r0, [sp, %4] \n\t"
+ "msr cpsr_cxsf, r0 \n\t"
+ "ldmia sp, {r0 - pc} \n\t"
+ :
+ : "r" (kcb->jprobe_saved_regs.ARM_sp),
+ "I" (sizeof(struct pt_regs)),
+ "J" (offsetof(struct pt_regs, ARM_sp)),
+ "J" (offsetof(struct pt_regs, ARM_pc)),
+ "J" (offsetof(struct pt_regs, ARM_cpsr))
+ : "memory", "cc");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ long stack_addr = kcb->jprobe_saved_regs.ARM_sp;
+ long orig_sp = regs->ARM_sp;
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+
+ if (regs->ARM_pc == JPROBE_MAGIC_ADDR) {
+ if (orig_sp != stack_addr) {
+ struct pt_regs *saved_regs =
+ (struct pt_regs *)kcb->jprobe_saved_regs.ARM_sp;
+			printk(KERN_ERR
+			       "current sp %lx does not match saved sp %lx\n",
+			       orig_sp, stack_addr);
+			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
+			show_regs(saved_regs);
+			printk(KERN_ERR "Current registers\n");
+ show_regs(regs);
+ BUG();
+ }
+ *regs = kcb->jprobe_saved_regs;
+ memcpy((void *)stack_addr, kcb->jprobes_stack,
+ MIN_STACK_SIZE(stack_addr));
+ preempt_enable_no_resched();
+ return 1;
+ }
+ return 0;
+}
+
+static struct undef_hook kprobes_break_hook = {
+ .instr_mask = 0xffffffff,
+ .instr_val = KPROBE_BREAKPOINT_INSTRUCTION,
+ .cpsr_mask = MODE_MASK,
+ .cpsr_val = SVC_MODE,
+ .fn = kprobe_trap_handler,
+};
+
+int __init arch_init_kprobes(void)
+{
+ arm_kprobe_decode_init();
+ register_undef_hook(&kprobes_break_hook);
+ return 0;
+}
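
For context, a minimal sketch of a module exercising this port's kprobe API
(assumptions: CONFIG_KPROBES is enabled, and "do_fork" is only an example
target symbol, not anything mandated by this patch):

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/kprobes.h>

	/* Runs just before the probed instruction is single-stepped. */
	static int pre(struct kprobe *p, struct pt_regs *regs)
	{
		printk(KERN_INFO "kprobe hit at %p, r0=%lx\n",
		       p->addr, regs->ARM_r0);
		return 0;	/* 0 => continue normal processing */
	}

	static struct kprobe kp = {
		.symbol_name	= "do_fork",	/* example target */
		.pre_handler	= pre,
	};

	static int __init probe_init(void)
	{
		return register_kprobe(&kp);
	}

	static void __exit probe_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(probe_init);
	module_exit(probe_exit);
	MODULE_LICENSE("GPL");

Arming the probe rewrites the target instruction to
KPROBE_BREAKPOINT_INSTRUCTION; the undef hook registered in
arch_init_kprobes() then routes the resulting exception into
kprobe_handler() above.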
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 863c66454f2..db8f54a3451 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -21,6 +21,7 @@ extern void setup_mm_for_reboot(char mode);
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
extern unsigned long kexec_mach_type;
+extern unsigned long kexec_boot_atags;
/*
* Provide a dummy crash_notes definition while crash dump arrives to arm.
@@ -62,6 +63,7 @@ void machine_kexec(struct kimage *image)
kexec_start_address = image->start;
kexec_indirection_page = page_list;
kexec_mach_type = machine_arch_type;
+ kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
/* copy our kernel relocation code to the control code page */
memcpy(reboot_code_buffer,
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 062c111c572..61930eb0902 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -7,23 +7,6 @@
.globl relocate_new_kernel
relocate_new_kernel:
- /* Move boot params back to where the kernel expects them */
-
- ldr r0,kexec_boot_params_address
- teq r0,#0
- beq 8f
-
- ldr r1,kexec_boot_params_copy
- mov r6,#KEXEC_BOOT_PARAMS_SIZE/4
-7:
- ldr r5,[r1],#4
- str r5,[r0],#4
- subs r6,r6,#1
- bne 7b
-
-8:
- /* Boot params moved, now go on with the kernel */
-
ldr r0,kexec_indirection_page
ldr r1,kexec_start_address
@@ -67,7 +50,7 @@ relocate_new_kernel:
mov lr,r1
mov r0,#0
ldr r1,kexec_mach_type
- ldr r2,kexec_boot_params_address
+ ldr r2,kexec_boot_atags
mov pc,lr
.globl kexec_start_address
@@ -82,14 +65,9 @@ kexec_indirection_page:
kexec_mach_type:
.long 0x0
- /* phy addr where new kernel will expect to find boot params */
- .globl kexec_boot_params_address
-kexec_boot_params_address:
- .long 0x0
-
- /* phy addr where old kernel put a copy of orig boot params */
- .globl kexec_boot_params_copy
-kexec_boot_params_copy:
+ /* phy addr of the atags for the new kernel */
+ .globl kexec_boot_atags
+kexec_boot_atags:
.long 0x0
relocate_new_kernel_end:
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index bf56eb337df..d3941a7b045 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -24,7 +24,6 @@
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
-#include <linux/kexec.h>
#include <asm/cpu.h>
#include <asm/elf.h>
@@ -39,6 +38,7 @@
#include <asm/mach/time.h>
#include "compat.h"
+#include "atags.h"
#ifndef MEM_SIZE
#define MEM_SIZE (16*1024*1024)
@@ -62,6 +62,7 @@ extern int root_mountflags;
extern void _stext, _text, _etext, __data_start, _edata, _end;
unsigned int processor_id;
+EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
@@ -784,23 +785,6 @@ static int __init customize_machine(void)
}
arch_initcall(customize_machine);
-#ifdef CONFIG_KEXEC
-
-/* Physical addr of where the boot params should be for this machine */
-extern unsigned long kexec_boot_params_address;
-
-/* Physical addr of the buffer into which the boot params are copied */
-extern unsigned long kexec_boot_params_copy;
-
-/* Pointer to the boot params buffer, for manipulation and display */
-unsigned long kexec_boot_params;
-EXPORT_SYMBOL(kexec_boot_params);
-
-/* The buffer itself - make sure it is sized correctly */
-static unsigned long kexec_boot_params_buf[(KEXEC_BOOT_PARAMS_SIZE + 3) / 4];
-
-#endif
-
void __init setup_arch(char **cmdline_p)
{
struct tag *tags = (struct tag *)&init_tags;
@@ -819,18 +803,6 @@ void __init setup_arch(char **cmdline_p)
else if (mdesc->boot_params)
tags = phys_to_virt(mdesc->boot_params);
-#ifdef CONFIG_KEXEC
- kexec_boot_params_copy = virt_to_phys(kexec_boot_params_buf);
- kexec_boot_params = (unsigned long)kexec_boot_params_buf;
- if (__atags_pointer) {
- kexec_boot_params_address = __atags_pointer;
- memcpy((void *)kexec_boot_params, tags, KEXEC_BOOT_PARAMS_SIZE);
- } else if (mdesc->boot_params) {
- kexec_boot_params_address = mdesc->boot_params;
- memcpy((void *)kexec_boot_params, tags, KEXEC_BOOT_PARAMS_SIZE);
- }
-#endif
-
/*
* If we have the old style parameters, convert them to
* a tag list.
@@ -846,6 +818,7 @@ void __init setup_arch(char **cmdline_p)
if (tags->hdr.tag == ATAG_CORE) {
if (meminfo.nr_banks != 0)
squash_mem_tags(tags);
+ save_atags(tags);
parse_tags(tags);
}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index eafbb2b05eb..eefae1de334 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -150,7 +150,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
secondary_data.pgdir = 0;
*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
- pgd_free(pgd);
+ pgd_free(&init_mm, pgd);
if (ret) {
printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
@@ -290,6 +290,11 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
local_irq_enable();
local_fiq_enable();
+ /*
+ * Setup local timer for this CPU.
+ */
+ local_timer_setup(cpu);
+
calibrate_delay();
smp_store_cpu_info(cpu);
@@ -300,11 +305,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
cpu_set(cpu, cpu_online_map);
/*
- * Setup local timer for this CPU.
- */
- local_timer_setup(cpu);
-
- /*
* OK, it's off to the idle thread for us
*/
cpu_idle();
@@ -454,6 +454,27 @@ int smp_call_function(void (*func)(void *info), void *info, int retry,
}
EXPORT_SYMBOL_GPL(smp_call_function);
+int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
+ int retry, int wait)
+{
+	/* prevent preemption and migration to another processor */
+ int current_cpu = get_cpu();
+ int ret = 0;
+
+ if (cpu == current_cpu) {
+ local_irq_disable();
+ func(info);
+ local_irq_enable();
+ } else
+ ret = smp_call_function_on_cpu(func, info, retry, wait,
+ cpumask_of_cpu(cpu));
+
+ put_cpu();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(smp_call_function_single);
+
void show_ipi_list(struct seq_file *p)
{
unsigned int cpu;
@@ -481,8 +502,7 @@ void show_local_irqs(struct seq_file *p)
static void ipi_timer(void)
{
irq_enter();
- profile_tick(CPU_PROFILING);
- update_process_times(user_mode(get_irq_regs()));
+ local_timer_interrupt();
irq_exit();
}
@@ -621,6 +641,11 @@ void smp_send_timer(void)
send_ipi_message(mask, IPI_TIMER);
}
+void smp_timer_broadcast(cpumask_t mask)
+{
+ send_ipi_message(mask, IPI_TIMER);
+}
+
void smp_send_stop(void)
{
cpumask_t mask = cpu_online_map;
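
The smp_call_function_single() added above (the five-argument form of this
era) can be used as in the following sketch; the CP15 read and the function
names are illustrative assumptions, not part of this patch:

	#include <linux/smp.h>
	#include <linux/kernel.h>

	/* Read the CPU ID register on whichever CPU runs this; it may
	 * execute in IRQ context, so it must not sleep. */
	static void read_midr(void *info)
	{
		unsigned int *midr = info;

		asm("mrc p15, 0, %0, c0, c0, 0" : "=r" (*midr));
	}

	static int query_cpu(int cpu)
	{
		unsigned int midr = 0;
		/* retry=0, wait=1: block until 'cpu' has run read_midr */
		int err = smp_call_function_single(cpu, read_midr,
						   &midr, 0, 1);

		if (!err)
			printk(KERN_INFO "CPU%d MIDR: %08x\n", cpu, midr);
		return err;
	}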
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 1533d3ecd7a..b5867eca1d0 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -79,17 +79,6 @@ static unsigned long dummy_gettimeoffset(void)
}
#endif
-/*
- * An implementation of printk_clock() independent from
- * sched_clock(). This avoids non-bootable kernels when
- * printk_clock is enabled.
- */
-unsigned long long printk_clock(void)
-{
- return (unsigned long long)(jiffies - INITIAL_JIFFIES) *
- (1000000000 / HZ);
-}
-
static unsigned long next_rtc_update;
/*
@@ -195,7 +184,7 @@ static int leds_shutdown(struct sys_device *dev)
}
static struct sysdev_class leds_sysclass = {
- set_kset_name("leds"),
+ .name = "leds",
.shutdown = leds_shutdown,
.suspend = leds_suspend,
.resume = leds_resume,
@@ -336,7 +325,9 @@ void timer_tick(void)
profile_tick(CPU_PROFILING);
do_leds();
do_set_rtc();
+ write_seqlock(&xtime_lock);
do_timer(1);
+ write_sequnlock(&xtime_lock);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
@@ -369,7 +360,7 @@ static int timer_resume(struct sys_device *dev)
#endif
static struct sysdev_class timer_sysclass = {
- set_kset_name("timer"),
+ .name = "timer",
.suspend = timer_suspend,
.resume = timer_resume,
};
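
The timer_tick() hunk above now takes xtime_lock for writing around
do_timer(); time readers pair with it through the standard seqlock retry
loop, as in this generic sketch of the reader side (essentially what
get_jiffies_64() does, shown only for illustration):

	#include <linux/seqlock.h>
	#include <linux/jiffies.h>

	/* Retry the read if a writer such as timer_tick() updated the
	 * time while we were sampling it. */
	static u64 sample_jiffies_64(void)
	{
		unsigned seq;
		u64 now;

		do {
			seq = read_seqbegin(&xtime_lock);
			now = jiffies_64;
		} while (read_seqretry(&xtime_lock, seq));

		return now;
	}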
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 4764bd9ccee..5595fdd75e8 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -19,6 +19,7 @@
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/kprobes.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
@@ -46,15 +47,6 @@ __setup("user_debug=", user_debug_setup);
static void dump_mem(const char *str, unsigned long bottom, unsigned long top);
-static inline int in_exception_text(unsigned long ptr)
-{
- extern char __exception_text_start[];
- extern char __exception_text_end[];
-
- return ptr >= (unsigned long)&__exception_text_start &&
- ptr < (unsigned long)&__exception_text_end;
-}
-
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
@@ -322,12 +314,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
get_user(instr, (u32 __user *)pc);
}
+#ifdef CONFIG_KPROBES
+ /*
+ * It is possible to have recursive kprobes, so we can't call
+ * the kprobe trap handler with the undef_lock held.
+ */
+ if (instr == KPROBE_BREAKPOINT_INSTRUCTION && !user_mode(regs)) {
+ kprobe_trap_handler(regs, instr);
+ return;
+ }
+#endif
+
spin_lock_irqsave(&undef_lock, flags);
list_for_each_entry(hook, &undef_hook, node) {
if ((instr & hook->instr_mask) == hook->instr_val &&
(regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) {
if (hook->fn(regs, instr) == 0) {
- spin_unlock_irq(&undef_lock);
+ spin_unlock_irqrestore(&undef_lock, flags);
return;
}
}
@@ -509,7 +512,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
* existence. Don't ever use this from user code.
*/
case 0xfff0:
- {
+ for (;;) {
extern void do_DataAbort(unsigned long addr, unsigned int fsr,
struct pt_regs *regs);
unsigned long val;
@@ -545,7 +548,6 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
up_read(&mm->mmap_sem);
/* simulate a write access fault */
do_DataAbort(addr, 15 + (1 << 11), regs);
- return -1;
}
#endif
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 5ff5406666b..4898bdcfe7d 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -30,7 +30,7 @@ SECTIONS
}
.init : { /* Init code and data */
- *(.init.text)
+ INIT_TEXT
_einittext = .;
__proc_info_begin = .;
*(.proc.info.init)
@@ -70,15 +70,15 @@ SECTIONS
__per_cpu_end = .;
#ifndef CONFIG_XIP_KERNEL
__init_begin = _stext;
- *(.init.data)
+ INIT_DATA
. = ALIGN(4096);
__init_end = .;
#endif
}
/DISCARD/ : { /* Exit code and data */
- *(.exit.text)
- *(.exit.data)
+ EXIT_TEXT
+ EXIT_DATA
*(.exitcall.exit)
#ifndef CONFIG_MMU
*(.fixup)
@@ -94,6 +94,7 @@ SECTIONS
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
+ KPROBES_TEXT
#ifdef CONFIG_MMU
*(.fixup)
#endif
@@ -129,7 +130,7 @@ SECTIONS
#ifdef CONFIG_XIP_KERNEL
. = ALIGN(4096);
__init_begin = .;
- *(.init.data)
+ INIT_DATA
. = ALIGN(4096);
__init_end = .;
#endif