Diffstat (limited to 'arch/mips/kernel/ftrace.c')
-rw-r--r--	arch/mips/kernel/ftrace.c	| 256
1 file changed, 170 insertions(+), 86 deletions(-)
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 5a84a1f1123..60e7e5e45af 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -11,35 +11,53 @@
 #include <linux/uaccess.h>
 #include <linux/init.h>
 #include <linux/ftrace.h>
+#include <linux/syscalls.h>
 
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cacheflush.h>
+#include <asm/syscall.h>
 #include <asm/uasm.h>
+#include <asm/unistd.h>
+
+#include <asm-generic/sections.h>
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* Arch override because MIPS doesn't need to run this from stop_machine() */
+void arch_ftrace_update_code(int command)
+{
+	ftrace_modify_all_code(command);
+}
+
+#endif
 
 /*
- * If the Instruction Pointer is in module space (0xc0000000), return true;
- * otherwise, it is in kernel space (0x80000000), return false.
+ * Check if the address is in kernel space
  *
- * FIXME: This will not work when the kernel space and module space are the
- * same. If they are the same, we need to modify scripts/recordmcount.pl,
- * ftrace_make_nop/call() and the other related parts to ensure the
- * enabling/disabling of the calling site to _mcount is right for both kernel
- * and module.
+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
  */
-
-static inline int in_module(unsigned long ip)
+static inline int in_kernel_space(unsigned long ip)
 {
-	return ip & 0x40000000;
+	if (ip >= (unsigned long)_stext &&
+	    ip <= (unsigned long)_etext)
+		return 1;
+	return 0;
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff	/*  op_code|addr : 31...26|25 ....0 */
+#define JUMP_RANGE_MASK ((1UL << 28) - 1)
 
-#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
-#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
 #define INSN_NOP 0x00000000	/* nop */
 #define INSN_JAL(addr)	\
 	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -60,18 +78,19 @@ static inline void ftrace_dyn_arch_init_insns(void)
 
 	/* jal (ftrace_caller + 8), jump over the first two instruction */
 	buf = (u32 *)&insn_jal_ftrace_caller;
-	uasm_i_jal(&buf, (FTRACE_ADDR + 8));
+	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	/* j ftrace_graph_caller */
 	buf = (u32 *)&insn_j_ftrace_graph_caller;
-	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
+	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
 #endif
 }
 
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
 	int faulted;
+	mm_segment_t old_fs;
 
 	/* *(unsigned int *)ip = new_code; */
 	safe_store_code(new_code, ip, faulted);
@@ -79,11 +98,62 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 	if (unlikely(faulted))
 		return -EFAULT;
 
+	old_fs = get_fs();
+	set_fs(get_ds());
 	flush_icache_range(ip, ip + 8);
+	set_fs(old_fs);
 
 	return 0;
 }
 
+#ifndef CONFIG_64BIT
+static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
+				unsigned int new_code2)
+{
+	int faulted;
+
+	safe_store_code(new_code1, ip, faulted);
+	if (unlikely(faulted))
+		return -EFAULT;
+	safe_store_code(new_code2, ip + 4, faulted);
+	if (unlikely(faulted))
+		return -EFAULT;
+	flush_icache_range(ip, ip + 8);
+	return 0;
+}
+#endif
+
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount		--> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ *  sub sp, sp, 8
+ *				    1: offset = 5 instructions
+ * 2.2 For the Other situations
+ *
+ * lui v1, hi_16bit_of_mcount	     --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ *  nop | move $12, ra_address | sub sp, sp, 8
+ *				    1: offset = 4 instructions
+ */
+
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -91,40 +161,22 @@ int ftrace_make_nop(struct module *mod,
 	unsigned long ip = rec->ip;
 
 	/*
-	 * We have compiled module with -mlong-calls, but compiled the kernel
-	 * without it, we need to cope with them respectively.
+	 * If ip is in kernel space, no long call, otherwise, long call is
+	 * needed.
 	 */
-	if (in_module(ip)) {
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-		/*
-		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * move $12, ra_address
-		 * jalr v1
-		 *  sub sp, sp, 8
-		 *                                  1: offset = 5 instructions
-		 */
-		new = INSN_B_1F_5;
+	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+#ifdef CONFIG_64BIT
+	return ftrace_modify_code(ip, new);
 #else
-		/*
-		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * jalr v1
-		 *  nop | move $12, ra_address | sub sp, sp, 8
-		 *                                  1: offset = 4 instructions
-		 */
-		new = INSN_B_1F_4;
+	/*
+	 * On 32 bit MIPS platforms, gcc adds a stack adjust
+	 * instruction in the delay slot after the branch to
+	 * mcount and expects mcount to restore the sp on return.
+	 * This is based on a legacy API and does nothing but
+	 * waste instructions so it's being removed at runtime.
+	 */
+	return ftrace_modify_code_2(ip, new, INSN_NOP);
 #endif
-	} else {
-		/*
-		 * move at, ra
-		 * jal _mcount		--> nop
-		 */
-		new = INSN_NOP;
-	}
-	return ftrace_modify_code(ip, new);
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
@@ -132,8 +184,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	/* ip, module: 0xc0000000, kernel: 0x80000000 */
-	new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
+	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+		insn_lui_v1_hi16_mcount;
 
 	return ftrace_modify_code(ip, new);
 }
@@ -149,7 +201,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	return ftrace_modify_code(FTRACE_CALL_IP, new);
 }
 
-int __init ftrace_dyn_arch_init(void *data)
+int __init ftrace_dyn_arch_init(void)
 {
 	/* Encode the instructions when booting */
 	ftrace_dyn_arch_init_insns();
@@ -157,9 +209,6 @@ int __init ftrace_dyn_arch_init(void *data)
 	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
 	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
 
-	/* The return code is retured via data */
-	*(unsigned long *)data = 0;
-
 	return 0;
 }
 #endif	/* CONFIG_DYNAMIC_FTRACE */
@@ -186,33 +235,29 @@ int ftrace_disable_ftrace_graph_caller(void)
 
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
 
-#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
-#define S_R_SP	(0xafb0 << 16)  /* s{d,w} R, offset(sp) */
+#define S_RA_SP (0xafbf << 16)	/* s{d,w} ra, offset(sp) */
+#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
 #define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
 
-unsigned long ftrace_get_parent_addr(unsigned long self_addr,
-				     unsigned long parent,
-				     unsigned long parent_addr,
-				     unsigned long fp)
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
 {
-	unsigned long sp, ip, ra;
+	unsigned long sp, ip, tmp;
 	unsigned int code;
 	int faulted;
 
 	/*
-	 * For module, move the ip from calling site of mcount to the
-	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
-	 * kernel, move to the instruction "move ra, at"(offset is 12)
+	 * For module, move the ip from the return address after the
+	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
+	 * kernel, move after the instruction "move ra, at"(offset is 16)
 	 */
-	ip = self_addr - (in_module(self_addr) ? 20 : 12);
+	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
 
 	/*
 	 * search the text until finding the non-store instruction or "s{d,w}
 	 * ra, offset(sp)" instruction
 	 */
 	do {
-		ip -= 4;
-
 		/* get the code at "ip": code = *(unsigned int *)ip; */
 		safe_load_code(code, ip, faulted);
 
@@ -224,18 +269,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
 		 * store the ra on the stack
 		 */
 		if ((code & S_R_SP) != S_R_SP)
-			return parent_addr;
+			return parent_ra_addr;
 
-	} while (((code & S_RA_SP) != S_RA_SP));
+		/* Move to the next instruction */
+		ip -= 4;
+	} while ((code & S_RA_SP) != S_RA_SP);
 
 	sp = fp + (code & OFFSET_MASK);
 
-	/* ra = *(unsigned long *)sp; */
-	safe_load_stack(ra, sp, faulted);
+	/* tmp = *(unsigned long *)sp; */
+	safe_load_stack(tmp, sp, faulted);
 	if (unlikely(faulted))
 		return 0;
 
-	if (ra == parent)
+	if (tmp == old_parent_ra)
 		return sp;
 	return 0;
 }
@@ -246,21 +293,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 			   unsigned long fp)
 {
-	unsigned long old;
+	unsigned long old_parent_ra;
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 	    &return_to_handler;
-	int faulted;
+	int faulted, insns;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
 	/*
-	 * "parent" is the stack address saved the return address of the caller
-	 * of _mcount.
+	 * "parent_ra_addr" is the stack address saved the return address of
+	 * the caller of _mcount.
 	 *
 	 * if the gcc < 4.5, a leaf function does not save the return address
 	 * in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +322,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	 * do it in ftrace_graph_caller of mcount.S.
 	 */
 
-	/* old = *parent; */
-	safe_load_stack(old, parent, faulted);
+	/* old_parent_ra = *parent_ra_addr; */
+	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
-	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
-			(unsigned long)parent, fp);
+	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+			old_parent_ra, (unsigned long)parent_ra_addr, fp);
 	/*
 	 * If fails when getting the stack address of the non-leaf function's
 	 * ra, stop function graph tracer and return
 	 */
-	if (parent == 0)
+	if (parent_ra_addr == 0)
 		goto out;
 #endif
-	/* *parent = return_hooker; */
-	safe_store_stack(return_hooker, parent, faulted);
+	/* *parent_ra_addr = return_hooker; */
+	safe_store_stack(return_hooker, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
-	    -EBUSY) {
-		*parent = old;
+	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+	    == -EBUSY) {
+		*parent_ra_addr = old_parent_ra;
 		return;
 	}
 
-	trace.func = self_addr;
+	/*
+	 * Get the recorded ip of the current mcount calling site in the
+	 * __mcount_loc section, which will be used to filter the function
+	 * entries configured through the tracing/set_graph_function interface.
+	 */
+
+	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 
 	/* Only trace if the calling function expects to */
 	if (!ftrace_graph_entry(&trace)) {
 		current->curr_ret_stack--;
-		*parent = old;
+		*parent_ra_addr = old_parent_ra;
 	}
 	return;
 out:
@@ -313,3 +367,33 @@ out:
 	WARN_ON(1);
 }
 #endif	/* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+#ifdef CONFIG_32BIT
+unsigned long __init arch_syscall_addr(int nr)
+{
+	return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
+}
+#endif
+
+#ifdef CONFIG_64BIT
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+#ifdef CONFIG_MIPS32_N32
+	if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
+		return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
+#endif
+	if (nr >= __NR_64_Linux  && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
+		return (unsigned long)sys_call_table[nr - __NR_64_Linux];
+#ifdef CONFIG_MIPS32_O32
+	if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
+		return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
+#endif
+
+	return (unsigned long) &sys_ni_syscall;
+}
+#endif
+
+#endif /* CONFIG_FTRACE_SYSCALLS */
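A note on the two instruction encodings this diff leans on: jal carries a 26-bit word index and can only reach the 256 MB segment of the delay-slot PC, which is why targets are masked with JUMP_RANGE_MASK before being handed to uasm, and "b 1f" is "beq $0, $0, offset" (opcode 0x10000000) with a 16-bit offset counted in instructions, which is why INSN_B_1F is simply MCOUNT_OFFSET_INSNS OR-ed into the opcode. The following is a minimal userspace sketch, not kernel code; the constants are copied from the diff and the address in main() is made up.

#include <stdio.h>
#include <stdint.h>

#define JAL		0x0c000000	/* opcode 000011, target index in low 26 bits */
#define ADDR_MASK	0x03ffffff
#define JUMP_RANGE_MASK	((1UL << 28) - 1)

/*
 * jal keeps the top 4 bits of the delay-slot PC and replaces the low
 * 28 bits with (index << 2), so only the low 28 bits of the target
 * address survive in the encoding.
 */
static uint32_t insn_jal(unsigned long addr)
{
	return (uint32_t)(JAL | (((addr & JUMP_RANGE_MASK) >> 2) & ADDR_MASK));
}

/*
 * "b 1f" over n instructions is beq $0, $0 with offset n: the 16-bit
 * offset is counted in words from the instruction after the delay slot.
 */
static uint32_t insn_b_1f(unsigned int offset_insns)
{
	return 0x10000000 | (offset_insns & 0xffff);
}

int main(void)
{
	unsigned long ftrace_caller_addr = 0x80123450;	/* made-up address */

	printf("jal  -> 0x%08x\n", (unsigned int)insn_jal(ftrace_caller_addr + 8));
	printf("b 1f -> 0x%08x\n", (unsigned int)insn_b_1f(5));	/* 0x10000005 */
	return 0;
}

With MCOUNT_OFFSET_INSNS = 5 this reproduces the 0x10000005 that the old INSN_B_1F_5 constant hard-coded, which is exactly the simplification the patch makes.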

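Similarly, the backward scan in ftrace_get_parent_ra_addr() is easier to follow outside the kernel. The masks work because "sw ra, offset(sp)" encodes as 0xafbf0000 | offset, any store of a register to the stack pointer shares the 0xafb00000 pattern in its top half, and the masked comparison also tolerates the 64-bit "sd" form since only the bits set in the mask are tested. A standalone sketch follows; the instruction buffer and the find_ra_offset() helper are hypothetical, not part of the patch.

#include <stdio.h>
#include <stdint.h>

#define S_RA_SP		(0xafbfu << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP		(0xafb0u << 16)	/* s{d,w} <any reg>, offset(sp) */
#define OFFSET_MASK	0xffffu

/*
 * Walk backwards from the instruction before the mcount call site.
 * Return the stack offset where ra was spilled, or -1 as soon as a
 * non-store instruction is seen (a leaf function never spills ra).
 */
static int find_ra_offset(const uint32_t *insns, int start)
{
	int i;

	for (i = start; i >= 0; i--) {
		uint32_t code = insns[i];

		if ((code & S_RA_SP) == S_RA_SP)
			return (int)(code & OFFSET_MASK);
		if ((code & S_R_SP) != S_R_SP)
			return -1;	/* hit a non-store: give up */
	}
	return -1;
}

int main(void)
{
	/* A made-up prologue: addiu sp,sp,-32; sw ra,28(sp); sw s0,24(sp) */
	const uint32_t text[] = {
		0x27bdffe0,	/* addiu sp, sp, -32 (would stop the scan) */
		0xafbf001c,	/* sw ra, 28(sp) */
		0xafb00018,	/* sw s0, 24(sp) */
	};

	printf("ra spilled at sp+%d\n", find_ra_offset(text, 2));	/* prints 28 */
	return 0;
}

The kernel version additionally loads each word with safe_load_code() so that a bad pointer merely faults, and before trusting the slot it cross-checks the value found on the stack against the parent ra that _mcount actually received (the "tmp == old_parent_ra" test in the diff).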