Diffstat (limited to 'arch/mips/lib/memcpy.S')
-rw-r--r--	arch/mips/lib/memcpy.S	416
1 file changed, 277 insertions, 139 deletions
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index c5c40dad0bb..c17ef80cf65 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -10,6 +10,7 @@
  * Copyright (C) 2002 Broadcom, Inc.
  *   memcpy/copy_user author: Mark Vandevoorde
  * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
  *
  * Mnemonic names for arguments to memcpy/__copy_user
  */
@@ -85,11 +86,51 @@
  * they're not protected.
  */
-#define EXC(inst_reg,addr,handler)		\
-9:	inst_reg, addr;				\
-	.section __ex_table,"a";		\
-	PTR	9b, handler;			\
-	.previous
+/* Instruction type */
+#define LD_INSN 1
+#define ST_INSN 2
+/* Pretech type */
+#define SRC_PREFETCH 1
+#define DST_PREFETCH 2
+#define LEGACY_MODE 1
+#define EVA_MODE    2
+#define USEROP   1
+#define KERNELOP 2
+
+/*
+ * Wrapper to add an entry in the exception table
+ * in case the insn causes a memory exception.
+ * Arguments:
+ * insn    : Load/store instruction
+ * type    : Instruction type
+ * reg     : Register
+ * addr    : Address
+ * handler : Exception handler
+ */
+
+#define EXC(insn, type, reg, addr, handler)			\
+	.if \mode == LEGACY_MODE;				\
+9:		insn reg, addr;					\
+		.section __ex_table,"a";			\
+		PTR	9b, handler;				\
+		.previous;					\
+	/* This is assembled in EVA mode */			\
+	.else;							\
+		/* If loading from user or storing to user */	\
+		.if ((\from == USEROP) && (type == LD_INSN)) || \
+		    ((\to == USEROP) && (type == ST_INSN));	\
+9:			__BUILD_EVA_INSN(insn##e, reg, addr);	\
+			.section __ex_table,"a";		\
+			PTR	9b, handler;			\
+			.previous;				\
+		.else;						\
+			/*					\
+			 *  Still in EVA, but no need for	\
+			 * exception handler or EVA insn	\
+			 */					\
+			insn reg, addr;				\
+		.endif;						\
+	.endif
 /*
  * Only on the 64-bit kernel we can made use of 64-bit registers.
@@ -100,12 +141,13 @@
 #ifdef USE_DOUBLE
-#define LOAD   ld
-#define LOADL  ldl
-#define LOADR  ldr
-#define STOREL sdl
-#define STORER sdr
-#define STORE  sd
+#define LOADK ld /* No exception */
+#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
 #define ADD    daddu
 #define SUB    dsubu
 #define SRL    dsrl
@@ -136,12 +178,13 @@
 #else
-#define LOAD   lw
-#define LOADL  lwl
-#define LOADR  lwr
-#define STOREL swl
-#define STORER swr
-#define STORE  sw
+#define LOADK lw /* No exception */
+#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
 #define ADD    addu
 #define SUB    subu
 #define SRL    srl
@@ -154,6 +197,33 @@
 #endif /* USE_DOUBLE */
+#define LOADB(reg, addr, handler)	EXC(lb, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
+
+#define _PREF(hint, addr, type)						\
+	.if \mode == LEGACY_MODE;					\
+		PREF(hint, addr);					\
+	.else;								\
+		.if ((\from == USEROP) && (type == SRC_PREFETCH)) ||	\
+		    ((\to == USEROP) && (type == DST_PREFETCH));	\
+			/*						\
+			 * PREFE has only 9 bits for the offset		\
+			 * compared to PREF which has 16, so it may	\
+			 * need to use the $at register but this	\
+			 * register should remain intact because it's	\
+			 * used later on. Therefore use $v1.		\
+			 */						\
+			.set at=v1;					\
+			PREFE(hint, addr);				\
+			.set noat;					\
+		.else;							\
+			PREF(hint, addr);				\
+		.endif;							\
+	.endif
+
+#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
+#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)
+
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 #define LDFIRST LOADR
 #define LDREST	LOADL
@@ -182,27 +252,23 @@
 	.set	at=v1
 #endif
-/*
- * t6 is used as a flag to note inatomic mode.
- */
-LEAF(__copy_user_inatomic)
-	b	__copy_user_common
-	 li	t6, 1
-	END(__copy_user_inatomic)
-
-/*
- * A combined memcpy/__copy_user
- * __copy_user sets len to 0 for success; else to an upper bound of
- * the number of uncopied bytes.
- * memcpy sets v0 to dst.
- */
 	.align	5
-LEAF(memcpy)					/* a0=dst a1=src a2=len */
-	move	v0, dst				/* return value */
-.L__memcpy:
-FEXPORT(__copy_user)
-	li	t6, 0	/* not inatomic */
-__copy_user_common:
+
+	/*
+	 * Macro to build the __copy_user common code
+	 * Arguements:
+	 * mode : LEGACY_MODE or EVA_MODE
+	 * from : Source operand. USEROP or KERNELOP
+	 * to   : Destination operand. USEROP or KERNELOP
+	 */
+	.macro __BUILD_COPY_USER mode, from, to
+
+	/* initialize __memcpy if this the first time we execute this macro */
+	.ifnotdef __memcpy
+	.set __memcpy, 1
+	.hidden __memcpy /* make sure it does not leak */
+	.endif
+
 	/*
 	 * Note: dst & src may be unaligned, len may be 0
 	 * Temps
@@ -217,94 +283,94 @@ __copy_user_common:
 	 *
 	 * If len < NBYTES use byte operations.
 	 */
-	PREF(	0, 0(src) )
-	PREF(	1, 0(dst) )
+	PREFS(	0, 0(src) )
+	PREFD(	1, 0(dst) )
 	sltu	t2, len, NBYTES
 	and	t1, dst, ADDRMASK
-	PREF(	0, 1*32(src) )
-	PREF(	1, 1*32(dst) )
-	bnez	t2, .Lcopy_bytes_checklen
+	PREFS(	0, 1*32(src) )
+	PREFD(	1, 1*32(dst) )
+	bnez	t2, .Lcopy_bytes_checklen\@
 	 and	t0, src, ADDRMASK
-	PREF(	0, 2*32(src) )
-	PREF(	1, 2*32(dst) )
-	bnez	t1, .Ldst_unaligned
+	PREFS(	0, 2*32(src) )
+	PREFD(	1, 2*32(dst) )
+	bnez	t1, .Ldst_unaligned\@
 	 nop
-	bnez	t0, .Lsrc_unaligned_dst_aligned
+	bnez	t0, .Lsrc_unaligned_dst_aligned\@
 	/*
 	 * use delay slot for fall-through
 	 * src and dst are aligned; need to compute rem
 	 */
-.Lboth_aligned:
+.Lboth_aligned\@:
 	 SRL	t0, len, LOG_NBYTES+3	 # +3 for 8 units/iter
-	beqz	t0, .Lcleanup_both_aligned # len < 8*NBYTES
+	beqz	t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
 	 and	rem, len, (8*NBYTES-1)	 # rem = len % (8*NBYTES)
-	PREF(	0, 3*32(src) )
-	PREF(	1, 3*32(dst) )
+	PREFS(	0, 3*32(src) )
+	PREFD(	1, 3*32(dst) )
 	.align	4
 1:
 	R10KCBARRIER(0(ra))
-EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
-EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
-EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
-EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
+	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
 	SUB	len, len, 8*NBYTES
-EXC(	LOAD	t4, UNIT(4)(src),	.Ll_exc_copy)
-EXC(	LOAD	t7, UNIT(5)(src),	.Ll_exc_copy)
-EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc_p8u)
-EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc_p7u)
-EXC(	LOAD	t0, UNIT(6)(src),	.Ll_exc_copy)
-EXC(	LOAD	t1, UNIT(7)(src),	.Ll_exc_copy)
+	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
+	LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
+	STORE(t0, UNIT(0)(dst),	.Ls_exc_p8u\@)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc_p7u\@)
+	LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
+	LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
 	ADD	src, src, 8*NBYTES
 	ADD	dst, dst, 8*NBYTES
-EXC(	STORE	t2, UNIT(-6)(dst),	.Ls_exc_p6u)
-EXC(	STORE	t3, UNIT(-5)(dst),	.Ls_exc_p5u)
-EXC(	STORE	t4, UNIT(-4)(dst),	.Ls_exc_p4u)
-EXC(	STORE	t7, UNIT(-3)(dst),	.Ls_exc_p3u)
-EXC(	STORE	t0, UNIT(-2)(dst),	.Ls_exc_p2u)
-EXC(	STORE	t1, UNIT(-1)(dst),	.Ls_exc_p1u)
-	PREF(	0, 8*32(src) )
-	PREF(	1, 8*32(dst) )
+	STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
+	STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
+	STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
+	STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
+	STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
+	STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
+	PREFS(	0, 8*32(src) )
+	PREFD(	1, 8*32(dst) )
 	bne	len, rem, 1b
 	 nop
 	/*
 	 * len == rem == the number of bytes left to copy < 8*NBYTES
 	 */
-.Lcleanup_both_aligned:
-	beqz	len, .Ldone
+.Lcleanup_both_aligned\@:
+	beqz	len, .Ldone\@
 	 sltu	t0, len, 4*NBYTES
-	bnez	t0, .Lless_than_4units
+	bnez	t0, .Lless_than_4units\@
 	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
 	/*
 	 * len >= 4*NBYTES
 	 */
-EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
-EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
-EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
-EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
+	LOAD( t0, UNIT(0)(src),	.Ll_exc\@)
+	LOAD( t1, UNIT(1)(src),	.Ll_exc_copy\@)
+	LOAD( t2, UNIT(2)(src),	.Ll_exc_copy\@)
+	LOAD( t3, UNIT(3)(src),	.Ll_exc_copy\@)
 	SUB	len, len, 4*NBYTES
 	ADD	src, src, 4*NBYTES
 	R10KCBARRIER(0(ra))
-EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc_p4u)
-EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc_p3u)
-EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc_p2u)
-EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc_p1u)
+	STORE(t0, UNIT(0)(dst),	.Ls_exc_p4u\@)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc_p3u\@)
+	STORE(t2, UNIT(2)(dst),	.Ls_exc_p2u\@)
+	STORE(t3, UNIT(3)(dst),	.Ls_exc_p1u\@)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	.set	noreorder
-.Lless_than_4units:
+.Lless_than_4units\@:
 	/*
 	 * rem = len % NBYTES
 	 */
-	beq	rem, len, .Lcopy_bytes
+	beq	rem, len, .Lcopy_bytes\@
 	 nop
1:
 	R10KCBARRIER(0(ra))
-EXC(	LOAD	t0, 0(src),		.Ll_exc)
+	LOAD(t0, 0(src), .Ll_exc\@)
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-EXC(	STORE	t0, 0(dst),		.Ls_exc_p1u)
+	STORE(t0, 0(dst), .Ls_exc_p1u\@)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, NBYTES
 	bne	rem, len, 1b
@@ -322,17 +388,17 @@ EXC(	STORE	t0, 0(dst),		.Ls_exc_p1u)
 	 * more instruction-level parallelism.
 	 */
 #define bits t2
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	 ADD	t1, dst, len	# t1 is just past last byte of dst
 	li	bits, 8*NBYTES
 	SLL	rem, len, 3	# rem = number of bits to keep
-EXC(	LOAD	t0, 0(src),		.Ll_exc)
+	LOAD(t0, 0(src), .Ll_exc\@)
 	SUB	bits, bits, rem # bits = number of bits to discard
 	SHIFT_DISCARD t0, t0, bits
-EXC(	STREST	t0, -1(t1),		.Ls_exc)
+	STREST(t0, -1(t1), .Ls_exc\@)
 	jr	ra
 	 move	len, zero
-.Ldst_unaligned:
+.Ldst_unaligned\@:
 	/*
 	 * dst is unaligned
 	 * t0 = src & ADDRMASK
@@ -343,25 +409,25 @@ EXC(	STREST	t0, -1(t1),		.Ls_exc)
 	 * Set match = (src and dst have same alignment)
 	 */
 #define match rem
-EXC(	LDFIRST t3, FIRST(0)(src),	.Ll_exc)
+	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
 	ADD	t2, zero, NBYTES
-EXC(	LDREST	t3, REST(0)(src),	.Ll_exc_copy)
+	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
 	SUB	t2, t2, t1	# t2 = number of bytes copied
 	xor	match, t0, t1
 	R10KCBARRIER(0(ra))
-EXC(	STFIRST t3, FIRST(0)(dst),	.Ls_exc)
-	beq	len, t2, .Ldone
+	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
+	beq	len, t2, .Ldone\@
 	 SUB	len, len, t2
 	ADD	dst, dst, t2
-	beqz	match, .Lboth_aligned
+	beqz	match, .Lboth_aligned\@
 	 ADD	src, src, t2
-.Lsrc_unaligned_dst_aligned:
+.Lsrc_unaligned_dst_aligned\@:
 	SRL	t0, len, LOG_NBYTES+2	 # +2 for 4 units/iter
-	PREF(	0, 3*32(src) )
-	beqz	t0, .Lcleanup_src_unaligned
+	PREFS(	0, 3*32(src) )
+	beqz	t0, .Lcleanup_src_unaligned\@
 	 and	rem, len, (4*NBYTES-1)	 # rem = len % 4*NBYTES
-	PREF(	1, 3*32(dst) )
+	PREFD(	1, 3*32(dst) )
1:
 /*
  * Avoid consecutive LD*'s to the same register since some mips
  * implementations can't issue them in the same cycle.
  * It's OK to load FIRST(N+1) before REST(N) because the two addresses
  * are to the same unit (unless src is aligned, but it's not).
  */
 	R10KCBARRIER(0(ra))
-EXC(	LDFIRST t0, FIRST(0)(src),	.Ll_exc)
-EXC(	LDFIRST t1, FIRST(1)(src),	.Ll_exc_copy)
+	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
 	SUB	len, len, 4*NBYTES
-EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
-EXC(	LDREST	t1, REST(1)(src),	.Ll_exc_copy)
-EXC(	LDFIRST t2, FIRST(2)(src),	.Ll_exc_copy)
-EXC(	LDFIRST t3, FIRST(3)(src),	.Ll_exc_copy)
-EXC(	LDREST	t2, REST(2)(src),	.Ll_exc_copy)
-EXC(	LDREST	t3, REST(3)(src),	.Ll_exc_copy)
-	PREF(	0, 9*32(src) )		# 0 is PREF_LOAD  (not streamed)
+	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
+	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
+	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
+	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
+	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
+	PREFS(	0, 9*32(src) )		# 0 is PREF_LOAD  (not streamed)
 	ADD	src, src, 4*NBYTES
 #ifdef CONFIG_CPU_SB1
 	nop				# improves slotting
 #endif
-EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc_p4u)
-EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc_p3u)
-EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc_p2u)
-EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc_p1u)
-	PREF(	1, 9*32(dst) )		# 1 is PREF_STORE (not streamed)
+	STORE(t0, UNIT(0)(dst),	.Ls_exc_p4u\@)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc_p3u\@)
+	STORE(t2, UNIT(2)(dst),	.Ls_exc_p2u\@)
+	STORE(t3, UNIT(3)(dst),	.Ls_exc_p1u\@)
+	PREFD(	1, 9*32(dst) )		# 1 is PREF_STORE (not streamed)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
 	bne	len, rem, 1b
 	.set	noreorder
-.Lcleanup_src_unaligned:
-	beqz	len, .Ldone
+.Lcleanup_src_unaligned\@:
+	beqz	len, .Ldone\@
 	 and	rem, len, NBYTES-1  # rem = len % NBYTES
-	beq	rem, len, .Lcopy_bytes
+	beq	rem, len, .Lcopy_bytes\@
 	 nop
1:
 	R10KCBARRIER(0(ra))
-EXC(	LDFIRST t0, FIRST(0)(src),	.Ll_exc)
-EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
+	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-EXC(	STORE	t0, 0(dst),		.Ls_exc_p1u)
+	STORE(t0, 0(dst), .Ls_exc_p1u\@)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, NBYTES
 	bne	len, rem, 1b
 	.set	noreorder
-.Lcopy_bytes_checklen:
-	beqz	len, .Ldone
+.Lcopy_bytes_checklen\@:
+	beqz	len, .Ldone\@
 	 nop
-.Lcopy_bytes:
+.Lcopy_bytes\@:
 	/* 0 < len < NBYTES  */
 	R10KCBARRIER(0(ra))
 #define COPY_BYTE(N)			\
-EXC(	lb	t0, N(src), .Ll_exc);	\
+	LOADB(t0, N(src), .Ll_exc\@);	\
 	SUB	len, len, 1;		\
-	beqz	len, .Ldone;		\
-EXC(	 sb	t0, N(dst), .Ls_exc_p1)
+	beqz	len, .Ldone\@;		\
+	STOREB(t0, N(dst), .Ls_exc_p1\@)
 	COPY_BYTE(0)
 	COPY_BYTE(1)
@@ -431,16 +497,19 @@ EXC(	 sb	t0, N(dst), .Ls_exc_p1)
 	COPY_BYTE(4)
 	COPY_BYTE(5)
 #endif
-EXC(	lb	t0, NBYTES-2(src), .Ll_exc)
+	LOADB(t0, NBYTES-2(src), .Ll_exc\@)
 	SUB	len, len, 1
 	jr	ra
-EXC(	 sb	t0, NBYTES-2(dst), .Ls_exc_p1)
-.Ldone:
+	STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
+.Ldone\@:
 	jr	ra
-	 nop
+	.if __memcpy == 1
 	END(memcpy)
+	.set __memcpy, 0
+	.hidden __memcpy
+	.endif
-.Ll_exc_copy:
+.Ll_exc_copy\@:
 	/*
 	 * Copy bytes from src until faulting load address (or until a
 	 * lb faults)
@@ -451,24 +520,24 @@ EXC(	 sb	t0, NBYTES-2(dst), .Ls_exc_p1)
 	 *
 	 * Assumes src < THREAD_BUADDR($28)
 	 */
-	LOAD	t0, TI_TASK($28)
+	LOADK	t0, TI_TASK($28)
 	 nop
-	LOAD	t0, THREAD_BUADDR(t0)
+	LOADK	t0, THREAD_BUADDR(t0)
1:
-EXC(	lb	t1, 0(src),	.Ll_exc)
+	LOADB(t1, 0(src), .Ll_exc\@)
 	ADD	src, src, 1
 	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 1
 	bne	src, t0, 1b
 	.set	noreorder
-.Ll_exc:
-	LOAD	t0, TI_TASK($28)
+.Ll_exc\@:
+	LOADK	t0, TI_TASK($28)
 	 nop
-	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
+	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
 	 nop
 	SUB	len, AT, t0		# len number of uncopied bytes
-	bnez	t6, .Ldone	/* Skip the zeroing part if inatomic */
+	bnez	t6, .Ldone\@	/* Skip the zeroing part if inatomic */
 	/*
 	 * Here's where we rely on src and dst being incremented in tandem,
 	 *   See (3) above.
@@ -482,7 +551,7 @@ EXC(	lb	t1, 0(src),	.Ll_exc)
 	 */
 	.set	reorder				/* DADDI_WAR */
 	SUB	src, len, 1
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	.set	noreorder
1:	sb	zero, 0(dst)
 	ADD	dst, dst, 1
@@ -503,7 +572,7 @@ EXC(	lb	t1, 0(src),	.Ll_exc)
 #define SEXC(n)							\
 	.set	reorder;			/* DADDI_WAR */ \
-.Ls_exc_p ## n ## u:						\
+.Ls_exc_p ## n ## u\@:						\
 	ADD	len, len, n*NBYTES;				\
 	jr	ra;						\
 	.set	noreorder
@@ -517,14 +586,15 @@ SEXC(3)
 SEXC(2)
 SEXC(1)
-.Ls_exc_p1:
+.Ls_exc_p1\@:
 	.set	reorder				/* DADDI_WAR */
 	ADD	len, len, 1
 	jr	ra
 	.set	noreorder
-.Ls_exc:
+.Ls_exc\@:
 	jr	ra
 	 nop
+	.endm
 	.align	5
 LEAF(memmove)
@@ -575,3 +645,71 @@ LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
 	jr	ra
 	 move	a2, zero
 	END(__rmemcpy)
+
+/*
+ * t6 is used as a flag to note inatomic mode.
+ */
+LEAF(__copy_user_inatomic)
+	b	__copy_user_common
+	li	t6, 1
+	END(__copy_user_inatomic)
+
+/*
+ * A combined memcpy/__copy_user
+ * __copy_user sets len to 0 for success; else to an upper bound of
+ * the number of uncopied bytes.
+ * memcpy sets v0 to dst.
+ */
+	.align	5
+LEAF(memcpy)					/* a0=dst a1=src a2=len */
+	move	v0, dst				/* return value */
+.L__memcpy:
+FEXPORT(__copy_user)
+	li	t6, 0	/* not inatomic */
+__copy_user_common:
+	/* Legacy Mode, user <-> user */
+	__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
+
+#ifdef CONFIG_EVA
+
+/*
+ * For EVA we need distinct symbols for reading and writing to user space.
+ * This is because we need to use specific EVA instructions to perform the
+ * virtual <-> physical translation when a virtual address is actually in user
+ * space
+ */
+
+LEAF(__copy_user_inatomic_eva)
+	b       __copy_from_user_common
+	li	t6, 1
+	END(__copy_user_inatomic_eva)
+
+/*
+ * __copy_from_user (EVA)
+ */
+
+LEAF(__copy_from_user_eva)
+	li	t6, 0	/* not inatomic */
+__copy_from_user_common:
+	__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
+END(__copy_from_user_eva)
+
+
+
+/*
+ * __copy_to_user (EVA)
+ */
+
+LEAF(__copy_to_user_eva)
+__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
+END(__copy_to_user_eva)
+
+/*
+ * __copy_in_user (EVA)
+ */
+
+LEAF(__copy_in_user_eva)
+__BUILD_COPY_USER EVA_MODE USEROP USEROP
+
+#endif
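
As a reading aid (not part of the commit), here is a hand-expanded sketch of what a single LOAD(t0, 0(src), .Ll_exc\@) from the new macro layer roughly assembles to. The \@ becomes a per-instantiation label suffix, and the EVA branch goes through __BUILD_EVA_INSN (defined in the EVA headers), which is assumed here to emit the e-suffixed user access, e.g. lwe on a 32-bit build.

# LEGACY_MODE, or EVA_MODE with a kernel-side operand: an ordinary load,
# plus an exception-table entry mapping a fault at label 9 to the handler.
9:	lw	t0, 0(src)
	.section __ex_table, "a"
	PTR	9b, .Ll_exc
	.previous

# EVA_MODE with a user-side source: same fixup entry, but the access is
# performed with the EVA user-load instruction so it is translated through
# the user address segment.
9:	lwe	t0, 0(src)
	.section __ex_table, "a"
	PTR	9b, .Ll_exc
	.previous

Kernel-to-kernel accesses inside the EVA variants skip both the e-form and the exception-table entry, which is why __BUILD_COPY_USER takes separate from and to operands.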
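
Likewise, a sketch of the prefetch wrappers under the same assumptions: in LEGACY_MODE, PREFS/PREFD fall through to PREF, while in EVA mode a user-side address uses PREFE, whose 9-bit offset may force the assembler to build the address in a temporary register, so the macro redirects the assembler temporary to $v1 to keep AT intact for the fault-handling path.

	# Legacy (or kernel-side) prefetch: 16-bit signed offset.
	PREF(	0, 8*32(src) )

	# EVA user-side prefetch: 9-bit offset, so a large offset may need a
	# scratch register; point the assembler temporary at $v1 while the
	# PREFE is emitted, then restore noat.
	.set	at=v1
	PREFE(	0, 8*32(src) )
	.set	noat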
