Diffstat (limited to 'arch/mips/lib/csum_partial.S')
-rw-r--r--	arch/mips/lib/csum_partial.S	291
1 file changed, 183 insertions, 108 deletions
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index a6adffbb4e5..9901237563c 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -8,6 +8,7 @@
  * Copyright (C) 1998, 1999 Ralf Baechle
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
  */
 #include <linux/errno.h>
 #include <asm/asm.h>
@@ -55,14 +56,20 @@
 #define UNIT(unit)  ((unit)*NBYTES)
 
 #define ADDC(sum,reg)						\
+	.set	push;						\
+	.set	noat;						\
 	ADD	sum, reg;					\
 	sltu	v1, sum, reg;					\
 	ADD	sum, v1;					\
+	.set	pop
 
 #define ADDC32(sum,reg)						\
+	.set	push;						\
+	.set	noat;						\
 	addu	sum, reg;					\
 	sltu	v1, sum, reg;					\
 	addu	sum, v1;					\
+	.set	pop
 
 #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)	\
 	LOAD	_t0, (offset + UNIT(0))(src);			\
@@ -296,7 +303,7 @@ LEAF(csum_partial)
  * checksum and copy routines based on memcpy.S
  *
  *	csum_partial_copy_nocheck(src, dst, len, sum)
- *	__csum_partial_copy_user(src, dst, len, sum, errp)
+ *	__csum_partial_copy_kernel(src, dst, len, sum, errp)
  *
  * See "Spec" in memcpy.S for details.	Unlike __copy_user, all
  * function in this file use the standard calling convention.
@@ -327,20 +334,58 @@ LEAF(csum_partial)
  * These handlers do not need to overwrite any data.
  */
 
-#define EXC(inst_reg,addr,handler)		\
-9:	inst_reg, addr;				\
-	.section __ex_table,"a";		\
-	PTR	9b, handler;			\
-	.previous
+/* Instruction type */
+#define LD_INSN 1
+#define ST_INSN 2
+#define LEGACY_MODE 1
+#define EVA_MODE    2
+#define USEROP   1
+#define KERNELOP 2
+
+/*
+ * Wrapper to add an entry in the exception table
+ * in case the insn causes a memory exception.
+ * Arguments:
+ * insn    : Load/store instruction
+ * type    : Instruction type
+ * reg     : Register
+ * addr    : Address
+ * handler : Exception handler
+ */
+#define EXC(insn, type, reg, addr, handler)	\
+	.if \mode == LEGACY_MODE;		\
+9:		insn reg, addr;			\
+		.section __ex_table,"a";	\
+		PTR	9b, handler;		\
+		.previous;			\
+	/* This is enabled in EVA mode */	\
+	.else;					\
+		/* If loading from user or storing to user */	\
+		.if ((\from == USEROP) && (type == LD_INSN)) || \
+		    ((\to == USEROP) && (type == ST_INSN));	\
+9:			__BUILD_EVA_INSN(insn##e, reg, addr);	\
+			.section __ex_table,"a";		\
+			PTR	9b, handler;			\
+			.previous;				\
+		.else;						\
+			/* EVA without exception */		\
+			insn reg, addr;				\
+		.endif;						\
+	.endif
+
+#undef LOAD
 
 #ifdef USE_DOUBLE
 
-#define LOAD   ld
-#define LOADL  ldl
-#define LOADR  ldr
-#define STOREL sdl
-#define STORER sdr
-#define STORE  sd
+#define LOADK	ld /* No exception */
+#define LOAD(reg, addr, handler)	EXC(ld, LD_INSN, reg, addr, handler)
+#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)	EXC(ldl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)	EXC(ldr, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)	EXC(sdl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)	EXC(sdr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)	EXC(sd, ST_INSN, reg, addr, handler)
 #define ADD    daddu
 #define SUB    dsubu
 #define SRL    dsrl
@@ -352,12 +397,15 @@ LEAF(csum_partial)
 
 #else
 
-#define LOAD   lw
-#define LOADL  lwl
-#define LOADR  lwr
-#define STOREL swl
-#define STORER swr
-#define STORE  sw
+#define LOADK	lw /* No exception */
+#define LOAD(reg, addr, handler)	EXC(lw, LD_INSN, reg, addr, handler)
+#define LOADBU(reg, addr, handler)	EXC(lbu, LD_INSN, reg, addr, handler)
+#define LOADL(reg, addr, handler)	EXC(lwl, LD_INSN, reg, addr, handler)
+#define LOADR(reg, addr, handler)	EXC(lwr, LD_INSN, reg, addr, handler)
+#define STOREB(reg, addr, handler)	EXC(sb, ST_INSN, reg, addr, handler)
+#define STOREL(reg, addr, handler)	EXC(swl, ST_INSN, reg, addr, handler)
+#define STORER(reg, addr, handler)	EXC(swr, ST_INSN, reg, addr, handler)
+#define STORE(reg, addr, handler)	EXC(sw, ST_INSN, reg, addr, handler)
 #define ADD    addu
 #define SUB    subu
 #define SRL    srl
@@ -396,14 +444,20 @@ LEAF(csum_partial)
 	.set	at=v1
 #endif
 
-LEAF(__csum_partial_copy_user)
+	.macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
+
 	PTR_ADDU	AT, src, len	/* See (1) above. */
+	/* initialize __nocheck if this the first time we execute this
+	 * macro
+	 */
#ifdef CONFIG_64BIT
 	move	errptr, a4
 #else
 	lw	errptr, 16(sp)
 #endif
-FEXPORT(csum_partial_copy_nocheck)
+	.if \__nocheck == 1
+	FEXPORT(csum_partial_copy_nocheck)
+	.endif
 	move	sum, zero
 	move	odd, zero
 	/*
@@ -419,48 +473,48 @@ FEXPORT(csum_partial_copy_nocheck)
 	 */
 	sltu	t2, len, NBYTES
 	and	t1, dst, ADDRMASK
-	bnez	t2, .Lcopy_bytes_checklen
+	bnez	t2, .Lcopy_bytes_checklen\@
 	 and	t0, src, ADDRMASK
 	andi	odd, dst, 0x1			/* odd buffer? */
-	bnez	t1, .Ldst_unaligned
+	bnez	t1, .Ldst_unaligned\@
 	 nop
-	bnez	t0, .Lsrc_unaligned_dst_aligned
+	bnez	t0, .Lsrc_unaligned_dst_aligned\@
 	/*
 	 * use delay slot for fall-through
 	 * src and dst are aligned; need to compute rem
 	 */
-.Lboth_aligned:
+.Lboth_aligned\@:
 	 SRL	t0, len, LOG_NBYTES+3	 # +3 for 8 units/iter
-	beqz	t0, .Lcleanup_both_aligned # len < 8*NBYTES
+	beqz	t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
 	 nop
 	SUB	len, 8*NBYTES		# subtract here for bgez loop
 	.align	4
 1:
-EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
-EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
-EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
-EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
-EXC(	LOAD	t4, UNIT(4)(src),	.Ll_exc_copy)
-EXC(	LOAD	t5, UNIT(5)(src),	.Ll_exc_copy)
-EXC(	LOAD	t6, UNIT(6)(src),	.Ll_exc_copy)
-EXC(	LOAD	t7, UNIT(7)(src),	.Ll_exc_copy)
+	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
+	LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
+	LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
+	LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
+	LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
 	SUB	len, len, 8*NBYTES
 	ADD	src, src, 8*NBYTES
-EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc)
+	STORE(t0, UNIT(0)(dst),	.Ls_exc\@)
 	ADDC(sum, t0)
-EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc\@)
 	ADDC(sum, t1)
-EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc)
+	STORE(t2, UNIT(2)(dst),	.Ls_exc\@)
 	ADDC(sum, t2)
-EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc)
+	STORE(t3, UNIT(3)(dst),	.Ls_exc\@)
 	ADDC(sum, t3)
-EXC(	STORE	t4, UNIT(4)(dst),	.Ls_exc)
+	STORE(t4, UNIT(4)(dst),	.Ls_exc\@)
 	ADDC(sum, t4)
-EXC(	STORE	t5, UNIT(5)(dst),	.Ls_exc)
+	STORE(t5, UNIT(5)(dst),	.Ls_exc\@)
 	ADDC(sum, t5)
-EXC(	STORE	t6, UNIT(6)(dst),	.Ls_exc)
+	STORE(t6, UNIT(6)(dst),	.Ls_exc\@)
 	ADDC(sum, t6)
-EXC(	STORE	t7, UNIT(7)(dst),	.Ls_exc)
+	STORE(t7, UNIT(7)(dst),	.Ls_exc\@)
 	ADDC(sum, t7)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 8*NBYTES
@@ -471,44 +525,44 @@ EXC(	STORE	t7, UNIT(7)(dst),	.Ls_exc)
 	/*
 	 * len == the number of bytes left to copy < 8*NBYTES
 	 */
-.Lcleanup_both_aligned:
+.Lcleanup_both_aligned\@:
 #define rem t7
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	 sltu	t0, len, 4*NBYTES
-	bnez	t0, .Lless_than_4units
+	bnez	t0, .Lless_than_4units\@
 	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
 	/*
 	 * len >= 4*NBYTES
 	 */
-EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
-EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
-EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
-EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
+	LOAD(t0, UNIT(0)(src), .Ll_exc\@)
+	LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
+	LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
+	LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
 	SUB	len, len, 4*NBYTES
 	ADD	src, src, 4*NBYTES
-EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc)
+	STORE(t0, UNIT(0)(dst),	.Ls_exc\@)
 	ADDC(sum, t0)
-EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc\@)
 	ADDC(sum, t1)
-EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc)
+	STORE(t2, UNIT(2)(dst),	.Ls_exc\@)
 	ADDC(sum, t2)
-EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc)
+	STORE(t3, UNIT(3)(dst),	.Ls_exc\@)
 	ADDC(sum, t3)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	.set	noreorder
-.Lless_than_4units:
+.Lless_than_4units\@:
 	/*
 	 * rem = len % NBYTES
 	 */
-	beq	rem, len, .Lcopy_bytes
+	beq	rem, len, .Lcopy_bytes\@
 	 nop
 1:
-EXC(	LOAD	t0, 0(src),		.Ll_exc)
+	LOAD(t0, 0(src), .Ll_exc\@)
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-EXC(	STORE	t0, 0(dst),		.Ls_exc)
+	STORE(t0, 0(dst), .Ls_exc\@)
 	ADDC(sum, t0)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, NBYTES
@@ -527,20 +581,20 @@ EXC(	STORE	t0, 0(dst),		.Ls_exc)
 	 * more instruction-level parallelism.
 	 */
 #define bits t2
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	 ADD	t1, dst, len	# t1 is just past last byte of dst
 	li	bits, 8*NBYTES
 	SLL	rem, len, 3	# rem = number of bits to keep
-EXC(	LOAD	t0, 0(src),		.Ll_exc)
+	LOAD(t0, 0(src), .Ll_exc\@)
 	SUB	bits, bits, rem # bits = number of bits to discard
 	SHIFT_DISCARD t0, t0, bits
-EXC(	STREST	t0, -1(t1),		.Ls_exc)
+	STREST(t0, -1(t1), .Ls_exc\@)
 	SHIFT_DISCARD_REVERT t0, t0, bits
 	.set reorder
 	ADDC(sum, t0)
-	b	.Ldone
+	b	.Ldone\@
 	.set noreorder
-.Ldst_unaligned:
+.Ldst_unaligned\@:
 	/*
 	 * dst is unaligned
 	 * t0 = src & ADDRMASK
@@ -551,25 +605,25 @@ EXC(	STREST	t0, -1(t1),		.Ls_exc)
 	 * Set match = (src and dst have same alignment)
 	 */
 #define match rem
-EXC(	LDFIRST t3, FIRST(0)(src),	.Ll_exc)
+	LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
 	ADD	t2, zero, NBYTES
-EXC(	LDREST	t3, REST(0)(src),	.Ll_exc_copy)
+	LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
 	SUB	t2, t2, t1	# t2 = number of bytes copied
 	xor	match, t0, t1
-EXC(	STFIRST t3, FIRST(0)(dst),	.Ls_exc)
+	STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
 	SLL	t4, t1, 3		# t4 = number of bits to discard
 	SHIFT_DISCARD t3, t3, t4
 	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
 	ADDC(sum, t3)
-	beq	len, t2, .Ldone
+	beq	len, t2, .Ldone\@
 	 SUB	len, len, t2
 	ADD	dst, dst, t2
-	beqz	match, .Lboth_aligned
+	beqz	match, .Lboth_aligned\@
 	 ADD	src, src, t2
-.Lsrc_unaligned_dst_aligned:
+.Lsrc_unaligned_dst_aligned\@:
 	SRL	t0, len, LOG_NBYTES+2	 # +2 for 4 units/iter
-	beqz	t0, .Lcleanup_src_unaligned
+	beqz	t0, .Lcleanup_src_unaligned\@
 	 and	rem, len, (4*NBYTES-1)	 # rem = len % 4*NBYTES
 1:
 /*
@@ -578,53 +632,53 @@ EXC(	STFIRST t3, FIRST(0)(dst),	.Ls_exc)
  * It's OK to load FIRST(N+1) before REST(N) because the two addresses
  * are to the same unit (unless src is aligned, but it's not).
  */
-EXC(	LDFIRST t0, FIRST(0)(src),	.Ll_exc)
-EXC(	LDFIRST t1, FIRST(1)(src),	.Ll_exc_copy)
+	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+	LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
 	SUB	len, len, 4*NBYTES
-EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
-EXC(	LDREST	t1, REST(1)(src),	.Ll_exc_copy)
-EXC(	LDFIRST t2, FIRST(2)(src),	.Ll_exc_copy)
-EXC(	LDFIRST t3, FIRST(3)(src),	.Ll_exc_copy)
-EXC(	LDREST	t2, REST(2)(src),	.Ll_exc_copy)
-EXC(	LDREST	t3, REST(3)(src),	.Ll_exc_copy)
+	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
+	LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
+	LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
+	LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
+	LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
+	LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
 	ADD	src, src, 4*NBYTES
 #ifdef CONFIG_CPU_SB1
 	nop				# improves slotting
 #endif
-EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc)
+	STORE(t0, UNIT(0)(dst),	.Ls_exc\@)
 	ADDC(sum, t0)
-EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc)
+	STORE(t1, UNIT(1)(dst),	.Ls_exc\@)
 	ADDC(sum, t1)
-EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc)
+	STORE(t2, UNIT(2)(dst),	.Ls_exc\@)
 	ADDC(sum, t2)
-EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc)
+	STORE(t3, UNIT(3)(dst),	.Ls_exc\@)
 	ADDC(sum, t3)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
 	bne	len, rem, 1b
 	.set	noreorder
-.Lcleanup_src_unaligned:
-	beqz	len, .Ldone
+.Lcleanup_src_unaligned\@:
+	beqz	len, .Ldone\@
 	 and	rem, len, NBYTES-1  # rem = len % NBYTES
-	beq	rem, len, .Lcopy_bytes
+	beq	rem, len, .Lcopy_bytes\@
 	 nop
 1:
-EXC(	LDFIRST t0, FIRST(0)(src),	.Ll_exc)
-EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
+	LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
+	LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
-EXC(	STORE	t0, 0(dst),		.Ls_exc)
+	STORE(t0, 0(dst), .Ls_exc\@)
 	ADDC(sum, t0)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, NBYTES
 	bne	len, rem, 1b
 	.set	noreorder
-.Lcopy_bytes_checklen:
-	beqz	len, .Ldone
+.Lcopy_bytes_checklen\@:
+	beqz	len, .Ldone\@
 	 nop
-.Lcopy_bytes:
+.Lcopy_bytes\@:
 	/* 0 < len < NBYTES  */
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
 #define SHIFT_START 0
@@ -637,12 +691,12 @@ EXC(	STORE	t0, 0(dst),		.Ls_exc)
 	 li	t3, SHIFT_START # shift
 /* use .Ll_exc_copy here to return correct sum on fault */
 #define COPY_BYTE(N)			\
-EXC(	lbu	t0, N(src), .Ll_exc_copy);	\
+	LOADBU(t0, N(src), .Ll_exc_copy\@);	\
 	SUB	len, len, 1;		\
-EXC(	sb	t0, N(dst), .Ls_exc);	\
+	STOREB(t0, N(dst), .Ls_exc\@);	\
 	SLLV	t0, t0, t3;		\
 	addu	t3, SHIFT_INC;		\
-	beqz	len, .Lcopy_bytes_done; \
+	beqz	len, .Lcopy_bytes_done\@; \
 	 or	t2, t0
 
 	COPY_BYTE(0)
@@ -653,15 +707,17 @@ EXC(	sb	t0, N(dst), .Ls_exc);	\
 	COPY_BYTE(4)
 	COPY_BYTE(5)
 #endif
-EXC(	lbu	t0, NBYTES-2(src), .Ll_exc_copy)
+	LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
 	SUB	len, len, 1
-EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)
+	STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
 	SLLV	t0, t0, t3
 	or	t2, t0
-.Lcopy_bytes_done:
+.Lcopy_bytes_done\@:
 	ADDC(sum, t2)
-.Ldone:
+.Ldone\@:
 	/* fold checksum */
+	.set	push
+	.set	noat
 #ifdef USE_DOUBLE
 	dsll32	v1, sum, 0
 	daddu	sum, v1
@@ -684,12 +740,13 @@ EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)
 	or	sum, sum, t0
 1:
 #endif
+	.set	pop
 	.set reorder
 	ADDC32(sum, psum)
 	jr	ra
 	.set noreorder
 
-.Ll_exc_copy:
+.Ll_exc_copy\@:
 	/*
 	 * Copy bytes from src until faulting load address (or until a
 	 * lb faults)
@@ -700,11 +757,11 @@ EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)
 	 *
 	 * Assumes src < THREAD_BUADDR($28)
 	 */
-	LOAD	t0, TI_TASK($28)
+	LOADK	t0, TI_TASK($28)
 	 li	t2, SHIFT_START
-	LOAD	t0, THREAD_BUADDR(t0)
+	LOADK	t0, THREAD_BUADDR(t0)
 1:
-EXC(	lbu	t1, 0(src),	.Ll_exc)
+	LOADBU(t1, 0(src), .Ll_exc\@)
 	ADD	src, src, 1
 	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
 	SLLV	t1, t1, t2
@@ -714,10 +771,10 @@ EXC(	lbu	t1, 0(src),	.Ll_exc)
 	ADD	dst, dst, 1
 	bne	src, t0, 1b
 	.set	noreorder
-.Ll_exc:
-	LOAD	t0, TI_TASK($28)
+.Ll_exc\@:
+	LOADK	t0, TI_TASK($28)
 	 nop
-	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
+	LOADK	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
 	 nop
 	SUB	len, AT, t0		# len number of uncopied bytes
 	/*
@@ -733,7 +790,7 @@ EXC(	lbu	t1, 0(src),	.Ll_exc)
 	 */
 	.set	reorder				/* DADDI_WAR */
 	SUB	src, len, 1
-	beqz	len, .Ldone
+	beqz	len, .Ldone\@
 	.set	noreorder
 1:	sb	zero, 0(dst)
 	ADD	dst, dst, 1
@@ -748,13 +805,31 @@ EXC(	lbu	t1, 0(src),	.Ll_exc)
 	 SUB	src, src, v1
 #endif
 	li	v1, -EFAULT
-	b	.Ldone
+	b	.Ldone\@
 	 sw	v1, (errptr)
 
-.Ls_exc:
+.Ls_exc\@:
 	li	v0, -1 /* invalid checksum */
 	li	v1, -EFAULT
 	jr	ra
 	 sw	v1, (errptr)
 	.set	pop
-	END(__csum_partial_copy_user)
+	.endm
+
+LEAF(__csum_partial_copy_kernel)
+#ifndef CONFIG_EVA
+FEXPORT(__csum_partial_copy_to_user)
+FEXPORT(__csum_partial_copy_from_user)
+#endif
+__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
+END(__csum_partial_copy_kernel)
+
+#ifdef CONFIG_EVA
+LEAF(__csum_partial_copy_to_user)
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
+END(__csum_partial_copy_to_user)

+LEAF(__csum_partial_copy_from_user)
+__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
+END(__csum_partial_copy_from_user)
+#endif
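Note on the arithmetic: the ADDC/ADDC32 macros that the patch wraps in .set push/.set noat accumulate a ones'-complement sum, folding the carry of every addition back into the running total (ADD, sltu, ADD). A minimal C sketch of that accumulate step, written for illustration only (addc32 and the sample words below are not from the kernel source):

#include <stdint.h>
#include <stdio.h>

/* Ones'-complement accumulate: fold the carry of each 32-bit add back
 * into the sum, mirroring what ADDC32(sum, reg) does with addu/sltu/addu. */
static uint32_t addc32(uint32_t sum, uint32_t word)
{
	sum += word;
	if (sum < word)		/* unsigned overflow = carry out of bit 31 */
		sum++;
	return sum;
}

int main(void)
{
	/* arbitrary example words */
	const uint32_t words[] = { 0x45000054, 0x1c468000, 0x40014f93, 0xc0a80001 };
	uint32_t sum = 0;

	for (unsigned int i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum = addc32(sum, words[i]);

	printf("partial sum: 0x%08x\n", (unsigned int)sum);
	return 0;
}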

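Similarly, the .Ldone epilogue (now bracketed by .set push/.set noat) folds the 64-bit accumulator down to 32 bits with an end-around carry before ADDC32(sum, psum), and swaps byte lanes when the destination buffer started at an odd address. A rough C equivalent of those two steps, shown only as an illustration (the helper names are invented for this sketch):

#include <stdint.h>

/* Fold a 64-bit accumulator to 32 bits with end-around carry, the effect of
 * the USE_DOUBLE sequence under .Ldone (dsll32/daddu/sltu/dsra32/addu). */
static uint32_t fold64_to_32(uint64_t sum)
{
	uint32_t lo = (uint32_t)sum;
	uint32_t hi = (uint32_t)(sum >> 32);
	uint32_t res = lo + hi;

	if (res < hi)		/* carry out of the 32-bit add */
		res++;
	return res;
}

/* Odd destination alignment: the bytes were summed one position out of
 * phase, so swapping the bytes within each halfword (the shift/or sequence
 * that ends in "or sum, sum, t0") realigns the ones'-complement sum. */
static uint32_t swap_bytes_in_halfwords(uint32_t sum)
{
	return ((sum & 0x00ff00ffu) << 8) | ((sum >> 8) & 0x00ff00ffu);
}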