Diffstat (limited to 'arch/arm/lib')
-rw-r--r--  arch/arm/lib/Makefile               13
-rw-r--r--  arch/arm/lib/backtrace.S            14
-rw-r--r--  arch/arm/lib/bitops.h                1
-rw-r--r--  arch/arm/lib/clear_user.S            8
-rw-r--r--  arch/arm/lib/copy_from_user.S        4
-rw-r--r--  arch/arm/lib/copy_page.S             2
-rw-r--r--  arch/arm/lib/copy_to_user.S          4
-rw-r--r--  arch/arm/lib/csumipv6.S              2
-rw-r--r--  arch/arm/lib/delay.S                18
-rw-r--r--  arch/arm/lib/ecard.S                 4
-rw-r--r--  arch/arm/lib/findbit.S              10
-rw-r--r--  arch/arm/lib/io-readsb.S             6
-rw-r--r--  arch/arm/lib/io-readsw-armv3.S       6
-rw-r--r--  arch/arm/lib/io-writesb.S            6
-rw-r--r--  arch/arm/lib/io-writesw-armv3.S      6
-rw-r--r--  arch/arm/lib/memchr.S                2
-rw-r--r--  arch/arm/lib/memset.S                4
-rw-r--r--  arch/arm/lib/memzero.S               4
-rw-r--r--  arch/arm/lib/strchr.S                2
-rw-r--r--  arch/arm/lib/strncpy_from_user.S     7
-rw-r--r--  arch/arm/lib/strnlen_user.S          9
-rw-r--r--  arch/arm/lib/strrchr.S               2
-rw-r--r--  arch/arm/lib/uaccess.S              16
-rw-r--r--  arch/arm/lib/ucmpdi2.S               1
24 files changed, 73 insertions, 78 deletions
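
Most of the hunks below are a mechanical conversion: the LOADREGS() and RETINSTR() wrapper macros (and the save_lr/restore_pc pair) are replaced with the plain ARM instructions they stood for, the <linux/config.h> includes are dropped, the __arch_* user-access entry points lose their prefix, and the Makefile moves the MMU-dependent helpers into an mmu-y list gated by CONFIG_MMU. A minimal before/after sketch of the macro conversion, with the expansions inferred from the one-to-one replacements visible in the hunks themselves:

	@ old form (wrapper macros)            @ new form (plain instructions)
	RETINSTR(mov, pc, lr)                  @ ->  mov     pc, lr
	LOADREGS(fd, sp!, {r4 - r8, pc})       @ ->  ldmfd   sp!, {r4 - r8, pc}
	LOADREGS(eqfd, sp!, {r4 - r8, pc})     @ ->  ldmeqfd sp!, {r4 - r8, pc}
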
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 7b726b627ea..30351cd4560 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -6,28 +6,31 @@
lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
- copy_page.o delay.o findbit.o memchr.o memcpy.o \
+ delay.o findbit.o memchr.o memcpy.o \
memmove.o memset.o memzero.o setbit.o \
strncpy_from_user.o strnlen_user.o \
strchr.o strrchr.o \
testchangebit.o testclearbit.o testsetbit.o \
- getuser.o putuser.o clear_user.o \
ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
ucmpdi2.o lib1funcs.o div64.o sha1.o \
io-readsb.o io-writesb.o io-readsl.o io-writesl.o
+mmu-y := clear_user.o copy_page.o getuser.o putuser.o
+
# the code in uaccess.S is not preemption safe and
# probably faster on ARMv3 only
ifeq ($(CONFIG_PREEMPT),y)
- lib-y += copy_from_user.o copy_to_user.o
+ mmu-y += copy_from_user.o copy_to_user.o
else
ifneq ($(CONFIG_CPU_32v3),y)
- lib-y += copy_from_user.o copy_to_user.o
+ mmu-y += copy_from_user.o copy_to_user.o
else
- lib-y += uaccess.o
+ mmu-y += uaccess.o
endif
endif
+lib-$(CONFIG_MMU) += $(mmu-y)
+
ifeq ($(CONFIG_CPU_32v3),y)
lib-y += io-readsw-armv3.o io-writesw-armv3.o
else
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 16153c86c3f..74230083cbf 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -10,7 +10,6 @@
* 27/03/03 Ian Molton Clean up CONFIG_CPU
*
*/
-#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
.text
@@ -41,7 +40,7 @@ ENTRY(c_backtrace)
movne r0, #0
movs frame, r0
1: moveq r0, #-2
- LOADREGS(eqfd, sp!, {r4 - r8, pc})
+ ldmeqfd sp!, {r4 - r8, pc}
2: stmfd sp!, {pc} @ calculate offset of PC in STMIA instruction
ldr r0, [sp], #4
@@ -85,7 +84,7 @@ ENTRY(c_backtrace)
* A zero next framepointer means we're done.
*/
teq next, #0
- LOADREGS(eqfd, sp!, {r4 - r8, pc})
+ ldmeqfd sp!, {r4 - r8, pc}
/*
* The next framepointer must be above the
@@ -97,16 +96,13 @@ ENTRY(c_backtrace)
b 1007f
/*
- * Fixup for LDMDB
+ * Fixup for LDMDB. Note that this must not be in the fixup section.
*/
- .section .fixup,"ax"
- .align 0
1007: ldr r0, =.Lbad
mov r1, frame
bl printk
- LOADREGS(fd, sp!, {r4 - r8, pc})
+ ldmfd sp!, {r4 - r8, pc}
.ltorg
- .previous
.section __ex_table,"a"
.align 3
@@ -145,7 +141,7 @@ ENTRY(c_backtrace)
adrne r0, .Lcr
blne printk
mov r0, stack
- LOADREGS(fd, sp!, {instr, reg, stack, r7, r8, pc})
+ ldmfd sp!, {instr, reg, stack, r7, r8, pc}
.Lfp: .asciz " r%d = %08X%c"
.Lcr: .asciz "\n"
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index b8c14e93669..54225102174 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,4 +1,3 @@
-#include <linux/config.h>
#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K)
.macro bitop, instr
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 7ff9f831b3f..ecb28dcdaf7 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,13 +12,13 @@
.text
-/* Prototype: int __arch_clear_user(void *addr, size_t sz)
+/* Prototype: int __clear_user(void *addr, size_t sz)
* Purpose : clear some user memory
* Params : addr - user memory address to clear
* : sz - number of bytes to clear
* Returns : number of bytes NOT cleared
*/
-ENTRY(__arch_clear_user)
+ENTRY(__clear_user)
stmfd sp!, {r1, lr}
mov r2, #0
cmp r1, #4
@@ -43,10 +43,10 @@ USER( strnebt r2, [r0], #1)
tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1
USER( strnebt r2, [r0], #1)
mov r0, #0
- LOADREGS(fd,sp!, {r1, pc})
+ ldmfd sp!, {r1, pc}
.section .fixup,"ax"
.align 0
-9001: LOADREGS(fd,sp!, {r0, pc})
+9001: ldmfd sp!, {r0, pc}
.previous
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7497393a0e8..6b7363ce749 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -16,7 +16,7 @@
/*
* Prototype:
*
- * size_t __arch_copy_from_user(void *to, const void *from, size_t n)
+ * size_t __copy_from_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -83,7 +83,7 @@
.text
-ENTRY(__arch_copy_from_user)
+ENTRY(__copy_from_user)
#include "copy_template.S"
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index 68117968482..666c99cc074 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -43,4 +43,4 @@ ENTRY(copy_page)
bgt 1b @ 1
PLD( ldmeqia r1!, {r3, r4, ip, lr} )
PLD( beq 2b )
- LOADREGS(fd, sp!, {r4, pc}) @ 3
+ ldmfd sp!, {r4, pc} @ 3
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 4a6d8ea1402..5224d94688d 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -16,7 +16,7 @@
/*
* Prototype:
*
- * size_t __arch_copy_to_user(void *to, const void *from, size_t n)
+ * size_t __copy_to_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -86,7 +86,7 @@
.text
-ENTRY(__arch_copy_to_user)
+ENTRY(__copy_to_user)
#include "copy_template.S"
diff --git a/arch/arm/lib/csumipv6.S b/arch/arm/lib/csumipv6.S
index 7065a20ee8a..9621469beec 100644
--- a/arch/arm/lib/csumipv6.S
+++ b/arch/arm/lib/csumipv6.S
@@ -28,5 +28,5 @@ ENTRY(__csum_ipv6_magic)
adcs r0, r0, r3
adcs r0, r0, r2
adcs r0, r0, #0
- LOADREGS(fd, sp!, {pc})
+ ldmfd sp!, {pc}
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay.S
index 9183b06c0e2..930a7025922 100644
--- a/arch/arm/lib/delay.S
+++ b/arch/arm/lib/delay.S
@@ -31,7 +31,7 @@ ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
mov r2, r2, lsr #10 @ max = 0x00007fff
mul r0, r2, r0 @ max = 2^32-1
movs r0, r0, lsr #6
- RETINSTR(moveq,pc,lr)
+ moveq pc, lr
/*
* loops = r0 * HZ * loops_per_jiffy / 1000000
@@ -43,20 +43,20 @@ ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
ENTRY(__delay)
subs r0, r0, #1
#if 0
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
- RETINSTR(movls,pc,lr)
+ movls pc, lr
subs r0, r0, #1
#endif
bhi __delay
- RETINSTR(mov,pc,lr)
+ mov pc, lr
diff --git a/arch/arm/lib/ecard.S b/arch/arm/lib/ecard.S
index fb7b602a6f7..c55aaa2a208 100644
--- a/arch/arm/lib/ecard.S
+++ b/arch/arm/lib/ecard.S
@@ -29,7 +29,7 @@ ENTRY(ecard_loader_read)
CPSR2SPSR(r0)
mov lr, pc
mov pc, r2
- LOADREGS(fd, sp!, {r4 - r12, pc})
+ ldmfd sp!, {r4 - r12, pc}
@ Purpose: call an expansion card loader to reset the card
@ Proto : void read_loader(int card_base, char *loader);
@@ -41,5 +41,5 @@ ENTRY(ecard_loader_reset)
CPSR2SPSR(r0)
mov lr, pc
add pc, r1, #8
- LOADREGS(fd, sp!, {r4 - r12, pc})
+ ldmfd sp!, {r4 - r12, pc}
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 6f8e27a58c7..a5ca0248aa4 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -32,7 +32,7 @@ ENTRY(_find_first_zero_bit_le)
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
- RETINSTR(mov,pc,lr)
+ mov pc, lr
/*
* Purpose : Find next 'zero' bit
@@ -66,7 +66,7 @@ ENTRY(_find_first_bit_le)
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
- RETINSTR(mov,pc,lr)
+ mov pc, lr
/*
* Purpose : Find next 'one' bit
@@ -98,7 +98,7 @@ ENTRY(_find_first_zero_bit_be)
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
- RETINSTR(mov,pc,lr)
+ mov pc, lr
ENTRY(_find_next_zero_bit_be)
teq r1, #0
@@ -126,7 +126,7 @@ ENTRY(_find_first_bit_be)
2: cmp r2, r1 @ any more?
blo 1b
3: mov r0, r1 @ no free bits
- RETINSTR(mov,pc,lr)
+ mov pc, lr
ENTRY(_find_next_bit_be)
teq r1, #0
@@ -164,5 +164,5 @@ ENTRY(_find_next_bit_be)
addeq r2, r2, #1
mov r0, r2
#endif
- RETINSTR(mov,pc,lr)
+ mov pc, lr
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index d3d8de71a2c..fb966ad0276 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -72,7 +72,7 @@ ENTRY(__raw_readsb)
bpl .Linsb_16_lp
tst r2, #15
- LOADREGS(eqfd, sp!, {r4 - r6, pc})
+ ldmeqfd sp!, {r4 - r6, pc}
.Linsb_no_16: tst r2, #8
beq .Linsb_no_8
@@ -109,7 +109,7 @@ ENTRY(__raw_readsb)
str r3, [r1], #4
.Linsb_no_4: ands r2, r2, #3
- LOADREGS(eqfd, sp!, {r4 - r6, pc})
+ ldmeqfd sp!, {r4 - r6, pc}
cmp r2, #2
ldrb r3, [r0]
@@ -119,4 +119,4 @@ ENTRY(__raw_readsb)
ldrgtb r3, [r0]
strgtb r3, [r1]
- LOADREGS(fd, sp!, {r4 - r6, pc})
+ ldmfd sp!, {r4 - r6, pc}
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index 146d47c1545..4ef90418514 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -28,7 +28,7 @@
strb r3, [r1], #1
subs r2, r2, #1
- RETINSTR(moveq, pc, lr)
+ moveq pc, lr
ENTRY(__raw_readsw)
teq r2, #0 @ do we have to check for the zero len?
@@ -69,7 +69,7 @@ ENTRY(__raw_readsw)
bpl .Linsw_8_lp
tst r2, #7
- LOADREGS(eqfd, sp!, {r4, r5, r6, pc})
+ ldmeqfd sp!, {r4, r5, r6, pc}
.Lno_insw_8: tst r2, #4
beq .Lno_insw_4
@@ -102,6 +102,6 @@ ENTRY(__raw_readsw)
movne r3, r3, lsr #8
strneb r3, [r1]
- LOADREGS(fd, sp!, {r4, r5, r6, pc})
+ ldmfd sp!, {r4, r5, r6, pc}
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index 08209fc640e..7eba2b6cc69 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -64,7 +64,7 @@ ENTRY(__raw_writesb)
bpl .Loutsb_16_lp
tst r2, #15
- LOADREGS(eqfd, sp!, {r4, r5, pc})
+ ldmeqfd sp!, {r4, r5, pc}
.Loutsb_no_16: tst r2, #8
beq .Loutsb_no_8
@@ -80,7 +80,7 @@ ENTRY(__raw_writesb)
outword r3
.Loutsb_no_4: ands r2, r2, #3
- LOADREGS(eqfd, sp!, {r4, r5, pc})
+ ldmeqfd sp!, {r4, r5, pc}
cmp r2, #2
ldrb r3, [r1], #1
@@ -90,4 +90,4 @@ ENTRY(__raw_writesb)
ldrgtb r3, [r1]
strgtb r3, [r0]
- LOADREGS(fd, sp!, {r4, r5, pc})
+ ldmfd sp!, {r4, r5, pc}
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 52d62b48129..1607a29f49b 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -29,7 +29,7 @@
orr r3, r3, r3, lsl #16
str r3, [r0]
subs r2, r2, #1
- RETINSTR(moveq, pc, lr)
+ moveq pc, lr
ENTRY(__raw_writesw)
teq r2, #0 @ do we have to check for the zero len?
@@ -80,7 +80,7 @@ ENTRY(__raw_writesw)
bpl .Loutsw_8_lp
tst r2, #7
- LOADREGS(eqfd, sp!, {r4, r5, r6, pc})
+ ldmeqfd sp!, {r4, r5, r6, pc}
.Lno_outsw_8: tst r2, #4
beq .Lno_outsw_4
@@ -124,4 +124,4 @@ ENTRY(__raw_writesw)
orrne ip, ip, ip, lsr #16
strne ip, [r0]
- LOADREGS(fd, sp!, {r4, r5, r6, pc})
+ ldmfd sp!, {r4, r5, r6, pc}
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S
index ac34fe55d21..e7ab1ea8eba 100644
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -22,4 +22,4 @@ ENTRY(memchr)
bne 1b
sub r0, r0, #1
2: movne r0, #0
- RETINSTR(mov,pc,lr)
+ mov pc, lr
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index a1795f59993..95b110b07a8 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -53,7 +53,7 @@ ENTRY(memset)
stmgeia r0!, {r1, r3, ip, lr}
stmgeia r0!, {r1, r3, ip, lr}
bgt 2b
- LOADREGS(eqfd, sp!, {pc}) @ Now <64 bytes to go.
+ ldmeqfd sp!, {pc} @ Now <64 bytes to go.
/*
* No need to correct the count; we're only testing bits from now on
*/
@@ -77,4 +77,4 @@ ENTRY(memset)
strneb r1, [r0], #1
tst r2, #1
strneb r1, [r0], #1
- RETINSTR(mov,pc,lr)
+ mov pc, lr
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 51ccc60160f..abf2508e822 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -53,7 +53,7 @@ ENTRY(__memzero)
stmgeia r0!, {r2, r3, ip, lr} @ 4
stmgeia r0!, {r2, r3, ip, lr} @ 4
bgt 3b @ 1
- LOADREGS(eqfd, sp!, {pc}) @ 1/2 quick exit
+ ldmeqfd sp!, {pc} @ 1/2 quick exit
/*
* No need to correct the count; we're only testing bits from now on
*/
@@ -77,4 +77,4 @@ ENTRY(__memzero)
strneb r2, [r0], #1 @ 1
tst r1, #1 @ 1 a byte left over
strneb r2, [r0], #1 @ 1
- RETINSTR(mov,pc,lr) @ 1
+ mov pc, lr @ 1
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S
index 5b9b493733f..9f18d6fdee6 100644
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -23,4 +23,4 @@ ENTRY(strchr)
teq r2, r1
movne r0, #0
subeq r0, r0, #1
- RETINSTR(mov,pc,lr)
+ mov pc, lr
diff --git a/arch/arm/lib/strncpy_from_user.S b/arch/arm/lib/strncpy_from_user.S
index 629cc877527..36e3741a377 100644
--- a/arch/arm/lib/strncpy_from_user.S
+++ b/arch/arm/lib/strncpy_from_user.S
@@ -20,8 +20,7 @@
* returns the number of characters copied (strlen of copied string),
* -EFAULT on exception, or "len" if we fill the whole buffer
*/
-ENTRY(__arch_strncpy_from_user)
- save_lr
+ENTRY(__strncpy_from_user)
mov ip, r1
1: subs r2, r2, #1
USER( ldrplbt r3, [r1], #1)
@@ -31,13 +30,13 @@ USER( ldrplbt r3, [r1], #1)
bne 1b
sub r1, r1, #1 @ take NUL character out of count
2: sub r0, r1, ip
- restore_pc
+ mov pc, lr
.section .fixup,"ax"
.align 0
9001: mov r3, #0
strb r3, [r0, #0] @ null terminate
mov r0, #-EFAULT
- restore_pc
+ mov pc, lr
.previous
diff --git a/arch/arm/lib/strnlen_user.S b/arch/arm/lib/strnlen_user.S
index 67bcd826812..18d8fa4f925 100644
--- a/arch/arm/lib/strnlen_user.S
+++ b/arch/arm/lib/strnlen_user.S
@@ -14,14 +14,13 @@
.text
.align 5
-/* Prototype: unsigned long __arch_strnlen_user(const char *str, long n)
+/* Prototype: unsigned long __strnlen_user(const char *str, long n)
* Purpose : get length of a string in user memory
* Params : str - address of string in user memory
* Returns : length of string *including terminator*
* or zero on exception, or n + 1 if too long
*/
-ENTRY(__arch_strnlen_user)
- save_lr
+ENTRY(__strnlen_user)
mov r2, r0
1:
USER( ldrbt r3, [r0], #1)
@@ -31,10 +30,10 @@ USER( ldrbt r3, [r0], #1)
bne 1b
add r0, r0, #1
2: sub r0, r0, r2
- restore_pc
+ mov pc, lr
.section .fixup,"ax"
.align 0
9001: mov r0, #0
- restore_pc
+ mov pc, lr
.previous
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S
index fa923f026f1..538df220aa4 100644
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -22,4 +22,4 @@ ENTRY(strrchr)
teq r2, #0
bne 1b
mov r0, r3
- RETINSTR(mov,pc,lr)
+ mov pc, lr
diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
index 0cc450f863b..b48bd6d5fd8 100644
--- a/arch/arm/lib/uaccess.S
+++ b/arch/arm/lib/uaccess.S
@@ -19,7 +19,7 @@
#define PAGE_SHIFT 12
-/* Prototype: int __arch_copy_to_user(void *to, const char *from, size_t n)
+/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
* Purpose : copy a block to user memory from kernel memory
* Params : to - user memory
* : from - kernel memory
@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
sub r2, r2, ip
b .Lc2u_dest_aligned
-ENTRY(__arch_copy_to_user)
+ENTRY(__copy_to_user)
stmfd sp!, {r2, r4 - r7, lr}
cmp r2, #4
blt .Lc2u_not_enough
@@ -105,7 +105,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
movs ip, r2
bne .Lc2u_nowords
.Lc2u_finished: mov r0, #0
- LOADREGS(fd,sp!,{r2, r4 - r7, pc})
+ ldmfd sp!, {r2, r4 - r7, pc}
.Lc2u_src_not_aligned:
bic r1, r1, #3
@@ -280,10 +280,10 @@ USER( strgtbt r3, [r0], #1) @ May fault
.section .fixup,"ax"
.align 0
-9001: LOADREGS(fd,sp!, {r0, r4 - r7, pc})
+9001: ldmfd sp!, {r0, r4 - r7, pc}
.previous
-/* Prototype: unsigned long __arch_copy_from_user(void *to,const void *from,unsigned long n);
+/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
* Purpose : copy a block from user memory to kernel memory
* Params : to - kernel memory
* : from - user memory
@@ -302,7 +302,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
sub r2, r2, ip
b .Lcfu_dest_aligned
-ENTRY(__arch_copy_from_user)
+ENTRY(__copy_from_user)
stmfd sp!, {r0, r2, r4 - r7, lr}
cmp r2, #4
blt .Lcfu_not_enough
@@ -369,7 +369,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
bne .Lcfu_nowords
.Lcfu_finished: mov r0, #0
add sp, sp, #8
- LOADREGS(fd,sp!,{r4 - r7, pc})
+ ldmfd sp!, {r4 - r7, pc}
.Lcfu_src_not_aligned:
bic r1, r1, #3
@@ -556,6 +556,6 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
movne r1, r4
blne __memzero
mov r0, r4
- LOADREGS(fd,sp!, {r4 - r7, pc})
+ ldmfd sp!, {r4 - r7, pc}
.previous
diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S
index d847a62834c..f76de07ac18 100644
--- a/arch/arm/lib/ucmpdi2.S
+++ b/arch/arm/lib/ucmpdi2.S
@@ -10,7 +10,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/config.h>
#include <linux/linkage.h>
#ifdef __ARMEB__