Diffstat (limited to 'arch/arc/lib')
-rw-r--r--  arch/arc/lib/memcmp.S      |  6
-rw-r--r--  arch/arc/lib/memcpy-700.S  |  6
-rw-r--r--  arch/arc/lib/memset.S      | 10
-rw-r--r--  arch/arc/lib/strchr-700.S  | 16
-rw-r--r--  arch/arc/lib/strcmp.S      |  6
-rw-r--r--  arch/arc/lib/strcpy-700.S  |  6
-rw-r--r--  arch/arc/lib/strlen.S      |  6
7 files changed, 33 insertions(+), 23 deletions(-)
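The change below is mechanical: each ARC-private ARC_ENTRY/ARC_EXIT annotation (pulled in via <asm/linkage.h>) is replaced with the generic ENTRY()/END() assembler macros from <linux/linkage.h>. As a rough sketch of what the generic macros provide (simplified here, not the verbatim definitions from include/linux/linkage.h, which vary by kernel version):

    /* Simplified sketch of the generic assembler linkage macros; see
       include/linux/linkage.h for the real, version-dependent definitions. */
    #define ENTRY(name)          \
            .globl name;         \
            .align 4;            \
            name:

    #define END(name)            \
            .size name, . - name

ENTRY() exports and aligns the symbol and END() records its size, so the per-arch ARC_ENTRY/ARC_EXIT wrappers are no longer needed.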
diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S
index bc813d55b6c..978bf8314df 100644
--- a/arch/arc/lib/memcmp.S
+++ b/arch/arc/lib/memcmp.S
@@ -6,7 +6,7 @@
* published by the Free Software Foundation.
*/
-#include <asm/linkage.h>
+#include <linux/linkage.h>
#ifdef __LITTLE_ENDIAN__
#define WORD2 r2
@@ -16,7 +16,7 @@
#define SHIFT r2
#endif
-ARC_ENTRY memcmp
+ENTRY(memcmp)
or r12,r0,r1
asl_s r12,r12,30
sub r3,r2,1
@@ -121,4 +121,4 @@ ARC_ENTRY memcmp
.Lnil:
j_s.d [blink]
mov r0,0
-ARC_EXIT memcmp
+END(memcmp)
diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S
index b64cc10ac91..3222573e50d 100644
--- a/arch/arc/lib/memcpy-700.S
+++ b/arch/arc/lib/memcpy-700.S
@@ -6,9 +6,9 @@
* published by the Free Software Foundation.
*/
-#include <asm/linkage.h>
+#include <linux/linkage.h>
-ARC_ENTRY memcpy
+ENTRY(memcpy)
or r3,r0,r1
asl_s r3,r3,30
mov_s r5,r0
@@ -63,4 +63,4 @@ ARC_ENTRY memcpy
.Lendbloop:
j_s.d [blink]
stb r12,[r5,0]
-ARC_EXIT memcpy
+END(memcpy)
diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S
index 9b2d88d2e14..d36bd43fc98 100644
--- a/arch/arc/lib/memset.S
+++ b/arch/arc/lib/memset.S
@@ -6,11 +6,11 @@
* published by the Free Software Foundation.
*/
-#include <asm/linkage.h>
+#include <linux/linkage.h>
#define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */
-ARC_ENTRY memset
+ENTRY(memset)
mov_s r4,r0
or r12,r0,r2
bmsk.f r12,r12,1
@@ -46,14 +46,14 @@ ARC_ENTRY memset
stb.ab r1,[r4,1]
.Ltiny_end:
j_s [blink]
-ARC_EXIT memset
+END(memset)
; memzero: @r0 = mem, @r1 = size_t
; memset: @r0 = mem, @r1 = char, @r2 = size_t
-ARC_ENTRY memzero
+ENTRY(memzero)
; adjust bzero args to memset args
mov r2, r1
mov r1, 0
b memset ;tail call so need to tinker with blink
-ARC_EXIT memzero
+END(memzero)
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c10475d47..b725d586210 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -11,9 +11,9 @@
presence of the norm instruction makes it easier to operate on whole
words branch-free. */
-#include <asm/linkage.h>
+#include <linux/linkage.h>
-ARC_ENTRY strchr
+ENTRY(strchr)
extb_s r1,r1
asl r5,r1,8
bmsk r2,r0,1
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
ld.a r2,[r0,4]
sub r12,r6,r7
bic r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
and r7,r12,r4
breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
b .Lfound_char ; Likewise this one.
+#else
+ and r12,r12,r4
+ breq r12,0,.Loop ; For speed, we want this branch to be unaligned.
+ lsr_s r12,r12,7
+ bic r2,r7,r6
+ b.d .Lfound_char_b
+ and_s r2,r2,r12
+#endif
; /* We require this code address to be unaligned for speed... */
.Laligned:
ld_s r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
lsr r7,r7,7
bic r2,r7,r6
+.Lfound_char_b:
norm r2,r2
sub_s r0,r0,4
asr_s r2,r2,3
@@ -120,4 +130,4 @@ ARC_ENTRY strchr
j_s.d [blink]
mov.mi r0,0
#endif /* ENDIAN */
-ARC_EXIT strchr
+END(strchr)
diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S
index 5dc802b45cf..3544600fefe 100644
--- a/arch/arc/lib/strcmp.S
+++ b/arch/arc/lib/strcmp.S
@@ -13,9 +13,9 @@
source 1; however, that would increase the overhead for loop setup / finish,
and strcmp might often terminate early. */
-#include <asm/linkage.h>
+#include <linux/linkage.h>
-ARC_ENTRY strcmp
+ENTRY(strcmp)
or r2,r0,r1
bmsk_s r2,r2,1
brne r2,0,.Lcharloop
@@ -93,4 +93,4 @@ ARC_ENTRY strcmp
.Lcmpend:
j_s.d [blink]
sub r0,r2,r3
-ARC_EXIT strcmp
+END(strcmp)
diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S
index b7ca4ae81d8..8422f38e121 100644
--- a/arch/arc/lib/strcpy-700.S
+++ b/arch/arc/lib/strcpy-700.S
@@ -16,9 +16,9 @@
there, but the it is not likely to be taken often, and it
would also be likey to cost an unaligned mispredict at the next call. */
-#include <asm/linkage.h>
+#include <linux/linkage.h>
-ARC_ENTRY strcpy
+ENTRY(strcpy)
or r2,r0,r1
bmsk_s r2,r2,1
brne.d r2,0,charloop
@@ -67,4 +67,4 @@ charloop:
brne.d r3,0,charloop
stb.ab r3,[r10,1]
j [blink]
-ARC_EXIT strcpy
+END(strcpy)
diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S
index 39759e09969..53cfd5685a5 100644
--- a/arch/arc/lib/strlen.S
+++ b/arch/arc/lib/strlen.S
@@ -6,9 +6,9 @@
* published by the Free Software Foundation.
*/
-#include <asm/linkage.h>
+#include <linux/linkage.h>
-ARC_ENTRY strlen
+ENTRY(strlen)
or r3,r0,7
ld r2,[r3,-7]
ld.a r6,[r3,-3]
@@ -80,4 +80,4 @@ ARC_ENTRY strlen
.Learly_end:
b.d .Lend
sub_s.ne r1,r1,r1
-ARC_EXIT strlen
+END(strlen)