Diffstat (limited to 'arch/parisc/lib')

 arch/parisc/lib/Makefile    |   3
 arch/parisc/lib/bitops.c    |  12
 arch/parisc/lib/checksum.c  |  19
 arch/parisc/lib/delay.c     |  73
 arch/parisc/lib/fixup.S     |  29
 arch/parisc/lib/iomap.c     |  30
 arch/parisc/lib/lusercopy.S |  74
 arch/parisc/lib/memcpy.c    | 148
 arch/parisc/lib/ucmpdi2.c   |  25

 9 files changed, 222 insertions(+), 191 deletions(-)
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 5f2e6904d14..8fa92b8d839 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -2,6 +2,7 @@
# Makefile for parisc-specific library files
#
-lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o
+lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+ ucmpdi2.o delay.o
obj-y := iomap.o
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index 90f400b1028..187118841af 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -6,19 +6,17 @@
* Copyright 2000 Grant Grundler (grundler@cup.hp.com)
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#ifdef CONFIG_SMP
-raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
- [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED
+arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
+ [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED
};
#endif
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
unsigned long __xchg64(unsigned long x, unsigned long *ptr)
{
unsigned long temp, flags;
@@ -57,7 +55,7 @@ unsigned long __xchg8(char x, char *ptr)
}
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new)
{
unsigned long flags;
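
The hashed locks above are the whole atomic story on SMP parisc: the architecture's only atomic read-modify-write instruction is ldcw, so the __xchg*/__cmpxchg* helpers take the arch_spinlock_t that the target address hashes to and do the update under it. A minimal userspace analogue of that scheme (pthread spinlocks; HASH_SIZE and the cache-line divisor are made up for illustration):

#include <pthread.h>
#include <stdio.h>

#define HASH_SIZE 16	/* made up; the kernel uses ATOMIC_HASH_SIZE */

static pthread_spinlock_t hash[HASH_SIZE];

/* hash the target address to one lock, at cache-line granularity */
static pthread_spinlock_t *hash_lock(void *addr)
{
	return &hash[((unsigned long)addr / 64) & (HASH_SIZE - 1)];
}

static unsigned long xchg_emulated(unsigned long x, unsigned long *ptr)
{
	pthread_spinlock_t *lock = hash_lock(ptr);
	unsigned long old;

	pthread_spin_lock(lock);
	old = *ptr;
	*ptr = x;
	pthread_spin_unlock(lock);
	return old;
}

int main(void)
{
	unsigned long v = 1;

	for (int i = 0; i < HASH_SIZE; i++)
		pthread_spin_init(&hash[i], PTHREAD_PROCESS_PRIVATE);
	printf("old=%lu new=%lu\n", xchg_emulated(42, &v), v); /* old=1 new=42 */
	return 0;
}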
diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c
index 8a1e08068e7..ae66d31f9ec 100644
--- a/arch/parisc/lib/checksum.c
+++ b/arch/parisc/lib/checksum.c
@@ -13,8 +13,6 @@
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
- *
- * $Id: checksum.c,v 1.3 1997/12/01 17:57:34 ralf Exp $
*/
#include <linux/module.h>
#include <linux/types.h>
@@ -101,11 +99,14 @@ out:
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
-unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
+/*
+ * why bother folding?
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned int result = do_csum(buff, len);
addc(result, sum);
- return from32to16(result);
+ return (__force __wsum)from32to16(result);
}
EXPORT_SYMBOL(csum_partial);
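
csum_partial folds the 32-bit running sum down to 16 bits through the file's from32to16 helper before returning it. A standalone sketch of that fold, assuming the usual two-step reduction (the second add absorbs the carry the first one can produce):

#include <stdio.h>
#include <stdint.h>

static uint16_t fold32to16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* 32 bits -> 16 bits + carry */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb that carry */
	return (uint16_t)sum;
}

int main(void)
{
	printf("%04x\n", fold32to16(0x0001fffe));	/* ffff */
	printf("%04x\n", fold32to16(0x12345678));	/* 68ac */
	return 0;
}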
@@ -113,8 +114,8 @@ EXPORT_SYMBOL(csum_partial);
/*
* copy while checksumming, otherwise like csum_partial
*/
-unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst,
- int len, unsigned int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum)
{
/*
* It's 2:30 am and I don't feel like doing it real ...
@@ -131,9 +132,9 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
* Copy from userspace and compute checksum. If we catch an exception
* then zero the rest of the buffer.
*/
-unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
- unsigned char *dst, int len,
- unsigned int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src,
+ void *dst, int len,
+ __wsum sum, int *err_ptr)
{
int missing;
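
The hunk is cut off here, but the contract stated in the comment is clear: copy as much as possible, zero the uncopied tail of dst, report -EFAULT through err_ptr, and checksum the now fully defined buffer. A userspace sketch with a simulated fault point (copy_with_fault and its constants are invented):

#include <stdio.h>
#include <string.h>

static int copy_with_fault(char *dst, const char *src, int len, int fault_at)
{
	int copied = fault_at < len ? fault_at : len;

	memcpy(dst, src, copied);
	return len - copied;		/* bytes that were not copied */
}

int main(void)
{
	char src[8] = "ABCDEFGH", dst[8];
	int len = 8, err = 0;
	int missing = copy_with_fault(dst, src, len, 5); /* fault after 5 */

	if (missing) {
		memset(dst + len - missing, 0, missing);
		err = -14;		/* -EFAULT */
	}
	printf("missing=%d err=%d dst=%.8s\n", missing, err, dst);
	return 0;
}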
diff --git a/arch/parisc/lib/delay.c b/arch/parisc/lib/delay.c
new file mode 100644
index 00000000000..ec9255f27a8
--- /dev/null
+++ b/arch/parisc/lib/delay.c
@@ -0,0 +1,73 @@
+/*
+ * Precise Delay Loops for parisc
+ *
+ * based on code by:
+ * Copyright (C) 1993 Linus Torvalds
+ * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ * Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
+ *
+ * parisc implementation:
+ * Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ */
+
+
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/init.h>
+
+#include <asm/processor.h>
+#include <asm/delay.h>
+
+#include <asm/special_insns.h> /* for mfctl() */
+#include <asm/processor.h> /* for boot_cpu_data */
+
+/* CR16 based delay: */
+static void __cr16_delay(unsigned long __loops)
+{
+ /*
+ * Note: Due to unsigned math, cr16 rollovers shouldn't be
+ * a problem here. However, on 32 bit, we need to make sure
+ * we don't pass in too big a value. The current default
+ * value of MAX_UDELAY_MS should help prevent this.
+ */
+ u32 bclock, now, loops = __loops;
+ int cpu;
+
+ preempt_disable();
+ cpu = smp_processor_id();
+ bclock = mfctl(16);
+ for (;;) {
+ now = mfctl(16);
+ if ((now - bclock) >= loops)
+ break;
+
+ /* Allow RT tasks to run */
+ preempt_enable();
+ asm volatile(" nop\n");
+ barrier();
+ preempt_disable();
+
+ /*
+ * It is possible that we moved to another CPU, and
+ * since CR16's are per-cpu we need to calculate
+ * that. The delay must guarantee that we wait "at
+ * least" the amount of time. Being moved to another
+ * CPU could make the wait longer but we just need to
+ * make sure we waited long enough. Rebalance the
+ * counter for this CPU.
+ */
+ if (unlikely(cpu != smp_processor_id())) {
+ loops -= (now - bclock);
+ cpu = smp_processor_id();
+ bclock = mfctl(16);
+ }
+ }
+ preempt_enable();
+}
+
+
+void __udelay(unsigned long usecs)
+{
+ __cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
+}
+EXPORT_SYMBOL(__udelay);
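
__udelay converts microseconds into CR16 ticks before entering the polling loop above; CR16 advances at the CPU clock, hence the cpu_hz / 10^6 scale factor. A worked example with an invented clock rate:

#include <stdio.h>

int main(void)
{
	unsigned long cpu_hz = 440000000UL;	/* hypothetical 440 MHz box */
	unsigned long usecs = 100;
	unsigned long loops = usecs * (cpu_hz / 1000000UL);

	/* 100 us -> 44000 CR16 ticks for __cr16_delay() to spin through */
	printf("%lu us -> %lu ticks\n", usecs, loops);
	return 0;
}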
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index e0661c2978e..f8c45cc2947 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -19,10 +19,10 @@
*
* Fixup routines for kernel exception handling.
*/
-#include <linux/config.h>
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/errno.h>
+#include <linux/linkage.h>
#ifdef CONFIG_SMP
.macro get_fault_ip t1 t2
@@ -31,13 +31,13 @@
/* t2 = smp_processor_id() */
mfctl 30,\t2
ldw TI_CPU(\t2),\t2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
extrd,u \t2,63,32,\t2
#endif
/* t2 = &__per_cpu_offset[smp_processor_id()]; */
LDREGX \t2(\t1),\t2
- addil LT%per_cpu__exception_data,%r27
- LDREG RT%per_cpu__exception_data(%r1),\t1
+ addil LT%exception_data,%r27
+ LDREG RT%exception_data(%r1),\t1
/* t1 = &__get_cpu_var(exception_data) */
add,l \t1,\t2,\t1
/* t1 = t1->fault_ip */
@@ -46,8 +46,8 @@
#else
.macro get_fault_ip t1 t2
/* t1 = &__get_cpu_var(exception_data) */
- addil LT%per_cpu__exception_data,%r27
- LDREG RT%per_cpu__exception_data(%r1),\t2
+ addil LT%exception_data,%r27
+ LDREG RT%exception_data(%r1),\t2
/* t1 = t2->fault_ip */
LDREG EXCDATA_IP(\t2), \t1
.endm
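
Both variants of get_fault_ip compute the address of the current CPU's exception_data and load its fault_ip slot (the EXCDATA_IP offset). A rough runnable C rendering of the SMP path, with the per-cpu machinery faked and the field order assumed for illustration only:

#include <stdio.h>

/* assumed layout, for illustration */
struct exception_data {
	unsigned long fault_ip;		/* what EXCDATA_IP points at */
	unsigned long fault_space;
	unsigned long fault_addr;
};

#define NR_CPUS 2
static struct exception_data percpu_area[NR_CPUS];	/* fake per-cpu copies */
static unsigned long __per_cpu_offset[NR_CPUS];

static unsigned long get_fault_ip(int cpu)
{
	/* &__get_cpu_var(exception_data), then ->fault_ip */
	struct exception_data *d = (struct exception_data *)
		((char *)&percpu_area[0] + __per_cpu_offset[cpu]);
	return d->fault_ip;
}

int main(void)
{
	__per_cpu_offset[1] = sizeof(struct exception_data);
	percpu_area[1].fault_ip = 0xbeef;
	printf("cpu 1 fault_ip = %#lx\n", get_fault_ip(1));
	return 0;
}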
@@ -59,33 +59,34 @@
.section .fixup, "ax"
/* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
- .export fixup_get_user_skip_1
-fixup_get_user_skip_1:
+ENTRY(fixup_get_user_skip_1)
get_fault_ip %r1,%r8
ldo 4(%r1), %r1
ldi -EFAULT, %r8
bv %r0(%r1)
copy %r0, %r9
+ENDPROC(fixup_get_user_skip_1)
- .export fixup_get_user_skip_2
-fixup_get_user_skip_2:
+ENTRY(fixup_get_user_skip_2)
get_fault_ip %r1,%r8
ldo 8(%r1), %r1
ldi -EFAULT, %r8
bv %r0(%r1)
copy %r0, %r9
+ENDPROC(fixup_get_user_skip_2)
/* put_user() fixups, store -EFAULT in r8 */
- .export fixup_put_user_skip_1
-fixup_put_user_skip_1:
+ENTRY(fixup_put_user_skip_1)
get_fault_ip %r1,%r8
ldo 4(%r1), %r1
bv %r0(%r1)
ldi -EFAULT, %r8
+ENDPROC(fixup_put_user_skip_1)
- .export fixup_put_user_skip_2
-fixup_put_user_skip_2:
+ENTRY(fixup_put_user_skip_2)
get_fault_ip %r1,%r8
ldo 8(%r1), %r1
bv %r0(%r1)
ldi -EFAULT, %r8
+ENDPROC(fixup_put_user_skip_2)
+
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index 01bec8fcbd0..fb8e10a4fb3 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -5,6 +5,7 @@
#include <linux/ioport.h>
#include <linux/pci.h>
+#include <linux/export.h>
#include <asm/io.h>
/*
@@ -261,13 +262,9 @@ static const struct iomap_ops iomem_ops = {
iomem_write32r,
};
-const struct iomap_ops *iomap_ops[8] = {
+static const struct iomap_ops *iomap_ops[8] = {
[0] = &ioport_ops,
-#ifdef CONFIG_DEBUG_IOREMAP
- [6] = &iomem_ops,
-#else
[7] = &iomem_ops
-#endif
};
@@ -439,28 +436,6 @@ void ioport_unmap(void __iomem *addr)
}
}
-/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
- unsigned long start = pci_resource_start(dev, bar);
- unsigned long len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
-
- if (!len || !start)
- return NULL;
- if (maxlen && len > maxlen)
- len = maxlen;
- if (flags & IORESOURCE_IO)
- return ioport_map(start, len);
- if (flags & IORESOURCE_MEM) {
- if (flags & IORESOURCE_CACHEABLE)
- return ioremap(start, len);
- return ioremap_nocache(start, len);
- }
- /* What? */
- return NULL;
-}
-
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
if (!INDIRECT_ADDR(addr)) {
@@ -486,5 +461,4 @@ EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
-EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index a0509855c9a..a512f07d4fe 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -37,6 +37,7 @@
#include <asm/assembly.h>
#include <asm/errno.h>
+#include <linux/linkage.h>
/*
* get_sr gets the appropriate space value into
@@ -60,60 +61,13 @@
.endm
/*
- * long lstrncpy_from_user(char *dst, const char *src, long n)
- *
- * Returns -EFAULT if exception before terminator,
- * N if the entire buffer filled,
- * otherwise strlen (i.e. excludes zero byte)
- */
-
- .export lstrncpy_from_user,code
-lstrncpy_from_user:
- .proc
- .callinfo NO_CALLS
- .entry
- comib,= 0,%r24,$lsfu_done
- copy %r24,%r23
- get_sr
-1: ldbs,ma 1(%sr1,%r25),%r1
-$lsfu_loop:
- stbs,ma %r1,1(%r26)
- comib,=,n 0,%r1,$lsfu_done
- addib,<>,n -1,%r24,$lsfu_loop
-2: ldbs,ma 1(%sr1,%r25),%r1
-$lsfu_done:
- sub %r23,%r24,%r28
-$lsfu_exit:
- bv %r0(%r2)
- nop
- .exit
-
- .section .fixup,"ax"
-3: fixup_branch $lsfu_exit
- ldi -EFAULT,%r28
- .previous
-
- .section __ex_table,"aw"
-#ifdef __LP64__
- .dword 1b,3b
- .dword 2b,3b
-#else
- .word 1b,3b
- .word 2b,3b
-#endif
- .previous
-
- .procend
-
- /*
* unsigned long lclear_user(void *to, unsigned long n)
*
* Returns 0 for success.
* otherwise, returns number of bytes not transferred.
*/
- .export lclear_user,code
-lclear_user:
+ENTRY(lclear_user)
.proc
.callinfo NO_CALLS
.entry
@@ -127,19 +81,14 @@ $lclu_done:
bv %r0(%r2)
copy %r25,%r28
.exit
+ENDPROC(lclear_user)
.section .fixup,"ax"
2: fixup_branch $lclu_done
ldo 1(%r25),%r25
.previous
- .section __ex_table,"aw"
-#ifdef __LP64__
- .dword 1b,2b
-#else
- .word 1b,2b
-#endif
- .previous
+ ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
.procend
@@ -151,8 +100,7 @@ $lclu_done:
* else strlen + 1 (i.e. includes zero byte).
*/
- .export lstrnlen_user,code
-lstrnlen_user:
+ENTRY(lstrnlen_user)
.proc
.callinfo NO_CALLS
.entry
@@ -172,21 +120,15 @@ $lslen_done:
$lslen_nzero:
b $lslen_done
ldo 1(%r26),%r26 /* special case for N == 0 */
+ENDPROC(lstrnlen_user)
.section .fixup,"ax"
3: fixup_branch $lslen_done
copy %r24,%r26 /* reset r26 so 0 is returned on fault */
.previous
- .section __ex_table,"aw"
-#ifdef __LP64__
- .dword 1b,3b
- .dword 2b,3b
-#else
- .word 1b,3b
- .word 2b,3b
-#endif
- .previous
+ ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
+ ASM_EXCEPTIONTABLE_ENTRY(2b,3b)
.procend
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index b7098035321..b2b441b3234 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,6 +2,7 @@
* Optimized memory copy routines.
*
* Copyright (C) 2004 Randolph Chung <tausq@debian.org>
+ * Copyright (C) 2013 Helge Deller <deller@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -53,10 +54,9 @@
*/
#ifdef __KERNEL__
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/compiler.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define s_space "%%sr1"
#define d_space "%%sr2"
#else
@@ -69,7 +69,7 @@
DECLARE_PER_CPU(struct exception_data, exception_data);
#define preserve_branch(label) do { \
- volatile int dummy; \
+ volatile int dummy = 0; \
/* The following branch is never taken, it's just here to */ \
/* prevent gcc from optimizing away our exception code. */ \
if (unlikely(dummy != dummy)) \
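
The new = 0 initializer silences an uninitialized-read warning without defeating the trick: dummy is volatile, so both reads must actually be performed and the compiler cannot prove the branch dead, which keeps the fixup label and its code in the object file. A standalone demonstration:

#include <stdio.h>

#define preserve_branch(label) do {			\
	volatile int dummy = 0;				\
	/* never taken at run time, but not provably so */ \
	if (dummy != dummy)				\
		goto label;				\
} while (0)

int main(void)
{
	preserve_branch(handle_error);
	printf("normal path\n");
	return 0;

handle_error:
	printf("kept alive for the exception fixup\n");
	return 1;
}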
@@ -92,35 +92,23 @@ DECLARE_PER_CPU(struct exception_data, exception_data);
#define THRESHOLD 16
#ifdef DEBUG_MEMCPY
-#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __FUNCTION__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
+#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
#else
#define DPRINTF(fmt, args...)
#endif
-#ifndef __LP64__
-#define EXC_WORD ".word"
-#else
-#define EXC_WORD ".dword"
-#endif
-
#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
__asm__ __volatile__ ( \
- "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n" \
- "\t.section __ex_table,\"aw\"\n" \
- "\t" EXC_WORD "\t1b\n" \
- "\t" EXC_WORD "\t" #_e "\n" \
- "\t.previous\n" \
+ "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
: _tt(_t), "+r"(_a) \
: \
: "r8")
#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
__asm__ __volatile__ ( \
- "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n" \
- "\t.section __ex_table,\"aw\"\n" \
- "\t" EXC_WORD "\t1b\n" \
- "\t" EXC_WORD "\t" #_e "\n" \
- "\t.previous\n" \
+ "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
: "+r"(_a) \
: _tt(_t) \
: "r8")
@@ -134,22 +122,16 @@ DECLARE_PER_CPU(struct exception_data, exception_data);
#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
__asm__ __volatile__ ( \
- "1:\t" #_insn " " #_o "(" _s ",%1), %0\n" \
- "\t.section __ex_table,\"aw\"\n" \
- "\t" EXC_WORD "\t1b\n" \
- "\t" EXC_WORD "\t" #_e "\n" \
- "\t.previous\n" \
+ "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
: _tt(_t) \
: "r"(_a) \
: "r8")
#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
__asm__ __volatile__ ( \
- "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n" \
- "\t.section __ex_table,\"aw\"\n" \
- "\t" EXC_WORD "\t1b\n" \
- "\t" EXC_WORD "\t" #_e "\n" \
- "\t.previous\n" \
+ "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \
+ ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
: \
: _tt(_t), "r"(_a) \
: "r8")
@@ -158,31 +140,35 @@ DECLARE_PER_CPU(struct exception_data, exception_data);
#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e)
#ifdef CONFIG_PREFETCH
-extern inline void prefetch_src(const void *addr)
+static inline void prefetch_src(const void *addr)
{
__asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
}
-extern inline void prefetch_dst(const void *addr)
+static inline void prefetch_dst(const void *addr)
{
__asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
}
#else
-#define prefetch_src(addr)
-#define prefetch_dst(addr)
+#define prefetch_src(addr) do { } while(0)
+#define prefetch_dst(addr) do { } while(0)
#endif
+#define PA_MEMCPY_OK 0
+#define PA_MEMCPY_LOAD_ERROR 1
+#define PA_MEMCPY_STORE_ERROR 2
+
/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
* per loop. This code is derived from glibc.
*/
-static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len, unsigned long o_dst, unsigned long o_src, unsigned long o_len)
+static noinline unsigned long copy_dstaligned(unsigned long dst,
+ unsigned long src, unsigned long len)
{
/* gcc complains that a2 and a3 may be uninitialized, but actually
* they cannot be. Initialize a2/a3 to shut gcc up.
*/
register unsigned int a0, a1, a2 = 0, a3 = 0;
int sh_1, sh_2;
- struct exception_data *d;
/* prefetch_src((const void *)src); */
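
For reference, the shift technique copy_dstaligned implements: with dst word-aligned and src offset by a known byte count, each output word is assembled from two adjacent aligned source words. A standalone big-endian-style illustration (the byte-wise loader stands in for the kernel's ldw):

#include <stdio.h>
#include <stdint.h>

static uint32_t load_word(const unsigned char *p)	/* stand-in for ldw */
{
	return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
	       (uint32_t)p[2] << 8  | (uint32_t)p[3];
}

int main(void)
{
	unsigned char src[12] = "ABCDEFGHIJK";
	int off = 1;				/* source misalignment in bytes */
	int sh_1 = 8 * off, sh_2 = 32 - sh_1;
	uint32_t a = load_word(src);		/* aligned word holding byte 0 */
	uint32_t b = load_word(src + 4);	/* next aligned word */
	uint32_t out = (a << sh_1) | (b >> sh_2);

	printf("%08x\n", (unsigned)out);	/* 42434445 == "BCDE" */
	return 0;
}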
@@ -216,7 +202,7 @@ static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src
goto do2;
case 0:
if (len == 0)
- return 0;
+ return PA_MEMCPY_OK;
/* a3 = ((unsigned int *) src)[0];
a0 = ((unsigned int *) src)[1]; */
ldw(s_space, 0, src, a3, cda_ldw_exc);
@@ -275,42 +261,35 @@ do0:
preserve_branch(handle_load_error);
preserve_branch(handle_store_error);
- return 0;
+ return PA_MEMCPY_OK;
handle_load_error:
__asm__ __volatile__ ("cda_ldw_exc:\n");
- d = &__get_cpu_var(exception_data);
- DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
- o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
- return o_len * 4 - d->fault_addr + o_src;
+ return PA_MEMCPY_LOAD_ERROR;
handle_store_error:
__asm__ __volatile__ ("cda_stw_exc:\n");
- d = &__get_cpu_var(exception_data);
- DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
- o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
- return o_len * 4 - d->fault_addr + o_dst;
+ return PA_MEMCPY_STORE_ERROR;
}
-/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
+ * In case of an access fault the faulty address can be read from the per_cpu
+ * exception data struct. */
+static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
+ unsigned long len)
{
register unsigned long src, dst, t1, t2, t3;
register unsigned char *pcs, *pcd;
register unsigned int *pws, *pwd;
register double *pds, *pdd;
- unsigned long ret = 0;
- unsigned long o_dst, o_src, o_len;
- struct exception_data *d;
+ unsigned long ret;
src = (unsigned long)srcp;
dst = (unsigned long)dstp;
pcs = (unsigned char *)srcp;
pcd = (unsigned char *)dstp;
- o_dst = dst; o_src = src; o_len = len;
-
/* prefetch_src((const void *)srcp); */
if (len < THRESHOLD)
@@ -420,11 +399,11 @@ byte_copy:
len--;
}
- return 0;
+ return PA_MEMCPY_OK;
unaligned_copy:
/* possibly we are aligned on a word, but not on a double... */
- if (likely(t1 & (sizeof(unsigned int)-1)) == 0) {
+ if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
t2 = src & (sizeof(unsigned int) - 1);
if (unlikely(t2 != 0)) {
@@ -457,8 +436,7 @@ unaligned_copy:
src = (unsigned long)pcs;
}
- ret = copy_dstaligned(dst, src, len / sizeof(unsigned int),
- o_dst, o_src, o_len);
+ ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
if (ret)
return ret;
@@ -473,17 +451,41 @@ unaligned_copy:
handle_load_error:
__asm__ __volatile__ ("pmc_load_exc:\n");
- d = &__get_cpu_var(exception_data);
- DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
- o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
- return o_len - d->fault_addr + o_src;
+ return PA_MEMCPY_LOAD_ERROR;
handle_store_error:
__asm__ __volatile__ ("pmc_store_exc:\n");
- d = &__get_cpu_var(exception_data);
- DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
- o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
- return o_len - d->fault_addr + o_dst;
+ return PA_MEMCPY_STORE_ERROR;
+}
+
+
+/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
+static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+{
+ unsigned long ret, fault_addr, reference;
+ struct exception_data *d;
+
+ ret = pa_memcpy_internal(dstp, srcp, len);
+ if (likely(ret == PA_MEMCPY_OK))
+ return 0;
+
+ /* if a load or store fault occurred we can get the faulty addr */
+ d = this_cpu_ptr(&exception_data);
+ fault_addr = d->fault_addr;
+
+ /* error in load or store? */
+ if (ret == PA_MEMCPY_LOAD_ERROR)
+ reference = (unsigned long) srcp;
+ else
+ reference = (unsigned long) dstp;
+
+ DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
+ ret, len, fault_addr, reference);
+
+ if (fault_addr >= reference)
+ return len - (fault_addr - reference);
+ else
+ return len;
}
#ifdef __KERNEL__
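
The pa_memcpy wrapper above converts the error class plus the per-cpu fault address back into the "bytes not transferred" count the copy_*_user API requires. A worked example with invented addresses:

#include <stdio.h>

int main(void)
{
	unsigned long len = 100;
	unsigned long reference = 0x1000;	/* srcp, for a load fault */
	unsigned long fault_addr = 0x1028;	/* faulted 40 bytes in */
	unsigned long remaining;

	if (fault_addr >= reference)
		remaining = len - (fault_addr - reference);
	else
		remaining = len;		/* fault before the buffer */

	printf("bytes not transferred: %lu\n", remaining);	/* 60 */
	return 0;
}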
@@ -494,7 +496,8 @@ unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len)
return pa_memcpy((void __force *)dst, src, len);
}
-unsigned long copy_from_user(void *dst, const void __user *src, unsigned long len)
+EXPORT_SYMBOL(__copy_from_user);
+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len)
{
mtsp(get_user_space(), 1);
mtsp(get_kernel_space(), 2);
@@ -521,4 +524,17 @@ EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_in_user);
EXPORT_SYMBOL(memcpy);
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+ unsigned long addr = (unsigned long)src;
+
+ if (addr < PAGE_SIZE)
+ return -EFAULT;
+
+ /* check for I/O space F_EXTEND(0xfff00000) access as well? */
+
+ return __probe_kernel_read(dst, src, size);
+}
+
#endif
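
The probe_kernel_read override rejects any source inside the first page before delegating to the generic __probe_kernel_read, so NULL-page pointers come back as -EFAULT instead of being read. A userspace analogue of the guard (PAGE_SIZE assumed 4096, memcpy standing in for the real read):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define EFAULT 14

static long checked_read(void *dst, const void *src, size_t size)
{
	if ((unsigned long)src < PAGE_SIZE)
		return -EFAULT;			/* NULL-page source */
	memcpy(dst, src, size);			/* "__probe_kernel_read" */
	return 0;
}

int main(void)
{
	int val = 42, out = 0;

	printf("%ld\n", checked_read(&out, NULL, sizeof(out)));	/* -14 */
	printf("%ld %d\n", checked_read(&out, &val, sizeof(out)), out);	/* 0 42 */
	return 0;
}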
diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c
new file mode 100644
index 00000000000..149c016f32c
--- /dev/null
+++ b/arch/parisc/lib/ucmpdi2.c
@@ -0,0 +1,25 @@
+#include <linux/module.h>
+
+union ull_union {
+ unsigned long long ull;
+ struct {
+ unsigned int high;
+ unsigned int low;
+ } ui;
+};
+
+int __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+ union ull_union au = {.ull = a};
+ union ull_union bu = {.ull = b};
+
+ if (au.ui.high < bu.ui.high)
+ return 0;
+ else if (au.ui.high > bu.ui.high)
+ return 2;
+ if (au.ui.low < bu.ui.low)
+ return 0;
+ else if (au.ui.low > bu.ui.low)
+ return 2;
+ return 1;
+}
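
__ucmpdi2 follows the libgcc convention: 0 for a < b, 1 for a == b, 2 for a > b. gcc emits calls to this helper for unsigned 64-bit comparisons on 32-bit targets, so a kernel linked without libgcc must supply its own; note the high word sits first in the union, matching parisc's big-endian layout. A quick standalone check of the convention:

#include <stdio.h>

static int ucmpdi2(unsigned long long a, unsigned long long b)
{
	return (a > b) - (a < b) + 1;	/* 0, 1 or 2 */
}

int main(void)
{
	printf("%d %d %d\n",
	       ucmpdi2(1ULL, 2ULL),		/* 0: less */
	       ucmpdi2(5ULL, 5ULL),		/* 1: equal */
	       ucmpdi2(1ULL << 40, 1ULL));	/* 2: greater */
	return 0;
}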