Diffstat (limited to 'arch/parisc/lib/memcpy.c')
-rw-r--r--  arch/parisc/lib/memcpy.c  148
1 file changed, 82 insertions, 66 deletions
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index b7098035321..b2b441b3234 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,6 +2,7 @@
  * Optimized memory copy routines.
  *
  * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
+ * Copyright (C) 2013 Helge Deller <deller@gmx.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -53,10 +54,9 @@
  */

 #ifdef __KERNEL__
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/compiler.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #define s_space "%%sr1"
 #define d_space "%%sr2"
 #else
@@ -69,7 +69,7 @@ DECLARE_PER_CPU(struct exception_data, exception_data);

 #define preserve_branch(label)	do {					\
-	volatile int dummy;						\
+	volatile int dummy = 0;						\
 	/* The following branch is never taken, it's just here to */	\
 	/* prevent gcc from optimizing away our exception code. */	\
 	if (unlikely(dummy != dummy))					\
@@ -92,35 +92,23 @@ DECLARE_PER_CPU(struct exception_data, exception_data);
 #define THRESHOLD	16

 #ifdef DEBUG_MEMCPY
-#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __FUNCTION__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
+#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
 #else
 #define DPRINTF(fmt, args...)
 #endif

-#ifndef __LP64__
-#define EXC_WORD ".word"
-#else
-#define EXC_WORD ".dword"
-#endif
-
 #define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e)	\
 	__asm__ __volatile__ (				\
-	"1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n"	\
-	"\t.section __ex_table,\"aw\"\n"		\
-	"\t" EXC_WORD "\t1b\n"				\
-	"\t" EXC_WORD "\t" #_e "\n"			\
-	"\t.previous\n"					\
+	"1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t"	\
+	ASM_EXCEPTIONTABLE_ENTRY(1b,_e)			\
 	: _tt(_t), "+r"(_a)				\
 	:						\
 	: "r8")

 #define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e)	\
 	__asm__ __volatile__ (				\
-	"1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n"	\
-	"\t.section __ex_table,\"aw\"\n"		\
-	"\t" EXC_WORD "\t1b\n"				\
-	"\t" EXC_WORD "\t" #_e "\n"			\
-	"\t.previous\n"					\
+	"1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t"	\
+	ASM_EXCEPTIONTABLE_ENTRY(1b,_e)			\
 	: "+r"(_a)					\
 	: _tt(_t)					\
 	: "r8")
@@ -134,22 +122,16 @@ DECLARE_PER_CPU(struct exception_data, exception_data);

 #define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e)		\
 	__asm__ __volatile__ (				\
-	"1:\t" #_insn " " #_o "(" _s ",%1), %0\n"	\
-	"\t.section __ex_table,\"aw\"\n"		\
-	"\t" EXC_WORD "\t1b\n"				\
-	"\t" EXC_WORD "\t" #_e "\n"			\
-	"\t.previous\n"					\
+	"1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t"	\
+	ASM_EXCEPTIONTABLE_ENTRY(1b,_e)			\
 	: _tt(_t)					\
 	: "r"(_a)					\
 	: "r8")

 #define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e)	\
 	__asm__ __volatile__ (				\
-	"1:\t" #_insn " %0, " #_o "(" _s ",%1)\n"	\
-	"\t.section __ex_table,\"aw\"\n"		\
-	"\t" EXC_WORD "\t1b\n"				\
-	"\t" EXC_WORD "\t" #_e "\n"			\
-	"\t.previous\n"					\
+	"1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t"	\
+	ASM_EXCEPTIONTABLE_ENTRY(1b,_e)			\
 	:						\
 	: _tt(_t), "r"(_a)				\
 	: "r8")
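The ASM_EXCEPTIONTABLE_ENTRY() macro that replaces the hand-rolled ".section __ex_table" sequences above is defined in the parisc headers, not in this file. As a hedged sketch only, modeled directly on the removed lines (where EXC_WORD was ".word" on 32-bit and ".dword" on 64-bit builds), its expansion plausibly looks like this; ASM_WORD_INSN is a name local to the sketch:

/* Illustrative reconstruction, not the authoritative definition
 * (that lives under arch/parisc/include/asm/).  The macro emits a
 * (faulting instruction, fixup label) address pair into __ex_table,
 * which is exactly what the removed EXC_WORD boilerplate spelled out. */
#ifdef CONFIG_64BIT
#define ASM_WORD_INSN	".dword\t"
#else
#define ASM_WORD_INSN	".word\t"
#endif

#define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr)	\
	"\t.section __ex_table,\"aw\"\n"			\
	"\t" ASM_WORD_INSN #fault_addr ", " #except_addr "\n"	\
	"\t.previous\n"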
"r" (addr)); } #else -#define prefetch_src(addr) -#define prefetch_dst(addr) +#define prefetch_src(addr) do { } while(0) +#define prefetch_dst(addr) do { } while(0) #endif +#define PA_MEMCPY_OK 0 +#define PA_MEMCPY_LOAD_ERROR 1 +#define PA_MEMCPY_STORE_ERROR 2 + /* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words * per loop. This code is derived from glibc. */ -static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len, unsigned long o_dst, unsigned long o_src, unsigned long o_len) +static noinline unsigned long copy_dstaligned(unsigned long dst, + unsigned long src, unsigned long len) { /* gcc complains that a2 and a3 may be uninitialized, but actually * they cannot be. Initialize a2/a3 to shut gcc up. */ register unsigned int a0, a1, a2 = 0, a3 = 0; int sh_1, sh_2; - struct exception_data *d; /* prefetch_src((const void *)src); */ @@ -216,7 +202,7 @@ static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src goto do2; case 0: if (len == 0) - return 0; + return PA_MEMCPY_OK; /* a3 = ((unsigned int *) src)[0]; a0 = ((unsigned int *) src)[1]; */ ldw(s_space, 0, src, a3, cda_ldw_exc); @@ -275,42 +261,35 @@ do0: preserve_branch(handle_load_error); preserve_branch(handle_store_error); - return 0; + return PA_MEMCPY_OK; handle_load_error: __asm__ __volatile__ ("cda_ldw_exc:\n"); - d = &__get_cpu_var(exception_data); - DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n", - o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src); - return o_len * 4 - d->fault_addr + o_src; + return PA_MEMCPY_LOAD_ERROR; handle_store_error: __asm__ __volatile__ ("cda_stw_exc:\n"); - d = &__get_cpu_var(exception_data); - DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n", - o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst); - return o_len * 4 - d->fault_addr + o_dst; + return PA_MEMCPY_STORE_ERROR; } -/* Returns 0 for success, otherwise, returns number of bytes not transferred. */ -unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) +/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR. + * In case of an access fault the faulty address can be read from the per_cpu + * exception data struct. */ +static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp, + unsigned long len) { register unsigned long src, dst, t1, t2, t3; register unsigned char *pcs, *pcd; register unsigned int *pws, *pwd; register double *pds, *pdd; - unsigned long ret = 0; - unsigned long o_dst, o_src, o_len; - struct exception_data *d; + unsigned long ret; src = (unsigned long)srcp; dst = (unsigned long)dstp; pcs = (unsigned char *)srcp; pcd = (unsigned char *)dstp; - o_dst = dst; o_src = src; o_len = len; - /* prefetch_src((const void *)srcp); */ if (len < THRESHOLD) @@ -420,11 +399,11 @@ byte_copy: len--; } - return 0; + return PA_MEMCPY_OK; unaligned_copy: /* possibly we are aligned on a word, but not on a double... 
@@ -457,8 +436,7 @@ unaligned_copy:
 			src = (unsigned long)pcs;
 		}

-		ret = copy_dstaligned(dst, src, len / sizeof(unsigned int),
-			o_dst, o_src, o_len);
+		ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
 		if (ret)
 			return ret;
@@ -473,17 +451,41 @@ unaligned_copy:

 handle_load_error:
 	__asm__ __volatile__ ("pmc_load_exc:\n");
-	d = &__get_cpu_var(exception_data);
-	DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
-	return o_len - d->fault_addr + o_src;
+	return PA_MEMCPY_LOAD_ERROR;

 handle_store_error:
 	__asm__ __volatile__ ("pmc_store_exc:\n");
-	d = &__get_cpu_var(exception_data);
-	DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
-	return o_len - d->fault_addr + o_dst;
+	return PA_MEMCPY_STORE_ERROR;
+}
+
+
+/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
+static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+{
+	unsigned long ret, fault_addr, reference;
+	struct exception_data *d;
+
+	ret = pa_memcpy_internal(dstp, srcp, len);
+	if (likely(ret == PA_MEMCPY_OK))
+		return 0;
+
+	/* if a load or store fault occured we can get the faulty addr */
+	d = this_cpu_ptr(&exception_data);
+	fault_addr = d->fault_addr;
+
+	/* error in load or store? */
+	if (ret == PA_MEMCPY_LOAD_ERROR)
+		reference = (unsigned long) srcp;
+	else
+		reference = (unsigned long) dstp;
+
+	DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
+		ret, len, fault_addr, reference);
+
+	if (fault_addr >= reference)
+		return len - (fault_addr - reference);
+	else
+		return len;
 }

 #ifdef __KERNEL__
@@ -494,7 +496,8 @@ unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len)
 	return pa_memcpy((void __force *)dst, src, len);
 }

-unsigned long copy_from_user(void *dst, const void __user *src, unsigned long len)
+EXPORT_SYMBOL(__copy_from_user);
+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len)
 {
 	mtsp(get_user_space(), 1);
 	mtsp(get_kernel_space(), 2);
@@ -521,4 +524,17 @@ EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(copy_from_user);
 EXPORT_SYMBOL(copy_in_user);
 EXPORT_SYMBOL(memcpy);
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+	unsigned long addr = (unsigned long)src;
+
+	if (addr < PAGE_SIZE)
+		return -EFAULT;
+
+	/* check for I/O space F_EXTEND(0xfff00000) access as well? */
+
+	return __probe_kernel_read(dst, src, size);
+}
+
 #endif
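With this split, copy_dstaligned() and pa_memcpy_internal() only report which side faulted; the new pa_memcpy() wrapper converts that status plus the fault address recorded in the per-CPU exception_data into the bytes-not-copied count that copy_to_user() and __copy_from_user() must return. A user-space model of just that arithmetic (the function and its parameter names are invented for this sketch, not kernel API):

#include <stdio.h>

#define PA_MEMCPY_OK		0
#define PA_MEMCPY_LOAD_ERROR	1
#define PA_MEMCPY_STORE_ERROR	2

/* Mirror of the wrapper's logic: use the source buffer as reference for
 * load faults and the destination for store faults, then subtract the
 * progress made before the fault from the requested length. */
static unsigned long bytes_not_copied(unsigned long status, unsigned long len,
				      unsigned long fault_addr,
				      unsigned long src, unsigned long dst)
{
	unsigned long reference;

	if (status == PA_MEMCPY_OK)
		return 0;
	reference = (status == PA_MEMCPY_LOAD_ERROR) ? src : dst;

	/* a fault address below the buffer start is unexpected; be
	 * conservative and report that nothing was copied */
	if (fault_addr >= reference)
		return len - (fault_addr - reference);
	return len;
}

int main(void)
{
	/* 100-byte copy from src=0x1000; the load at 0x1040 faulted, so
	 * 64 bytes made it across and 36 remain uncopied */
	printf("%lu bytes not copied\n",
	       bytes_not_copied(PA_MEMCPY_LOAD_ERROR, 100, 0x1040,
				0x1000, 0x2000));
	return 0;
}

The probe_kernel_read() override added at the bottom follows the same defensive pattern: addresses below PAGE_SIZE are rejected with -EFAULT before any copy is attempted, and everything else is passed through to the generic __probe_kernel_read(); whether F_EXTEND(0xfff00000) I/O-space accesses should be filtered too is left as an open question in the code's comment.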
