Diffstat (limited to 'arch/parisc/lib')
 arch/parisc/lib/Makefile    |  3
 arch/parisc/lib/bitops.c    |  3
 arch/parisc/lib/delay.c     | 73
 arch/parisc/lib/iomap.c     | 24
 arch/parisc/lib/lusercopy.S | 51
 arch/parisc/lib/memcpy.c    | 98
 arch/parisc/lib/ucmpdi2.c   | 25
 7 files changed, 171 insertions(+), 106 deletions(-)
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 5f2e6904d14..8fa92b8d839 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -2,6 +2,7 @@
 # Makefile for parisc-specific library files
 #
 
-lib-y	:= lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o
+lib-y	:= lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+	   ucmpdi2.o delay.o
 
 obj-y	:= iomap.o
diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
index 353963d4205..187118841af 100644
--- a/arch/parisc/lib/bitops.c
+++ b/arch/parisc/lib/bitops.c
@@ -8,8 +8,7 @@
 
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 #ifdef CONFIG_SMP
 arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = {
diff --git a/arch/parisc/lib/delay.c b/arch/parisc/lib/delay.c
new file mode 100644
index 00000000000..ec9255f27a8
--- /dev/null
+++ b/arch/parisc/lib/delay.c
@@ -0,0 +1,73 @@
+/*
+ *	Precise Delay Loops for parisc
+ *
+ *	based on code by:
+ *	Copyright (C) 1993 Linus Torvalds
+ *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ *	Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
+ *
+ *	parisc implementation:
+ *	Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ */
+
+
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/init.h>
+
+#include <asm/processor.h>
+#include <asm/delay.h>
+
+#include <asm/special_insns.h>    /* for mfctl() */
+#include <asm/processor.h> /* for boot_cpu_data */
+
+/* CR16 based delay: */
+static void __cr16_delay(unsigned long __loops)
+{
+	/*
+	 * Note: Due to unsigned math, cr16 rollovers shouldn't be
+	 * a problem here. However, on 32 bit, we need to make sure
+	 * we don't pass in too big a value. The current default
+	 * value of MAX_UDELAY_MS should help prevent this.
+	 */
+	u32 bclock, now, loops = __loops;
+	int cpu;
+
+	preempt_disable();
+	cpu = smp_processor_id();
+	bclock = mfctl(16);
+	for (;;) {
+		now = mfctl(16);
+		if ((now - bclock) >= loops)
+			break;
+
+		/* Allow RT tasks to run */
+		preempt_enable();
+		asm volatile("	nop\n");
+		barrier();
+		preempt_disable();
+
+		/*
+		 * It is possible that we moved to another CPU, and
+		 * since CR16's are per-cpu we need to calculate
+		 * that. The delay must guarantee that we wait "at
+		 * least" the amount of time. Being moved to another
+		 * CPU could make the wait longer but we just need to
+		 * make sure we waited long enough. Rebalance the
+		 * counter for this CPU.
+		 */
+		if (unlikely(cpu != smp_processor_id())) {
+			loops -= (now - bclock);
+			cpu = smp_processor_id();
+			bclock = mfctl(16);
+		}
+	}
+	preempt_enable();
+}
+
+
+void __udelay(unsigned long usecs)
+{
+	__cr16_delay(usecs * ((unsigned long)boot_cpu_data.cpu_hz / 1000000UL));
+}
+EXPORT_SYMBOL(__udelay);
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index 5069e8b2ca7..fb8e10a4fb3 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -5,6 +5,7 @@
 
 #include <linux/ioport.h>
 #include <linux/pci.h>
+#include <linux/export.h>
 #include <asm/io.h>
 
 /*
@@ -435,28 +436,6 @@ void ioport_unmap(void __iomem *addr)
 	}
 }
 
-/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-	resource_size_t start = pci_resource_start(dev, bar);
-	resource_size_t len = pci_resource_len(dev, bar);
-	unsigned long flags = pci_resource_flags(dev, bar);
-
-	if (!len || !start)
-		return NULL;
-	if (maxlen && len > maxlen)
-		len = maxlen;
-	if (flags & IORESOURCE_IO)
-		return ioport_map(start, len);
-	if (flags & IORESOURCE_MEM) {
-		if (flags & IORESOURCE_CACHEABLE)
-			return ioremap(start, len);
-		return ioremap_nocache(start, len);
-	}
-	/* What? */
-	return NULL;
-}
-
 void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 {
 	if (!INDIRECT_ADDR(addr)) {
@@ -482,5 +461,4 @@ EXPORT_SYMBOL(iowrite16_rep);
 EXPORT_SYMBOL(iowrite32_rep);
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
-EXPORT_SYMBOL(pci_iomap);
 EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 1bd23ccec17..a512f07d4fe 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -61,47 +61,6 @@
 	.endm
 
 	/*
-	 * long lstrncpy_from_user(char *dst, const char *src, long n)
-	 *
-	 * Returns -EFAULT if exception before terminator,
-	 *         N if the entire buffer filled,
-	 *         otherwise strlen (i.e. excludes zero byte)
-	 */
-
-ENTRY(lstrncpy_from_user)
-	.proc
-	.callinfo NO_CALLS
-	.entry
-	comib,=     0,%r24,$lsfu_done
-	copy        %r24,%r23
-	get_sr
-1:      ldbs,ma     1(%sr1,%r25),%r1
-$lsfu_loop:
-	stbs,ma     %r1,1(%r26)
-	comib,=,n   0,%r1,$lsfu_done
-	addib,<>,n  -1,%r24,$lsfu_loop
-2:      ldbs,ma     1(%sr1,%r25),%r1
-$lsfu_done:
-	sub         %r23,%r24,%r28
-$lsfu_exit:
-	bv          %r0(%r2)
-	nop
-	.exit
-ENDPROC(lstrncpy_from_user)
-
-	.section .fixup,"ax"
-3:      fixup_branch $lsfu_exit
-	ldi         -EFAULT,%r28
-	.previous
-
-	.section __ex_table,"aw"
-	ASM_ULONG_INSN 1b,3b
-	ASM_ULONG_INSN 2b,3b
-	.previous
-
-	.procend
-
-	/*
 	 * unsigned long lclear_user(void *to, unsigned long n)
 	 *
 	 * Returns 0 for success.
@@ -129,9 +88,7 @@ ENDPROC(lclear_user)
 	ldo        1(%r25),%r25
 	.previous
 
-	.section __ex_table,"aw"
-	ASM_ULONG_INSN 1b,2b
-	.previous
+	ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
 
 	.procend
 
@@ -170,10 +127,8 @@ ENDPROC(lstrnlen_user)
 	copy        %r24,%r26    /* reset r26 so 0 is returned on fault */
 	.previous
 
-	.section __ex_table,"aw"
-	ASM_ULONG_INSN 1b,3b
-	ASM_ULONG_INSN 2b,3b
-	.previous
+	ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
+	ASM_EXCEPTIONTABLE_ENTRY(2b,3b)
 
 	.procend
 
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index 1dbca5c31b3..b2b441b3234 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,6 +2,7 @@
  *    Optimized memory copy routines.
  *
  *    Copyright (C) 2004 Randolph Chung <tausq@debian.org>
+ *    Copyright (C) 2013 Helge Deller <deller@gmx.de>
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -55,7 +56,7 @@
 #ifdef __KERNEL__
 #include <linux/module.h>
 #include <linux/compiler.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #define s_space "%%sr1"
 #define d_space "%%sr2"
 #else
@@ -68,7 +69,7 @@
 DECLARE_PER_CPU(struct exception_data, exception_data);
 
 #define preserve_branch(label)	do {					\
-	volatile int dummy;						\
+	volatile int dummy = 0;						\
 	/* The following branch is never taken, it's just here to  */	\
 	/* prevent gcc from optimizing away our exception code. */ 	\
 	if (unlikely(dummy != dummy))					\
@@ -153,17 +154,21 @@ static inline void prefetch_dst(const void *addr)
 #define prefetch_dst(addr) do { } while(0)
 #endif
 
+#define PA_MEMCPY_OK		0
+#define PA_MEMCPY_LOAD_ERROR	1
+#define PA_MEMCPY_STORE_ERROR	2
+
 /* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
  * per loop.  This code is derived from glibc.
  */
-static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len, unsigned long o_dst, unsigned long o_src, unsigned long o_len)
+static noinline unsigned long copy_dstaligned(unsigned long dst,
+					unsigned long src, unsigned long len)
 {
 	/* gcc complains that a2 and a3 may be uninitialized, but actually
 	 * they cannot be.  Initialize a2/a3 to shut gcc up.
 	 */
 	register unsigned int a0, a1, a2 = 0, a3 = 0;
 	int sh_1, sh_2;
-	struct exception_data *d;
 
 	/* prefetch_src((const void *)src); */
 
@@ -197,7 +202,7 @@ static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src
 			goto do2;
 		case 0:
 			if (len == 0)
-				return 0;
+				return PA_MEMCPY_OK;
 			/* a3 = ((unsigned int *) src)[0];
 			   a0 = ((unsigned int *) src)[1]; */
 			ldw(s_space, 0, src, a3, cda_ldw_exc);
@@ -256,42 +261,35 @@ do0:
 	preserve_branch(handle_load_error);
 	preserve_branch(handle_store_error);
 
-	return 0;
+	return PA_MEMCPY_OK;
 
 handle_load_error:
 	__asm__ __volatile__ ("cda_ldw_exc:\n");
-	d = &__get_cpu_var(exception_data);
-	DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
-	return o_len * 4 - d->fault_addr + o_src;
+	return PA_MEMCPY_LOAD_ERROR;
 
 handle_store_error:
 	__asm__ __volatile__ ("cda_stw_exc:\n");
-	d = &__get_cpu_var(exception_data);
-	DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
-	return o_len * 4 - d->fault_addr + o_dst;
+	return PA_MEMCPY_STORE_ERROR;
 }
 
-/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
+ * In case of an access fault the faulty address can be read from the per_cpu
+ * exception data struct. */
+static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
+					unsigned long len)
 {
 	register unsigned long src, dst, t1, t2, t3;
 	register unsigned char *pcs, *pcd;
 	register unsigned int *pws, *pwd;
 	register double *pds, *pdd;
-	unsigned long ret = 0;
-	unsigned long o_dst, o_src, o_len;
-	struct exception_data *d;
+	unsigned long ret;
 
 	src = (unsigned long)srcp;
 	dst = (unsigned long)dstp;
 	pcs = (unsigned char *)srcp;
 	pcd = (unsigned char *)dstp;
 
-	o_dst = dst; o_src = src; o_len = len;
-
 	/* prefetch_src((const void *)srcp); */
 
 	if (len < THRESHOLD)
@@ -401,7 +399,7 @@ byte_copy:
 		len--;
 	}
 
-	return 0;
+	return PA_MEMCPY_OK;
 
 unaligned_copy:
 	/* possibly we are aligned on a word, but not on a double... */
@@ -438,8 +436,7 @@ unaligned_copy:
 		src = (unsigned long)pcs;
 	}
 
-	ret = copy_dstaligned(dst, src, len / sizeof(unsigned int), 
-		o_dst, o_src, o_len);
+	ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
 	if (ret)
 		return ret;
 
@@ -454,17 +451,41 @@
 
 handle_load_error:
 	__asm__ __volatile__ ("pmc_load_exc:\n");
-	d = &__get_cpu_var(exception_data);
-	DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src);
-	return o_len - d->fault_addr + o_src;
+	return PA_MEMCPY_LOAD_ERROR;
 
 handle_store_error:
 	__asm__ __volatile__ ("pmc_store_exc:\n");
-	d = &__get_cpu_var(exception_data);
-	DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n",
-		o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst);
-	return o_len - d->fault_addr + o_dst;
+	return PA_MEMCPY_STORE_ERROR;
+}
+
+
+/* Returns 0 for success, otherwise, returns number of bytes not transferred. */
+static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+{
+	unsigned long ret, fault_addr, reference;
+	struct exception_data *d;
+
+	ret = pa_memcpy_internal(dstp, srcp, len);
+	if (likely(ret == PA_MEMCPY_OK))
+		return 0;
+
+	/* if a load or store fault occured we can get the faulty addr */
+	d = this_cpu_ptr(&exception_data);
+	fault_addr = d->fault_addr;
+
+	/* error in load or store? */
+	if (ret == PA_MEMCPY_LOAD_ERROR)
+		reference = (unsigned long) srcp;
+	else
+		reference = (unsigned long) dstp;
+
+	DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
+		ret, len, fault_addr, reference);
+
+	if (fault_addr >= reference)
+		return len - (fault_addr - reference);
+	else
+		return len;
 }
 
 #ifdef __KERNEL__
@@ -503,4 +524,17 @@ EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(copy_from_user);
 EXPORT_SYMBOL(copy_in_user);
 EXPORT_SYMBOL(memcpy);
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+	unsigned long addr = (unsigned long)src;
+
+	if (addr < PAGE_SIZE)
+		return -EFAULT;
+
+	/* check for I/O space F_EXTEND(0xfff00000) access as well? */
+
+	return __probe_kernel_read(dst, src, size);
+}
+
 #endif
diff --git a/arch/parisc/lib/ucmpdi2.c b/arch/parisc/lib/ucmpdi2.c
new file mode 100644
index 00000000000..149c016f32c
--- /dev/null
+++ b/arch/parisc/lib/ucmpdi2.c
@@ -0,0 +1,25 @@
+#include <linux/module.h>
+
+union ull_union {
+	unsigned long long ull;
+	struct {
+		unsigned int high;
+		unsigned int low;
+	} ui;
+};
+
+int __ucmpdi2(unsigned long long a, unsigned long long b)
+{
+	union ull_union au = {.ull = a};
+	union ull_union bu = {.ull = b};
+
+	if (au.ui.high < bu.ui.high)
+		return 0;
+	else if (au.ui.high > bu.ui.high)
+		return 2;
+	if (au.ui.low < bu.ui.low)
+		return 0;
+	else if (au.ui.low > bu.ui.low)
+		return 2;
+	return 1;
+}
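Note on the new ucmpdi2.c above: __ucmpdi2() is the libgcc-style helper that gcc may call for some unsigned 64-bit (DImode) comparisons on a 32-bit target, and its contract is a return value of 0 for a < b, 1 for a == b and 2 for a > b; the union's high-word-before-low-word layout relies on parisc being big-endian. The snippet below is an illustrative, hypothetical user-space check of that return convention, not part of the patch; ucmpdi2_ref is a portable stand-in for the kernel's word-by-word version.

#include <assert.h>
#include <stdio.h>

/* Reference model of the __ucmpdi2() contract shown in the diff:
 * returns 0 if a < b, 1 if a == b, 2 if a > b. */
static int ucmpdi2_ref(unsigned long long a, unsigned long long b)
{
	if (a < b)
		return 0;
	if (a > b)
		return 2;
	return 1;
}

int main(void)
{
	/* Boundary cases: equal values, high words differing,
	 * low words differing while high words match. */
	assert(ucmpdi2_ref(0, 0) == 1);
	assert(ucmpdi2_ref(1, 2) == 0);
	assert(ucmpdi2_ref(0x100000000ULL, 0xffffffffULL) == 2);
	assert(ucmpdi2_ref(0xffffff00ULL, 0xffffffffULL) == 0);

	printf("__ucmpdi2 return convention: 0 = less, 1 = equal, 2 = greater\n");
	return 0;
}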
