Diffstat (limited to 'arch/s390/mm/maccess.c')
-rw-r--r--	arch/s390/mm/maccess.c	133
1 file changed, 125 insertions(+), 8 deletions(-)
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 71a4b0d34be..2a2e35416d2 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -11,7 +11,10 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <asm/system.h>
+#include <linux/gfp.h>
+#include <linux/cpu.h>
+#include <asm/ctl_reg.h>
+#include <asm/io.h>
 
 /*
  * This function writes to kernel memory bypassing DAT and possible
@@ -19,7 +22,7 @@
  * using the stura instruction.
  * Returns the number of bytes copied or -EFAULT.
  */
-static long probe_kernel_write_odd(void *dst, void *src, size_t size)
+static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
 {
 	unsigned long count, aligned;
 	int offset, mask;
@@ -45,7 +48,7 @@ static long probe_kernel_write_odd(void *dst, void *src, size_t size)
 	return rc ? rc : count;
 }
 
-long probe_kernel_write(void *dst, void *src, size_t size)
+long probe_kernel_write(void *dst, const void *src, size_t size)
 {
 	long copied = 0;
 
@@ -60,18 +63,14 @@ long probe_kernel_write(void *dst, void *src, size_t size)
 	return copied < 0 ? -EFAULT : 0;
 }
 
-int memcpy_real(void *dest, void *src, size_t count)
+static int __memcpy_real(void *dest, void *src, size_t count)
 {
 	register unsigned long _dest asm("2") = (unsigned long) dest;
 	register unsigned long _len1 asm("3") = (unsigned long) count;
 	register unsigned long _src  asm("4") = (unsigned long) src;
 	register unsigned long _len2 asm("5") = (unsigned long) count;
-	unsigned long flags;
 	int rc = -EFAULT;
 
-	if (!count)
-		return 0;
-	flags = __arch_local_irq_stnsm(0xf8UL);
 	asm volatile (
 		"0:	mvcle	%1,%2,0x0\n"
 		"1:	jo	0b\n"
@@ -82,6 +81,124 @@ int memcpy_real(void *dest, void *src, size_t count)
 		  "+d" (_len2), "=m" (*((long *) dest))
 		: "m" (*((long *) src))
 		: "cc", "memory");
+	return rc;
+}
+
+/*
+ * Copy memory in real mode (kernel to kernel)
+ */
+int memcpy_real(void *dest, void *src, size_t count)
+{
+	unsigned long flags;
+	int rc;
+
+	if (!count)
+		return 0;
+	local_irq_save(flags);
+	__arch_local_irq_stnsm(0xfbUL);
+	rc = __memcpy_real(dest, src, count);
+	local_irq_restore(flags);
+	return rc;
+}
+
+/*
+ * Copy memory in absolute mode (kernel to kernel)
+ */
+void memcpy_absolute(void *dest, void *src, size_t count)
+{
+	unsigned long cr0, flags, prefix;
+
+	flags = arch_local_irq_save();
+	__ctl_store(cr0, 0, 0);
+	__ctl_clear_bit(0, 28); /* disable lowcore protection */
+	prefix = store_prefix();
+	if (prefix) {
+		local_mcck_disable();
+		set_prefix(0);
+		memcpy(dest, src, count);
+		set_prefix(prefix);
+		local_mcck_enable();
+	} else {
+		memcpy(dest, src, count);
+	}
+	__ctl_load(cr0, 0, 0);
 	arch_local_irq_restore(flags);
+}
+
+/*
+ * Copy memory from kernel (real) to user (virtual)
+ */
+int copy_to_user_real(void __user *dest, void *src, unsigned long count)
+{
+	int offs = 0, size, rc;
+	char *buf;
+
+	buf = (char *) __get_free_page(GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	rc = -EFAULT;
+	while (offs < count) {
+		size = min(PAGE_SIZE, count - offs);
+		if (memcpy_real(buf, src + offs, size))
+			goto out;
+		if (copy_to_user(dest + offs, buf, size))
+			goto out;
+		offs += size;
+	}
+	rc = 0;
+out:
+	free_page((unsigned long) buf);
 	return rc;
 }
+
+/*
+ * Check if physical address is within prefix or zero page
+ */
+static int is_swapped(unsigned long addr)
+{
+	unsigned long lc;
+	int cpu;
+
+	if (addr < sizeof(struct _lowcore))
+		return 1;
+	for_each_online_cpu(cpu) {
+		lc = (unsigned long) lowcore_ptr[cpu];
+		if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc)
+			continue;
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * Convert a physical pointer for /dev/mem access
+ *
+ * For swapped prefix pages a new buffer is returned that contains a copy of
+ * the absolute memory. The buffer size is maximum one page large.
+ */
+void *xlate_dev_mem_ptr(unsigned long addr)
+{
+	void *bounce = (void *) addr;
+	unsigned long size;
+
+	get_online_cpus();
+	preempt_disable();
+	if (is_swapped(addr)) {
+		size = PAGE_SIZE - (addr & ~PAGE_MASK);
+		bounce = (void *) __get_free_page(GFP_ATOMIC);
+		if (bounce)
+			memcpy_absolute(bounce, (void *) addr, size);
+	}
+	preempt_enable();
+	put_online_cpus();
+	return bounce;
+}
+
+/*
+ * Free converted buffer for /dev/mem access (if necessary)
+ */
+void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
+{
+	if ((void *) addr != buf)
+		free_page((unsigned long) buf);
+}
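
For context, the new xlate_dev_mem_ptr()/unxlate_dev_mem_ptr() pair is intended for bounce-buffer style access to physical memory: translate the address, copy from whatever pointer comes back (which may be a copy of the swapped prefix page), then release the bounce page if one was allocated. Below is a minimal sketch of such a caller, modelled on the generic /dev/mem read path; the function read_mem_page() and its signature are illustrative only and are not part of this patch.

/*
 * Illustrative sketch only (not part of this patch): read up to one
 * page of physical memory through the xlate/unxlate interface added
 * above. Assumes <linux/uaccess.h> and <asm/io.h> are included and
 * that addr + count does not cross a page boundary.
 */
static ssize_t read_mem_page(unsigned long addr, char __user *ubuf, size_t count)
{
	void *ptr;
	ssize_t ret;

	ptr = xlate_dev_mem_ptr(addr);	/* may return a bounce page copy */
	if (!ptr)
		return -EFAULT;		/* bounce page allocation failed */
	ret = copy_to_user(ubuf, ptr, count) ? -EFAULT : (ssize_t) count;
	unxlate_dev_mem_ptr(addr, ptr);	/* frees the bounce page, if any */
	return ret;
}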
