Diffstat (limited to 'arch/s390/lib')
-rw-r--r--  arch/s390/lib/Makefile         |    7
-rw-r--r--  arch/s390/lib/delay.c          |   63
-rw-r--r--  arch/s390/lib/div64.c          |    4
-rw-r--r--  arch/s390/lib/find.c           |   77
-rw-r--r--  arch/s390/lib/mem32.S          |   92
-rw-r--r--  arch/s390/lib/mem64.S          |   88
-rw-r--r--  arch/s390/lib/qrnnd.S          |    5
-rw-r--r--  arch/s390/lib/spinlock.c       |  188
-rw-r--r--  arch/s390/lib/string.c         |   59
-rw-r--r--  arch/s390/lib/uaccess.c        |  406
-rw-r--r--  arch/s390/lib/uaccess.h        |   23
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c  |  227
-rw-r--r--  arch/s390/lib/uaccess_pt.c     |  401
-rw-r--r--  arch/s390/lib/uaccess_std.c    |  319
-rw-r--r--  arch/s390/lib/usercopy.c       |    8
15 files changed, 809 insertions(+), 1158 deletions(-)
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 761ab8b56af..c6d752e8bf2 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,8 +2,7 @@
# Makefile for s390-specific library files.
#
-lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
-obj-y += usercopy.o
-obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o
-lib-$(CONFIG_64BIT) += uaccess_mvcos.o
+lib-y += delay.o string.o uaccess.o find.o
+obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
+obj-$(CONFIG_64BIT) += mem64.o
lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 752b362bf65..a9f3d0042d5 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -1,7 +1,7 @@
/*
* Precise Delay Loops for S390
*
- * Copyright IBM Corp. 1999,2008
+ * Copyright IBM Corp. 1999, 2008
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
@@ -12,6 +12,8 @@
#include <linux/module.h>
#include <linux/irqflags.h>
#include <linux/interrupt.h>
+#include <asm/vtimer.h>
+#include <asm/div64.h>
void __delay(unsigned long loops)
{
@@ -27,46 +29,43 @@ void __delay(unsigned long loops)
static void __udelay_disabled(unsigned long long usecs)
{
- unsigned long mask, cr0, cr0_saved;
- u64 clock_saved;
+ unsigned long cr0, cr6, new;
+ u64 clock_saved, end;
+ end = get_tod_clock() + (usecs << 12);
clock_saved = local_tick_disable();
- set_clock_comparator(get_clock() + (usecs << 12));
- __ctl_store(cr0_saved, 0, 0);
- cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
- __ctl_load(cr0 , 0, 0);
- mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
+ __ctl_store(cr0, 0, 0);
+ __ctl_store(cr6, 6, 6);
+ new = (cr0 & 0xffff00e0) | 0x00000800;
+ __ctl_load(new , 0, 0);
+ new = 0;
+ __ctl_load(new, 6, 6);
lockdep_off();
- trace_hardirqs_on();
- __load_psw_mask(mask);
- local_irq_disable();
+ do {
+ set_clock_comparator(end);
+ vtime_stop_cpu();
+ } while (get_tod_clock_fast() < end);
lockdep_on();
- __ctl_load(cr0_saved, 0, 0);
+ __ctl_load(cr0, 0, 0);
+ __ctl_load(cr6, 6, 6);
local_tick_enable(clock_saved);
- set_clock_comparator(S390_lowcore.clock_comparator);
}
static void __udelay_enabled(unsigned long long usecs)
{
- unsigned long mask;
- u64 clock_saved;
- u64 end;
+ u64 clock_saved, end;
- mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO;
- end = get_clock() + (usecs << 12);
+ end = get_tod_clock_fast() + (usecs << 12);
do {
clock_saved = 0;
if (end < S390_lowcore.clock_comparator) {
clock_saved = local_tick_disable();
set_clock_comparator(end);
}
- trace_hardirqs_on();
- __load_psw_mask(mask);
- local_irq_disable();
+ vtime_stop_cpu();
if (clock_saved)
local_tick_enable(clock_saved);
- } while (get_clock() < end);
- set_clock_comparator(S390_lowcore.clock_comparator);
+ } while (get_tod_clock_fast() < end);
}
/*
@@ -110,7 +109,21 @@ void udelay_simple(unsigned long long usecs)
{
u64 end;
- end = get_clock() + (usecs << 12);
- while (get_clock() < end)
+ end = get_tod_clock_fast() + (usecs << 12);
+ while (get_tod_clock_fast() < end)
cpu_relax();
}
+
+void __ndelay(unsigned long long nsecs)
+{
+ u64 end;
+
+ nsecs <<= 9;
+ do_div(nsecs, 125);
+ end = get_tod_clock_fast() + nsecs;
+ if (nsecs & ~0xfffUL)
+ __udelay(nsecs >> 12);
+ while (get_tod_clock_fast() < end)
+ barrier();
+}
+EXPORT_SYMBOL(__ndelay);
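
The shift-and-divide pair in the new __ndelay() converts nanoseconds to TOD
clock units: the TOD clock advances 4096 units per microsecond (hence the
usecs << 12 elsewhere in this file), so nanoseconds scale by 4096/1000 =
512/125, which is exactly (nsecs << 9) / 125. A minimal userspace sketch of
the conversion (plain C, not kernel code; the constant comes from the s390
TOD clock definition):

#include <stdio.h>

/* TOD clock units per microsecond on s390 (bit 51 steps every usec). */
#define TOD_UNITS_PER_USEC 4096ULL

static unsigned long long ns_to_tod(unsigned long long nsecs)
{
	/* 4096/1000 reduces to 512/125: matches nsecs <<= 9; do_div(nsecs, 125) */
	return (nsecs << 9) / 125;
}

int main(void)
{
	/* 1000 ns must equal one microsecond worth of TOD units */
	printf("%llu\n", ns_to_tod(1000));	/* prints 4096 */
	return 0;
}
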
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c
index d9e62c0b576..261152f8324 100644
--- a/arch/s390/lib/div64.c
+++ b/arch/s390/lib/div64.c
@@ -1,9 +1,7 @@
/*
- * arch/s390/lib/div64.c
- *
* __div64_32 implementation for 31 bit.
*
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
diff --git a/arch/s390/lib/find.c b/arch/s390/lib/find.c
new file mode 100644
index 00000000000..922003c1b90
--- /dev/null
+++ b/arch/s390/lib/find.c
@@ -0,0 +1,77 @@
+/*
+ * MSB0 numbered special bitops handling.
+ *
+ * On s390x the bits are numbered:
+ * |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The reason for this bit numbering is the fact that the hardware sets bits
+ * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
+ * from the 'wrong end'.
+ */
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/export.h>
+
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size)
+{
+ const unsigned long *p = addr;
+ unsigned long result = 0;
+ unsigned long tmp;
+
+ while (size & ~(BITS_PER_LONG - 1)) {
+ if ((tmp = *(p++)))
+ goto found;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = (*p) & (~0UL << (BITS_PER_LONG - size));
+ if (!tmp) /* Are any bits set? */
+ return result + size; /* Nope. */
+found:
+ return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_first_bit_inv);
+
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ const unsigned long *p = addr + (offset / BITS_PER_LONG);
+ unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset %= BITS_PER_LONG;
+ if (offset) {
+ tmp = *(p++);
+ tmp &= (~0UL >> offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+ while (size & ~(BITS_PER_LONG-1)) {
+ if ((tmp = *(p++)))
+ goto found_middle;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = *p;
+found_first:
+ tmp &= (~0UL << (BITS_PER_LONG - size));
+ if (!tmp) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_next_bit_inv);
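
The expression __fls(tmp) ^ (BITS_PER_LONG - 1) at both return points
converts the LSB0 index of the most significant set bit into the MSB0
numbering described in the header comment. A userspace model of the mapping
(hypothetical, using the GCC builtin in place of the kernel's __fls):

#include <stdio.h>

#define BITS_PER_LONG 64

/* LSB0 index of the most significant set bit, like the kernel's __fls() */
static unsigned long my_fls(unsigned long word)
{
	return (BITS_PER_LONG - 1) - __builtin_clzl(word);
}

int main(void)
{
	unsigned long word = 0x4000000000000000UL;	/* MSB0 bit 1 set */

	/* XOR with 63 mirrors the LSB0 index into MSB0 numbering */
	printf("%lu\n", my_fls(word) ^ (BITS_PER_LONG - 1));	/* prints 1 */
	return 0;
}
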
diff --git a/arch/s390/lib/mem32.S b/arch/s390/lib/mem32.S
new file mode 100644
index 00000000000..14ca9244b61
--- /dev/null
+++ b/arch/s390/lib/mem32.S
@@ -0,0 +1,92 @@
+/*
+ * String handling functions.
+ *
+ * Copyright IBM Corp. 2012
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * memset implementation
+ *
+ * This code corresponds to the C construct below. We do distinguish
+ * between clearing (c == 0) and setting a memory array (c != 0) simply
+ * because nearly all memset invocations in the kernel clear memory and
+ * the xc instruction is preferred in such cases.
+ *
+ * void *memset(void *s, int c, size_t n)
+ * {
+ * if (likely(c == 0))
+ * return __builtin_memset(s, 0, n);
+ * return __builtin_memset(s, c, n);
+ * }
+ */
+ENTRY(memset)
+ basr %r5,%r0
+.Lmemset_base:
+ ltr %r4,%r4
+ bzr %r14
+ ltr %r3,%r3
+ jnz .Lmemset_fill
+ ahi %r4,-1
+ lr %r3,%r4
+ srl %r3,8
+ ltr %r3,%r3
+ lr %r1,%r2
+ je .Lmemset_clear_rest
+.Lmemset_clear_loop:
+ xc 0(256,%r1),0(%r1)
+ la %r1,256(%r1)
+ brct %r3,.Lmemset_clear_loop
+.Lmemset_clear_rest:
+ ex %r4,.Lmemset_xc-.Lmemset_base(%r5)
+ br %r14
+.Lmemset_fill:
+ stc %r3,0(%r2)
+ chi %r4,1
+ lr %r1,%r2
+ ber %r14
+ ahi %r4,-2
+ lr %r3,%r4
+ srl %r3,8
+ ltr %r3,%r3
+ je .Lmemset_fill_rest
+.Lmemset_fill_loop:
+ mvc 1(256,%r1),0(%r1)
+ la %r1,256(%r1)
+ brct %r3,.Lmemset_fill_loop
+.Lmemset_fill_rest:
+ ex %r4,.Lmemset_mvc-.Lmemset_base(%r5)
+ br %r14
+.Lmemset_xc:
+ xc 0(1,%r1),0(%r1)
+.Lmemset_mvc:
+ mvc 1(1,%r1),0(%r1)
+
+/*
+ * memcpy implementation
+ *
+ * void *memcpy(void *dest, const void *src, size_t n)
+ */
+ENTRY(memcpy)
+ basr %r5,%r0
+.Lmemcpy_base:
+ ltr %r4,%r4
+ bzr %r14
+ ahi %r4,-1
+ lr %r0,%r4
+ srl %r0,8
+ ltr %r0,%r0
+ lr %r1,%r2
+ jnz .Lmemcpy_loop
+.Lmemcpy_rest:
+ ex %r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5)
+ br %r14
+.Lmemcpy_loop:
+ mvc 0(256,%r1),0(%r3)
+ la %r1,256(%r1)
+ la %r3,256(%r3)
+ brct %r0,.Lmemcpy_loop
+ j .Lmemcpy_rest
+.Lmemcpy_mvc:
+ mvc 0(1,%r1),0(%r3)
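
The fill path above relies on MVC's left-to-right, byte-at-a-time semantics:
after stc stores the fill byte at offset 0, the overlapping
mvc 1(256,%r1),0(%r1) reads each byte immediately after writing its
predecessor, replicating the stored byte across the next 256 positions. A C
model of the trick (a sketch of the semantics, not the kernel's memset):

/*
 * Model of the overlapping-MVC fill: copying s[i-1] into s[i] strictly
 * left to right propagates the byte stored at s[0] through the range.
 */
static void mvc_style_fill(unsigned char *s, unsigned char c, unsigned long n)
{
	unsigned long i;

	s[0] = c;			/* stc %r3,0(%r2) */
	for (i = 1; i < n; i++)		/* mvc 1(n-1,%r1),0(%r1) */
		s[i] = s[i - 1];
}
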
diff --git a/arch/s390/lib/mem64.S b/arch/s390/lib/mem64.S
new file mode 100644
index 00000000000..c6d553e85ab
--- /dev/null
+++ b/arch/s390/lib/mem64.S
@@ -0,0 +1,88 @@
+/*
+ * String handling functions.
+ *
+ * Copyright IBM Corp. 2012
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * memset implementation
+ *
+ * This code corresponds to the C construct below. We do distinguish
+ * between clearing (c == 0) and setting a memory array (c != 0) simply
+ * because nearly all memset invocations in the kernel clear memory and
+ * the xc instruction is preferred in such cases.
+ *
+ * void *memset(void *s, int c, size_t n)
+ * {
+ * if (likely(c == 0))
+ * return __builtin_memset(s, 0, n);
+ * return __builtin_memset(s, c, n);
+ * }
+ */
+ENTRY(memset)
+ ltgr %r4,%r4
+ bzr %r14
+ ltgr %r3,%r3
+ jnz .Lmemset_fill
+ aghi %r4,-1
+ srlg %r3,%r4,8
+ ltgr %r3,%r3
+ lgr %r1,%r2
+ jz .Lmemset_clear_rest
+.Lmemset_clear_loop:
+ xc 0(256,%r1),0(%r1)
+ la %r1,256(%r1)
+ brctg %r3,.Lmemset_clear_loop
+.Lmemset_clear_rest:
+ larl %r3,.Lmemset_xc
+ ex %r4,0(%r3)
+ br %r14
+.Lmemset_fill:
+ stc %r3,0(%r2)
+ cghi %r4,1
+ lgr %r1,%r2
+ ber %r14
+ aghi %r4,-2
+ srlg %r3,%r4,8
+ ltgr %r3,%r3
+ jz .Lmemset_fill_rest
+.Lmemset_fill_loop:
+ mvc 1(256,%r1),0(%r1)
+ la %r1,256(%r1)
+ brctg %r3,.Lmemset_fill_loop
+.Lmemset_fill_rest:
+ larl %r3,.Lmemset_mvc
+ ex %r4,0(%r3)
+ br %r14
+.Lmemset_xc:
+ xc 0(1,%r1),0(%r1)
+.Lmemset_mvc:
+ mvc 1(1,%r1),0(%r1)
+
+/*
+ * memcpy implementation
+ *
+ * void *memcpy(void *dest, const void *src, size_t n)
+ */
+ENTRY(memcpy)
+ ltgr %r4,%r4
+ bzr %r14
+ aghi %r4,-1
+ srlg %r5,%r4,8
+ ltgr %r5,%r5
+ lgr %r1,%r2
+ jnz .Lmemcpy_loop
+.Lmemcpy_rest:
+ larl %r5,.Lmemcpy_mvc
+ ex %r4,0(%r5)
+ br %r14
+.Lmemcpy_loop:
+ mvc 0(256,%r1),0(%r3)
+ la %r1,256(%r1)
+ la %r3,256(%r3)
+ brctg %r5,.Lmemcpy_loop
+ j .Lmemcpy_rest
+.Lmemcpy_mvc:
+ mvc 0(1,%r1),0(%r3)
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
index eb1df632e74..d321329130e 100644
--- a/arch/s390/lib/qrnnd.S
+++ b/arch/s390/lib/qrnnd.S
@@ -1,5 +1,7 @@
# S/390 __udiv_qrnnd
+#include <linux/linkage.h>
+
# r2 : &__r
# r3 : upper half of 64 bit word n
# r4 : lower half of 64 bit word n
@@ -8,8 +10,7 @@
# the quotient q is to be returned
.text
- .globl __udiv_qrnnd
-__udiv_qrnnd:
+ENTRY(__udiv_qrnnd)
st %r2,24(%r15) # store pointer to remainder for later
lr %r0,%r3 # reload n
lr %r1,%r4
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 91754ffb920..5b0e445bc3f 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -1,8 +1,7 @@
/*
- * arch/s390/lib/spinlock.c
* Out of line spinlock code.
*
- * Copyright (C) IBM Corp. 2004, 2006
+ * Copyright IBM Corp. 2004, 2006
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
@@ -10,6 +9,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
+#include <linux/smp.h>
#include <asm/io.h>
int spin_retry = 1000;
@@ -24,108 +24,102 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
-static inline void _raw_yield(void)
-{
- if (MACHINE_HAS_DIAG44)
- asm volatile("diag 0,0,0x44");
-}
-
-static inline void _raw_yield_cpu(int cpu)
-{
- if (MACHINE_HAS_DIAG9C)
- asm volatile("diag %0,0,0x9c"
- : : "d" (cpu_logical_map(cpu)));
- else
- _raw_yield();
-}
-
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
- int count = spin_retry;
- unsigned int cpu = ~smp_processor_id();
+ unsigned int cpu = SPINLOCK_LOCKVAL;
unsigned int owner;
+ int count;
while (1) {
- owner = lp->owner_cpu;
- if (!owner || smp_vcpu_scheduled(~owner)) {
- for (count = spin_retry; count > 0; count--) {
- if (arch_spin_is_locked(lp))
- continue;
- if (_raw_compare_and_swap(&lp->owner_cpu, 0,
- cpu) == 0)
- return;
- }
- if (MACHINE_IS_LPAR)
- continue;
+ owner = ACCESS_ONCE(lp->lock);
+ /* Try to get the lock if it is free. */
+ if (!owner) {
+ if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+ return;
+ continue;
}
- owner = lp->owner_cpu;
- if (owner)
- _raw_yield_cpu(~owner);
- if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
- return;
+ /* Check if the lock owner is running. */
+ if (!smp_vcpu_scheduled(~owner)) {
+ smp_yield_cpu(~owner);
+ continue;
+ }
+ /* Loop for a while on the lock value. */
+ count = spin_retry;
+ do {
+ owner = ACCESS_ONCE(lp->lock);
+ } while (owner && count-- > 0);
+ if (!owner)
+ continue;
+ /*
+ * For multiple layers of hypervisors, e.g. z/VM + LPAR,
+ * yield the CPU if the lock is still unavailable.
+ */
+ if (!MACHINE_IS_LPAR)
+ smp_yield_cpu(~owner);
}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
- int count = spin_retry;
- unsigned int cpu = ~smp_processor_id();
+ unsigned int cpu = SPINLOCK_LOCKVAL;
unsigned int owner;
+ int count;
local_irq_restore(flags);
while (1) {
- owner = lp->owner_cpu;
- if (!owner || smp_vcpu_scheduled(~owner)) {
- for (count = spin_retry; count > 0; count--) {
- if (arch_spin_is_locked(lp))
- continue;
- local_irq_disable();
- if (_raw_compare_and_swap(&lp->owner_cpu, 0,
- cpu) == 0)
- return;
- local_irq_restore(flags);
- }
- if (MACHINE_IS_LPAR)
- continue;
+ owner = ACCESS_ONCE(lp->lock);
+ /* Try to get the lock if it is free. */
+ if (!owner) {
+ local_irq_disable();
+ if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+ return;
+ local_irq_restore(flags);
}
- owner = lp->owner_cpu;
- if (owner)
- _raw_yield_cpu(~owner);
- local_irq_disable();
- if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
- return;
- local_irq_restore(flags);
+ /* Check if the lock owner is running. */
+ if (!smp_vcpu_scheduled(~owner)) {
+ smp_yield_cpu(~owner);
+ continue;
+ }
+ /* Loop for a while on the lock value. */
+ count = spin_retry;
+ do {
+ owner = ACCESS_ONCE(lp->lock);
+ } while (owner && count-- > 0);
+ if (!owner)
+ continue;
+ /*
+ * For multiple layers of hypervisors, e.g. z/VM + LPAR,
+ * yield the CPU if the lock is still unavailable.
+ */
+ if (!MACHINE_IS_LPAR)
+ smp_yield_cpu(~owner);
}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
+void arch_spin_relax(arch_spinlock_t *lp)
+{
+ unsigned int cpu = lp->lock;
+ if (cpu != 0) {
+ if (MACHINE_IS_VM || MACHINE_IS_KVM ||
+ !smp_vcpu_scheduled(~cpu))
+ smp_yield_cpu(~cpu);
+ }
+}
+EXPORT_SYMBOL(arch_spin_relax);
+
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
- unsigned int cpu = ~smp_processor_id();
int count;
- for (count = spin_retry; count > 0; count--) {
- if (arch_spin_is_locked(lp))
- continue;
- if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+ for (count = spin_retry; count > 0; count--)
+ if (arch_spin_trylock_once(lp))
return 1;
- }
return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
-void arch_spin_relax(arch_spinlock_t *lock)
-{
- unsigned int cpu = lock->owner_cpu;
- if (cpu != 0) {
- if (MACHINE_IS_VM || MACHINE_IS_KVM ||
- !smp_vcpu_scheduled(~cpu))
- _raw_yield_cpu(~cpu);
- }
-}
-EXPORT_SYMBOL(arch_spin_relax);
-
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
unsigned int old;
@@ -133,13 +127,13 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
while (1) {
if (count-- <= 0) {
- _raw_yield();
+ smp_yield();
count = spin_retry;
}
- if (!arch_read_can_lock(rw))
+ old = ACCESS_ONCE(rw->lock);
+ if ((int) old < 0)
continue;
- old = rw->lock & 0x7fffffffU;
- if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+ if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return;
}
}
@@ -153,15 +147,16 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
local_irq_restore(flags);
while (1) {
if (count-- <= 0) {
- _raw_yield();
+ smp_yield();
count = spin_retry;
}
- if (!arch_read_can_lock(rw))
+ old = ACCESS_ONCE(rw->lock);
+ if ((int) old < 0)
continue;
- old = rw->lock & 0x7fffffffU;
local_irq_disable();
- if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+ if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return;
+ local_irq_restore(flags);
}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
@@ -172,10 +167,10 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
int count = spin_retry;
while (count-- > 0) {
- if (!arch_read_can_lock(rw))
+ old = ACCESS_ONCE(rw->lock);
+ if ((int) old < 0)
continue;
- old = rw->lock & 0x7fffffffU;
- if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+ if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return 1;
}
return 0;
@@ -184,16 +179,18 @@ EXPORT_SYMBOL(_raw_read_trylock_retry);
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
+ unsigned int old;
int count = spin_retry;
while (1) {
if (count-- <= 0) {
- _raw_yield();
+ smp_yield();
count = spin_retry;
}
- if (!arch_write_can_lock(rw))
+ old = ACCESS_ONCE(rw->lock);
+ if (old)
continue;
- if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+ if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
return;
}
}
@@ -201,31 +198,36 @@ EXPORT_SYMBOL(_raw_write_lock_wait);
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
+ unsigned int old;
int count = spin_retry;
local_irq_restore(flags);
while (1) {
if (count-- <= 0) {
- _raw_yield();
+ smp_yield();
count = spin_retry;
}
- if (!arch_write_can_lock(rw))
+ old = ACCESS_ONCE(rw->lock);
+ if (old)
continue;
local_irq_disable();
- if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+ if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
return;
+ local_irq_restore(flags);
}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
+ unsigned int old;
int count = spin_retry;
while (count-- > 0) {
- if (!arch_write_can_lock(rw))
+ old = ACCESS_ONCE(rw->lock);
+ if (old)
continue;
- if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+ if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
return 1;
}
return 0;
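
The ~owner arithmetic throughout this file assumes the lock word stores the
bitwise complement of the holder's CPU number (SPINLOCK_LOCKVAL, set up
outside this patch), so 0 always means "free", even for CPU 0, and ~owner
recovers the CPU id for the directed yield. The rwlock side uses the sign
bit instead: 0x80000000 marks a writer, which is why (int) old < 0 tests
"write-locked". A sketch of the assumed encoding:

/* Assumed lock-word encoding (SPINLOCK_LOCKVAL is defined elsewhere):
 * the complement of a valid CPU number is never 0, so 0 can safely
 * mean "unlocked" even on CPU 0. */
static inline unsigned int lockval_of(unsigned int cpu)
{
	return ~cpu;		/* CPU 0 -> 0xffffffff, never 0 */
}

static inline unsigned int owner_of(unsigned int lockval)
{
	return ~lockval;	/* inverse mapping, as in smp_yield_cpu(~owner) */
}
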
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 4143b7c1909..b647d5ff0ad 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -1,9 +1,8 @@
/*
- * arch/s390/lib/string.c
* Optimized string functions
*
* S390 version
- * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2004
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
@@ -44,11 +43,7 @@ static inline char *__strnend(const char *s, size_t n)
*/
size_t strlen(const char *s)
{
-#if __GNUC__ < 4
return __strend(s) - s;
-#else
- return __builtin_strlen(s);
-#endif
}
EXPORT_SYMBOL(strlen);
@@ -74,7 +69,6 @@ EXPORT_SYMBOL(strnlen);
*/
char *strcpy(char *dest, const char *src)
{
-#if __GNUC__ < 4
register int r0 asm("0") = 0;
char *ret = dest;
@@ -83,9 +77,6 @@ char *strcpy(char *dest, const char *src)
: "+&a" (dest), "+&a" (src) : "d" (r0)
: "cc", "memory" );
return ret;
-#else
- return __builtin_strcpy(dest, src);
-#endif
}
EXPORT_SYMBOL(strcpy);
@@ -107,7 +98,7 @@ size_t strlcpy(char *dest, const char *src, size_t size)
if (size) {
size_t len = (ret >= size) ? size-1 : ret;
dest[len] = '\0';
- __builtin_memcpy(dest, src, len);
+ memcpy(dest, src, len);
}
return ret;
}
@@ -125,8 +116,8 @@ EXPORT_SYMBOL(strlcpy);
char *strncpy(char *dest, const char *src, size_t n)
{
size_t len = __strnend(src, n) - src;
- __builtin_memset(dest + len, 0, n - len);
- __builtin_memcpy(dest, src, len);
+ memset(dest + len, 0, n - len);
+ memcpy(dest, src, len);
return dest;
}
EXPORT_SYMBOL(strncpy);
@@ -172,7 +163,7 @@ size_t strlcat(char *dest, const char *src, size_t n)
if (len >= n)
len = n - 1;
dest[len] = '\0';
- __builtin_memcpy(dest, src, len);
+ memcpy(dest, src, len);
}
return res;
}
@@ -195,7 +186,7 @@ char *strncat(char *dest, const char *src, size_t n)
char *p = __strend(dest);
p[len] = '\0';
- __builtin_memcpy(p, src, len);
+ memcpy(p, src, len);
return dest;
}
EXPORT_SYMBOL(strncat);
@@ -349,41 +340,3 @@ void *memscan(void *s, int c, size_t n)
return (void *) ret;
}
EXPORT_SYMBOL(memscan);
-
-/**
- * memcpy - Copy one area of memory to another
- * @dest: Where to copy to
- * @src: Where to copy from
- * @n: The size of the area.
- *
- * returns a pointer to @dest
- */
-void *memcpy(void *dest, const void *src, size_t n)
-{
- return __builtin_memcpy(dest, src, n);
-}
-EXPORT_SYMBOL(memcpy);
-
-/**
- * memset - Fill a region of memory with the given value
- * @s: Pointer to the start of the area.
- * @c: The byte to fill the area with
- * @n: The size of the area.
- *
- * returns a pointer to @s
- */
-void *memset(void *s, int c, size_t n)
-{
- char *xs;
-
- if (c == 0)
- return __builtin_memset(s, 0, n);
-
- xs = (char *) s;
- if (n > 0)
- do {
- *xs++ = c;
- } while (--n > 0);
- return s;
-}
-EXPORT_SYMBOL(memset);
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
new file mode 100644
index 00000000000..53dd5d7a0c9
--- /dev/null
+++ b/arch/s390/lib/uaccess.c
@@ -0,0 +1,406 @@
+/*
+ * Standard user space access functions based on mvcp/mvcs and doing
+ * interesting things in the secondary space mode.
+ *
+ * Copyright IBM Corp. 2006, 2014
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/jump_label.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/facility.h>
+
+#ifndef CONFIG_64BIT
+#define AHI "ahi"
+#define ALR "alr"
+#define CLR "clr"
+#define LHI "lhi"
+#define SLR "slr"
+#else
+#define AHI "aghi"
+#define ALR "algr"
+#define CLR "clgr"
+#define LHI "lghi"
+#define SLR "slgr"
+#endif
+
+static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
+
+static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
+ unsigned long size)
+{
+ register unsigned long reg0 asm("0") = 0x81UL;
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -4096UL;
+ asm volatile(
+ "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
+ "9: jz 7f\n"
+ "1:"ALR" %0,%3\n"
+ " "SLR" %1,%3\n"
+ " "SLR" %2,%3\n"
+ " j 0b\n"
+ "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
+ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 4f\n"
+ "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
+ "10:"SLR" %0,%4\n"
+ " "ALR" %2,%4\n"
+ "4:"LHI" %4,-1\n"
+ " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ " bras %3,6f\n" /* memset loop */
+ " xc 0(1,%2),0(%2)\n"
+ "5: xc 0(256,%2),0(%2)\n"
+ " la %2,256(%2)\n"
+ "6:"AHI" %4,-256\n"
+ " jnm 5b\n"
+ " ex %4,0(%3)\n"
+ " j 8f\n"
+ "7:"SLR" %0,%0\n"
+ "8:\n"
+ EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
+ unsigned long size)
+{
+ unsigned long tmp1, tmp2;
+
+ load_kernel_asce();
+ tmp1 = -256UL;
+ asm volatile(
+ " sacf 0\n"
+ "0: mvcp 0(%0,%2),0(%1),%3\n"
+ "10:jz 8f\n"
+ "1:"ALR" %0,%3\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "2: mvcp 0(%0,%2),0(%1),%3\n"
+ "11:jnz 1b\n"
+ " j 8f\n"
+ "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
+ " "LHI" %3,-4096\n"
+ " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 5f\n"
+ "4: mvcp 0(%4,%2),0(%1),%3\n"
+ "12:"SLR" %0,%4\n"
+ " "ALR" %2,%4\n"
+ "5:"LHI" %4,-1\n"
+ " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ " bras %3,7f\n" /* memset loop */
+ " xc 0(1,%2),0(%2)\n"
+ "6: xc 0(256,%2),0(%2)\n"
+ " la %2,256(%2)\n"
+ "7:"AHI" %4,-256\n"
+ " jnm 6b\n"
+ " ex %4,0(%3)\n"
+ " j 9f\n"
+ "8:"SLR" %0,%0\n"
+ "9: sacf 768\n"
+ EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
+ EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ if (static_key_false(&have_mvcos))
+ return copy_from_user_mvcos(to, from, n);
+ return copy_from_user_mvcp(to, from, n);
+}
+EXPORT_SYMBOL(__copy_from_user);
+
+static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
+ unsigned long size)
+{
+ register unsigned long reg0 asm("0") = 0x810000UL;
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -4096UL;
+ asm volatile(
+ "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+ "6: jz 4f\n"
+ "1:"ALR" %0,%3\n"
+ " "SLR" %1,%3\n"
+ " "SLR" %2,%3\n"
+ " j 0b\n"
+ "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
+ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 5f\n"
+ "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
+ "7:"SLR" %0,%4\n"
+ " j 5f\n"
+ "4:"SLR" %0,%0\n"
+ "5:\n"
+ EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
+ unsigned long size)
+{
+ unsigned long tmp1, tmp2;
+
+ load_kernel_asce();
+ tmp1 = -256UL;
+ asm volatile(
+ " sacf 0\n"
+ "0: mvcs 0(%0,%1),0(%2),%3\n"
+ "7: jz 5f\n"
+ "1:"ALR" %0,%3\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "2: mvcs 0(%0,%1),0(%2),%3\n"
+ "8: jnz 1b\n"
+ " j 5f\n"
+ "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
+ " "LHI" %3,-4096\n"
+ " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "4: mvcs 0(%4,%1),0(%2),%3\n"
+ "9:"SLR" %0,%4\n"
+ " j 6f\n"
+ "5:"SLR" %0,%0\n"
+ "6: sacf 768\n"
+ EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+ EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (static_key_false(&have_mvcos))
+ return copy_to_user_mvcos(to, from, n);
+ return copy_to_user_mvcs(to, from, n);
+}
+EXPORT_SYMBOL(__copy_to_user);
+
+static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ register unsigned long reg0 asm("0") = 0x810081UL;
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -4096UL;
+ /* FIXME: copy with reduced length. */
+ asm volatile(
+ "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+ " jz 2f\n"
+ "1:"ALR" %0,%3\n"
+ " "SLR" %1,%3\n"
+ " "SLR" %2,%3\n"
+ " j 0b\n"
+ "2:"SLR" %0,%0\n"
+ "3: \n"
+ EX_TABLE(0b,3b)
+ : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ unsigned long tmp1;
+
+ load_kernel_asce();
+ asm volatile(
+ " sacf 256\n"
+ " "AHI" %0,-1\n"
+ " jo 5f\n"
+ " bras %3,3f\n"
+ "0:"AHI" %0,257\n"
+ "1: mvc 0(1,%1),0(%2)\n"
+ " la %1,1(%1)\n"
+ " la %2,1(%2)\n"
+ " "AHI" %0,-1\n"
+ " jnz 1b\n"
+ " j 5f\n"
+ "2: mvc 0(256,%1),0(%2)\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "3:"AHI" %0,-256\n"
+ " jnm 2b\n"
+ "4: ex %0,1b-0b(%3)\n"
+ "5: "SLR" %0,%0\n"
+ "6: sacf 768\n"
+ EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+ : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
+ : : "cc", "memory");
+ return size;
+}
+
+unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+ if (static_key_false(&have_mvcos))
+ return copy_in_user_mvcos(to, from, n);
+ return copy_in_user_mvc(to, from, n);
+}
+EXPORT_SYMBOL(__copy_in_user);
+
+static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
+{
+ register unsigned long reg0 asm("0") = 0x810000UL;
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -4096UL;
+ asm volatile(
+ "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
+ " jz 4f\n"
+ "1:"ALR" %0,%2\n"
+ " "SLR" %1,%2\n"
+ " j 0b\n"
+ "2: la %3,4095(%1)\n"/* %3 = to + 4095 */
+ " nr %3,%2\n" /* %3 = (to + 4095) & -4096 */
+ " "SLR" %3,%1\n"
+ " "CLR" %0,%3\n" /* copy crosses next page boundary? */
+ " jnh 5f\n"
+ "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
+ " "SLR" %0,%3\n"
+ " j 5f\n"
+ "4:"SLR" %0,%0\n"
+ "5:\n"
+ EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+ : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+ : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
+{
+ unsigned long tmp1, tmp2;
+
+ load_kernel_asce();
+ asm volatile(
+ " sacf 256\n"
+ " "AHI" %0,-1\n"
+ " jo 5f\n"
+ " bras %3,3f\n"
+ " xc 0(1,%1),0(%1)\n"
+ "0:"AHI" %0,257\n"
+ " la %2,255(%1)\n" /* %2 = ptr + 255 */
+ " srl %2,12\n"
+ " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
+ " "SLR" %2,%1\n"
+ " "CLR" %0,%2\n" /* clear crosses next page boundary? */
+ " jnh 5f\n"
+ " "AHI" %2,-1\n"
+ "1: ex %2,0(%3)\n"
+ " "AHI" %2,1\n"
+ " "SLR" %0,%2\n"
+ " j 5f\n"
+ "2: xc 0(256,%1),0(%1)\n"
+ " la %1,256(%1)\n"
+ "3:"AHI" %0,-256\n"
+ " jnm 2b\n"
+ "4: ex %0,0(%3)\n"
+ "5: "SLR" %0,%0\n"
+ "6: sacf 768\n"
+ EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+ : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+unsigned long __clear_user(void __user *to, unsigned long size)
+{
+ if (static_key_false(&have_mvcos))
+ return clear_user_mvcos(to, size);
+ return clear_user_xc(to, size);
+}
+EXPORT_SYMBOL(__clear_user);
+
+static inline unsigned long strnlen_user_srst(const char __user *src,
+ unsigned long size)
+{
+ register unsigned long reg0 asm("0") = 0;
+ unsigned long tmp1, tmp2;
+
+ asm volatile(
+ " la %2,0(%1)\n"
+ " la %3,0(%0,%1)\n"
+ " "SLR" %0,%0\n"
+ " sacf 256\n"
+ "0: srst %3,%2\n"
+ " jo 0b\n"
+ " la %0,1(%3)\n" /* strnlen_user result includes \0 */
+ " "SLR" %0,%1\n"
+ "1: sacf 768\n"
+ EX_TABLE(0b,1b)
+ : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+unsigned long __strnlen_user(const char __user *src, unsigned long size)
+{
+ if (unlikely(!size))
+ return 0;
+ load_kernel_asce();
+ return strnlen_user_srst(src, size);
+}
+EXPORT_SYMBOL(__strnlen_user);
+
+long __strncpy_from_user(char *dst, const char __user *src, long size)
+{
+ size_t done, len, offset, len_str;
+
+ if (unlikely(size <= 0))
+ return 0;
+ done = 0;
+ do {
+ offset = (size_t)src & ~PAGE_MASK;
+ len = min(size - done, PAGE_SIZE - offset);
+ if (copy_from_user(dst, src, len))
+ return -EFAULT;
+ len_str = strnlen(dst, len);
+ done += len_str;
+ src += len_str;
+ dst += len_str;
+ } while ((len_str == len) && (done < size));
+ return done;
+}
+EXPORT_SYMBOL(__strncpy_from_user);
+
+/*
+ * The "old" uaccess variant without mvcos can be enforced with the
+ * uaccess_primary kernel parameter. This is mainly for debugging purposes.
+ */
+static int uaccess_primary __initdata;
+
+static int __init parse_uaccess_pt(char *__unused)
+{
+ uaccess_primary = 1;
+ return 0;
+}
+early_param("uaccess_primary", parse_uaccess_pt);
+
+static int __init uaccess_init(void)
+{
+ if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27))
+ static_key_slow_inc(&have_mvcos);
+ return 0;
+}
+early_initcall(uaccess_init);
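
At boot, uaccess_init() flips the static key when test_facility(27) reports
the move-with-optional-specifications (mvcos) facility, so the mvcos
variants are used whenever the hardware has them. The exception-table
fixups in copy_from_user_mvcos()/mvcp() above all implement the same
recovery strategy: on a fault, retry the copy only up to the next 4K page
boundary (the largest prefix that can still be valid), zero the tail that
could not be copied, and report the number of uncopied bytes. A C sketch of
that logic, with a hypothetical raw_copy() standing in for the mvcos/mvcp
retry:

#include <string.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-in for the mvcos/mvcp retry: copies up to n bytes
 * and returns how many bytes succeeded before faulting. */
unsigned long raw_copy(void *to, const void *from, unsigned long n);

static unsigned long fixup_copy_from_user(void *to, const void *from,
					   unsigned long size)
{
	unsigned long boundary, copied = 0;

	/* bytes from 'from' up to the next 4K page boundary */
	boundary = PAGE_SIZE - ((unsigned long) from & (PAGE_SIZE - 1));
	if (size > boundary)		/* copy crosses the boundary? */
		copied = raw_copy(to, from, boundary);
	memset((char *) to + copied, 0, size - copied);
	return size - copied;		/* uncopied bytes, as the asm returns */
}
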
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
deleted file mode 100644
index 126011df14f..00000000000
--- a/arch/s390/lib/uaccess.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * arch/s390/uaccess.h
- *
- * Copyright IBM Corp. 2007
- *
- */
-
-#ifndef __ARCH_S390_LIB_UACCESS_H
-#define __ARCH_S390_LIB_UACCESS_H
-
-extern size_t copy_from_user_std(size_t, const void __user *, void *);
-extern size_t copy_to_user_std(size_t, void __user *, const void *);
-extern size_t strnlen_user_std(size_t, const char __user *);
-extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
-extern int futex_atomic_cmpxchg_std(int __user *, int, int);
-extern int futex_atomic_op_std(int, int __user *, int, int *);
-
-extern size_t copy_from_user_pt(size_t, const void __user *, void *);
-extern size_t copy_to_user_pt(size_t, void __user *, const void *);
-extern int futex_atomic_op_pt(int, int __user *, int, int *);
-extern int futex_atomic_cmpxchg_pt(int __user *, int, int);
-
-#endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
deleted file mode 100644
index 60455f104ea..00000000000
--- a/arch/s390/lib/uaccess_mvcos.c
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * arch/s390/lib/uaccess_mvcos.c
- *
- * Optimized user space space access functions based on mvcos.
- *
- * Copyright (C) IBM Corp. 2006
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <asm/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef __s390x__
-#define AHI "ahi"
-#define ALR "alr"
-#define CLR "clr"
-#define LHI "lhi"
-#define SLR "slr"
-#else
-#define AHI "aghi"
-#define ALR "algr"
-#define CLR "clgr"
-#define LHI "lghi"
-#define SLR "slgr"
-#endif
-
-static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
-{
- register unsigned long reg0 asm("0") = 0x81UL;
- unsigned long tmp1, tmp2;
-
- tmp1 = -4096UL;
- asm volatile(
- "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
- "9: jz 7f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
- " j 0b\n"
- "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
- " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 4f\n"
- "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
- "10:"SLR" %0,%4\n"
- " "ALR" %2,%4\n"
- "4:"LHI" %4,-1\n"
- " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
- " bras %3,6f\n" /* memset loop */
- " xc 0(1,%2),0(%2)\n"
- "5: xc 0(256,%2),0(%2)\n"
- " la %2,256(%2)\n"
- "6:"AHI" %4,-256\n"
- " jnm 5b\n"
- " ex %4,0(%3)\n"
- " j 8f\n"
- "7:"SLR" %0,%0\n"
- "8: \n"
- EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
- : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
- return size;
-}
-
-static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
-{
- if (size <= 256)
- return copy_from_user_std(size, ptr, x);
- return copy_from_user_mvcos(size, ptr, x);
-}
-
-static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
-{
- register unsigned long reg0 asm("0") = 0x810000UL;
- unsigned long tmp1, tmp2;
-
- tmp1 = -4096UL;
- asm volatile(
- "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
- "6: jz 4f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
- " j 0b\n"
- "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
- " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
- "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
- "7:"SLR" %0,%4\n"
- " j 5f\n"
- "4:"SLR" %0,%0\n"
- "5: \n"
- EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
- : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
- return size;
-}
-
-static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
- const void *x)
-{
- if (size <= 256)
- return copy_to_user_std(size, ptr, x);
- return copy_to_user_mvcos(size, ptr, x);
-}
-
-static size_t copy_in_user_mvcos(size_t size, void __user *to,
- const void __user *from)
-{
- register unsigned long reg0 asm("0") = 0x810081UL;
- unsigned long tmp1, tmp2;
-
- tmp1 = -4096UL;
- /* FIXME: copy with reduced length. */
- asm volatile(
- "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
- " jz 2f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
- " j 0b\n"
- "2:"SLR" %0,%0\n"
- "3: \n"
- EX_TABLE(0b,3b)
- : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
- return size;
-}
-
-static size_t clear_user_mvcos(size_t size, void __user *to)
-{
- register unsigned long reg0 asm("0") = 0x810000UL;
- unsigned long tmp1, tmp2;
-
- tmp1 = -4096UL;
- asm volatile(
- "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
- " jz 4f\n"
- "1:"ALR" %0,%2\n"
- " "SLR" %1,%2\n"
- " j 0b\n"
- "2: la %3,4095(%1)\n"/* %4 = to + 4095 */
- " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
- " "SLR" %3,%1\n"
- " "CLR" %0,%3\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
- "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
- " "SLR" %0,%3\n"
- " j 5f\n"
- "4:"SLR" %0,%0\n"
- "5: \n"
- EX_TABLE(0b,2b) EX_TABLE(3b,5b)
- : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
- : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
- return size;
-}
-
-static size_t strnlen_user_mvcos(size_t count, const char __user *src)
-{
- char buf[256];
- int rc;
- size_t done, len, len_str;
-
- done = 0;
- do {
- len = min(count - done, (size_t) 256);
- rc = uaccess.copy_from_user(len, src + done, buf);
- if (unlikely(rc == len))
- return 0;
- len -= rc;
- len_str = strnlen(buf, len);
- done += len_str;
- } while ((len_str == len) && (done < count));
- return done + 1;
-}
-
-static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
- char *dst)
-{
- int rc;
- size_t done, len, len_str;
-
- done = 0;
- do {
- len = min(count - done, (size_t) 4096);
- rc = uaccess.copy_from_user(len, src + done, dst);
- if (unlikely(rc == len))
- return -EFAULT;
- len -= rc;
- len_str = strnlen(dst, len);
- done += len_str;
- } while ((len_str == len) && (done < count));
- return done;
-}
-
-struct uaccess_ops uaccess_mvcos = {
- .copy_from_user = copy_from_user_mvcos_check,
- .copy_from_user_small = copy_from_user_std,
- .copy_to_user = copy_to_user_mvcos_check,
- .copy_to_user_small = copy_to_user_std,
- .copy_in_user = copy_in_user_mvcos,
- .clear_user = clear_user_mvcos,
- .strnlen_user = strnlen_user_std,
- .strncpy_from_user = strncpy_from_user_std,
- .futex_atomic_op = futex_atomic_op_std,
- .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
-};
-
-struct uaccess_ops uaccess_mvcos_switch = {
- .copy_from_user = copy_from_user_mvcos,
- .copy_from_user_small = copy_from_user_mvcos,
- .copy_to_user = copy_to_user_mvcos,
- .copy_to_user_small = copy_to_user_mvcos,
- .copy_in_user = copy_in_user_mvcos,
- .clear_user = clear_user_mvcos,
- .strnlen_user = strnlen_user_mvcos,
- .strncpy_from_user = strncpy_from_user_mvcos,
- .futex_atomic_op = futex_atomic_op_pt,
- .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
-};
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
deleted file mode 100644
index 404f2de296d..00000000000
--- a/arch/s390/lib/uaccess_pt.c
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- * arch/s390/lib/uaccess_pt.c
- *
- * User access functions based on page table walks for enhanced
- * system layout without hardware support.
- *
- * Copyright IBM Corp. 2006
- * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/errno.h>
-#include <linux/hardirq.h>
-#include <linux/mm.h>
-#include <asm/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
-
- pgd = pgd_offset(mm, addr);
- if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- return (pte_t *) 0x3a;
-
- pud = pud_offset(pgd, addr);
- if (pud_none(*pud) || unlikely(pud_bad(*pud)))
- return (pte_t *) 0x3b;
-
- pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
- return (pte_t *) 0x10;
-
- return pte_offset_map(pmd, addr);
-}
-
-static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
- size_t n, int write_user)
-{
- struct mm_struct *mm = current->mm;
- unsigned long offset, pfn, done, size;
- pte_t *pte;
- void *from, *to;
-
- done = 0;
-retry:
- spin_lock(&mm->page_table_lock);
- do {
- pte = follow_table(mm, uaddr);
- if ((unsigned long) pte < 0x1000)
- goto fault;
- if (!pte_present(*pte)) {
- pte = (pte_t *) 0x11;
- goto fault;
- } else if (write_user && !pte_write(*pte)) {
- pte = (pte_t *) 0x04;
- goto fault;
- }
-
- pfn = pte_pfn(*pte);
- offset = uaddr & (PAGE_SIZE - 1);
- size = min(n - done, PAGE_SIZE - offset);
- if (write_user) {
- to = (void *)((pfn << PAGE_SHIFT) + offset);
- from = kptr + done;
- } else {
- from = (void *)((pfn << PAGE_SHIFT) + offset);
- to = kptr + done;
- }
- memcpy(to, from, size);
- done += size;
- uaddr += size;
- } while (done < n);
- spin_unlock(&mm->page_table_lock);
- return n - done;
-fault:
- spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, (unsigned long) pte, write_user))
- return n - done;
- goto retry;
-}
-
-/*
- * Do DAT for user address by page table walk, return kernel address.
- * This function needs to be called with current->mm->page_table_lock held.
- */
-static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
-{
- struct mm_struct *mm = current->mm;
- unsigned long pfn;
- pte_t *pte;
- int rc;
-
-retry:
- pte = follow_table(mm, uaddr);
- if ((unsigned long) pte < 0x1000)
- goto fault;
- if (!pte_present(*pte)) {
- pte = (pte_t *) 0x11;
- goto fault;
- }
-
- pfn = pte_pfn(*pte);
- return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
-fault:
- spin_unlock(&mm->page_table_lock);
- rc = __handle_fault(uaddr, (unsigned long) pte, 0);
- spin_lock(&mm->page_table_lock);
- if (!rc)
- goto retry;
- return 0;
-}
-
-size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
-{
- size_t rc;
-
- if (segment_eq(get_fs(), KERNEL_DS)) {
- memcpy(to, (void __kernel __force *) from, n);
- return 0;
- }
- rc = __user_copy_pt((unsigned long) from, to, n, 0);
- if (unlikely(rc))
- memset(to + n - rc, 0, rc);
- return rc;
-}
-
-size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
-{
- if (segment_eq(get_fs(), KERNEL_DS)) {
- memcpy((void __kernel __force *) to, from, n);
- return 0;
- }
- return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
-}
-
-static size_t clear_user_pt(size_t n, void __user *to)
-{
- long done, size, ret;
-
- if (segment_eq(get_fs(), KERNEL_DS)) {
- memset((void __kernel __force *) to, 0, n);
- return 0;
- }
- done = 0;
- do {
- if (n - done > PAGE_SIZE)
- size = PAGE_SIZE;
- else
- size = n - done;
- ret = __user_copy_pt((unsigned long) to + done,
- &empty_zero_page, size, 1);
- done += size;
- if (ret)
- return ret + n - done;
- } while (done < n);
- return 0;
-}
-
-static size_t strnlen_user_pt(size_t count, const char __user *src)
-{
- char *addr;
- unsigned long uaddr = (unsigned long) src;
- struct mm_struct *mm = current->mm;
- unsigned long offset, pfn, done, len;
- pte_t *pte;
- size_t len_str;
-
- if (segment_eq(get_fs(), KERNEL_DS))
- return strnlen((const char __kernel __force *) src, count) + 1;
- done = 0;
-retry:
- spin_lock(&mm->page_table_lock);
- do {
- pte = follow_table(mm, uaddr);
- if ((unsigned long) pte < 0x1000)
- goto fault;
- if (!pte_present(*pte)) {
- pte = (pte_t *) 0x11;
- goto fault;
- }
-
- pfn = pte_pfn(*pte);
- offset = uaddr & (PAGE_SIZE-1);
- addr = (char *)(pfn << PAGE_SHIFT) + offset;
- len = min(count - done, PAGE_SIZE - offset);
- len_str = strnlen(addr, len);
- done += len_str;
- uaddr += len_str;
- } while ((len_str == len) && (done < count));
- spin_unlock(&mm->page_table_lock);
- return done + 1;
-fault:
- spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, (unsigned long) pte, 0))
- return 0;
- goto retry;
-}
-
-static size_t strncpy_from_user_pt(size_t count, const char __user *src,
- char *dst)
-{
- size_t n = strnlen_user_pt(count, src);
-
- if (!n)
- return -EFAULT;
- if (n > count)
- n = count;
- if (segment_eq(get_fs(), KERNEL_DS)) {
- memcpy(dst, (const char __kernel __force *) src, n);
- if (dst[n-1] == '\0')
- return n-1;
- else
- return n;
- }
- if (__user_copy_pt((unsigned long) src, dst, n, 0))
- return -EFAULT;
- if (dst[n-1] == '\0')
- return n-1;
- else
- return n;
-}
-
-static size_t copy_in_user_pt(size_t n, void __user *to,
- const void __user *from)
-{
- struct mm_struct *mm = current->mm;
- unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
- uaddr, done, size, error_code;
- unsigned long uaddr_from = (unsigned long) from;
- unsigned long uaddr_to = (unsigned long) to;
- pte_t *pte_from, *pte_to;
- int write_user;
-
- if (segment_eq(get_fs(), KERNEL_DS)) {
- memcpy((void __force *) to, (void __force *) from, n);
- return 0;
- }
- done = 0;
-retry:
- spin_lock(&mm->page_table_lock);
- do {
- write_user = 0;
- uaddr = uaddr_from;
- pte_from = follow_table(mm, uaddr_from);
- error_code = (unsigned long) pte_from;
- if (error_code < 0x1000)
- goto fault;
- if (!pte_present(*pte_from)) {
- error_code = 0x11;
- goto fault;
- }
-
- write_user = 1;
- uaddr = uaddr_to;
- pte_to = follow_table(mm, uaddr_to);
- error_code = (unsigned long) pte_to;
- if (error_code < 0x1000)
- goto fault;
- if (!pte_present(*pte_to)) {
- error_code = 0x11;
- goto fault;
- } else if (!pte_write(*pte_to)) {
- error_code = 0x04;
- goto fault;
- }
-
- pfn_from = pte_pfn(*pte_from);
- pfn_to = pte_pfn(*pte_to);
- offset_from = uaddr_from & (PAGE_SIZE-1);
- offset_to = uaddr_from & (PAGE_SIZE-1);
- offset_max = max(offset_from, offset_to);
- size = min(n - done, PAGE_SIZE - offset_max);
-
- memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
- (void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
- done += size;
- uaddr_from += size;
- uaddr_to += size;
- } while (done < n);
- spin_unlock(&mm->page_table_lock);
- return n - done;
-fault:
- spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, error_code, write_user))
- return n - done;
- goto retry;
-}
-
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
- asm volatile("0: l %1,0(%6)\n" \
- "1: " insn \
- "2: cs %1,%2,0(%6)\n" \
- "3: jl 1b\n" \
- " lhi %0,0\n" \
- "4:\n" \
- EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
- : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
- "=m" (*uaddr) \
- : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
- "m" (*uaddr) : "cc" );
-
-static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
-{
- int oldval = 0, newval, ret;
-
- switch (op) {
- case FUTEX_OP_SET:
- __futex_atomic_op("lr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_ADD:
- __futex_atomic_op("lr %2,%1\nar %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_OR:
- __futex_atomic_op("lr %2,%1\nor %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_ANDN:
- __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_XOR:
- __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- default:
- ret = -ENOSYS;
- }
- if (ret == 0)
- *old = oldval;
- return ret;
-}
-
-int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
-{
- int ret;
-
- if (segment_eq(get_fs(), KERNEL_DS))
- return __futex_atomic_op_pt(op, uaddr, oparg, old);
- spin_lock(&current->mm->page_table_lock);
- uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
- if (!uaddr) {
- spin_unlock(&current->mm->page_table_lock);
- return -EFAULT;
- }
- get_page(virt_to_page(uaddr));
- spin_unlock(&current->mm->page_table_lock);
- ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
- put_page(virt_to_page(uaddr));
- return ret;
-}
-
-static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
-{
- int ret;
-
- asm volatile("0: cs %1,%4,0(%5)\n"
- "1: lr %0,%1\n"
- "2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
- : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
- : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
- : "cc", "memory" );
- return ret;
-}
-
-int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
-{
- int ret;
-
- if (segment_eq(get_fs(), KERNEL_DS))
- return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
- spin_lock(&current->mm->page_table_lock);
- uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
- if (!uaddr) {
- spin_unlock(&current->mm->page_table_lock);
- return -EFAULT;
- }
- get_page(virt_to_page(uaddr));
- spin_unlock(&current->mm->page_table_lock);
- ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
- put_page(virt_to_page(uaddr));
- return ret;
-}
-
-struct uaccess_ops uaccess_pt = {
- .copy_from_user = copy_from_user_pt,
- .copy_from_user_small = copy_from_user_pt,
- .copy_to_user = copy_to_user_pt,
- .copy_to_user_small = copy_to_user_pt,
- .copy_in_user = copy_in_user_pt,
- .clear_user = clear_user_pt,
- .strnlen_user = strnlen_user_pt,
- .strncpy_from_user = strncpy_from_user_pt,
- .futex_atomic_op = futex_atomic_op_pt,
- .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
-};
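
The removed follow_table() overloaded its return value: genuine pte
pointers are always above 0x1000, so smaller values double as DAT
program-interruption codes for __handle_fault() (0x04 protection, 0x10
segment translation, 0x11 page translation, 0x3a/0x3b region translation),
which is why every caller tests (unsigned long) pte < 0x1000. A sketch of
that convention (hypothetical helper, same encoding):

/* Sketch of the removed error-code-in-pointer convention: values below
 * 0x1000 are DAT program-interruption codes, anything else is a pointer. */
static inline int pte_is_error(const void *pte, unsigned long *code)
{
	if ((unsigned long) pte >= 0x1000)
		return 0;			/* a real pte pointer */
	*code = (unsigned long) pte;		/* e.g. 0x11: page translation */
	return 1;
}
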
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
deleted file mode 100644
index 07deaeee14c..00000000000
--- a/arch/s390/lib/uaccess_std.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * arch/s390/lib/uaccess_std.c
- *
- * Standard user space access functions based on mvcp/mvcs and doing
- * interesting things in the secondary space mode.
- *
- * Copyright (C) IBM Corp. 2006
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef __s390x__
-#define AHI "ahi"
-#define ALR "alr"
-#define CLR "clr"
-#define LHI "lhi"
-#define SLR "slr"
-#else
-#define AHI "aghi"
-#define ALR "algr"
-#define CLR "clgr"
-#define LHI "lghi"
-#define SLR "slgr"
-#endif
-
-size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
-{
- unsigned long tmp1, tmp2;
-
- tmp1 = -256UL;
- asm volatile(
- "0: mvcp 0(%0,%2),0(%1),%3\n"
- "10:jz 8f\n"
- "1:"ALR" %0,%3\n"
- " la %1,256(%1)\n"
- " la %2,256(%2)\n"
- "2: mvcp 0(%0,%2),0(%1),%3\n"
- "11:jnz 1b\n"
- " j 8f\n"
- "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
- " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
- "4: mvcp 0(%4,%2),0(%1),%3\n"
- "12:"SLR" %0,%4\n"
- " "ALR" %2,%4\n"
- "5:"LHI" %4,-1\n"
- " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
- " bras %3,7f\n" /* memset loop */
- " xc 0(1,%2),0(%2)\n"
- "6: xc 0(256,%2),0(%2)\n"
- " la %2,256(%2)\n"
- "7:"AHI" %4,-256\n"
- " jnm 6b\n"
- " ex %4,0(%3)\n"
- " j 9f\n"
- "8:"SLR" %0,%0\n"
- "9: \n"
- EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
- EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
- : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : : "cc", "memory");
- return size;
-}
-
-static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
- void *x)
-{
- if (size <= 1024)
- return copy_from_user_std(size, ptr, x);
- return copy_from_user_pt(size, ptr, x);
-}
-
-size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
-{
- unsigned long tmp1, tmp2;
-
- tmp1 = -256UL;
- asm volatile(
- "0: mvcs 0(%0,%1),0(%2),%3\n"
- "7: jz 5f\n"
- "1:"ALR" %0,%3\n"
- " la %1,256(%1)\n"
- " la %2,256(%2)\n"
- "2: mvcs 0(%0,%1),0(%2),%3\n"
- "8: jnz 1b\n"
- " j 5f\n"
- "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
- " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 6f\n"
- "4: mvcs 0(%4,%1),0(%2),%3\n"
- "9:"SLR" %0,%4\n"
- " j 6f\n"
- "5:"SLR" %0,%0\n"
- "6: \n"
- EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
- EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
- : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : : "cc", "memory");
- return size;
-}
-
-static size_t copy_to_user_std_check(size_t size, void __user *ptr,
- const void *x)
-{
- if (size <= 1024)
- return copy_to_user_std(size, ptr, x);
- return copy_to_user_pt(size, ptr, x);
-}
-
-static size_t copy_in_user_std(size_t size, void __user *to,
- const void __user *from)
-{
- unsigned long tmp1;
-
- asm volatile(
- " "AHI" %0,-1\n"
- " jo 5f\n"
- " sacf 256\n"
- " bras %3,3f\n"
- "0:"AHI" %0,257\n"
- "1: mvc 0(1,%1),0(%2)\n"
- " la %1,1(%1)\n"
- " la %2,1(%2)\n"
- " "AHI" %0,-1\n"
- " jnz 1b\n"
- " j 5f\n"
- "2: mvc 0(256,%1),0(%2)\n"
- " la %1,256(%1)\n"
- " la %2,256(%2)\n"
- "3:"AHI" %0,-256\n"
- " jnm 2b\n"
- "4: ex %0,1b-0b(%3)\n"
- " sacf 0\n"
- "5: "SLR" %0,%0\n"
- "6:\n"
- EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
- : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
- : : "cc", "memory");
- return size;
-}
-
-static size_t clear_user_std(size_t size, void __user *to)
-{
- unsigned long tmp1, tmp2;
-
- asm volatile(
- " "AHI" %0,-1\n"
- " jo 5f\n"
- " sacf 256\n"
- " bras %3,3f\n"
- " xc 0(1,%1),0(%1)\n"
- "0:"AHI" %0,257\n"
- " la %2,255(%1)\n" /* %2 = ptr + 255 */
- " srl %2,12\n"
- " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
- " "SLR" %2,%1\n"
- " "CLR" %0,%2\n" /* clear crosses next page boundary? */
- " jnh 5f\n"
- " "AHI" %2,-1\n"
- "1: ex %2,0(%3)\n"
- " "AHI" %2,1\n"
- " "SLR" %0,%2\n"
- " j 5f\n"
- "2: xc 0(256,%1),0(%1)\n"
- " la %1,256(%1)\n"
- "3:"AHI" %0,-256\n"
- " jnm 2b\n"
- "4: ex %0,0(%3)\n"
- " sacf 0\n"
- "5: "SLR" %0,%0\n"
- "6:\n"
- EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
- : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
- : : "cc", "memory");
- return size;
-}
-
-size_t strnlen_user_std(size_t size, const char __user *src)
-{
- register unsigned long reg0 asm("0") = 0UL;
- unsigned long tmp1, tmp2;
-
- asm volatile(
- " la %2,0(%1)\n"
- " la %3,0(%0,%1)\n"
- " "SLR" %0,%0\n"
- " sacf 256\n"
- "0: srst %3,%2\n"
- " jo 0b\n"
- " la %0,1(%3)\n" /* strnlen_user results includes \0 */
- " "SLR" %0,%1\n"
- "1: sacf 0\n"
- EX_TABLE(0b,1b)
- : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
- return size;
-}
-
-size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
-{
- register unsigned long reg0 asm("0") = 0UL;
- unsigned long tmp1, tmp2;
-
- asm volatile(
- " la %3,0(%1)\n"
- " la %4,0(%0,%1)\n"
- " sacf 256\n"
- "0: srst %4,%3\n"
- " jo 0b\n"
- " sacf 0\n"
- " la %0,0(%4)\n"
- " jh 1f\n" /* found \0 in string ? */
- " "AHI" %4,1\n" /* include \0 in copy */
- "1:"SLR" %0,%1\n" /* %0 = return length (without \0) */
- " "SLR" %4,%1\n" /* %4 = copy length (including \0) */
- "2: mvcp 0(%4,%2),0(%1),%5\n"
- " jz 9f\n"
- "3:"AHI" %4,-256\n"
- " la %1,256(%1)\n"
- " la %2,256(%2)\n"
- "4: mvcp 0(%4,%2),0(%1),%5\n"
- " jnz 3b\n"
- " j 9f\n"
- "7: sacf 0\n"
- "8:"LHI" %0,%6\n"
- "9:\n"
- EX_TABLE(0b,7b) EX_TABLE(2b,8b) EX_TABLE(4b,8b)
- : "+a" (size), "+a" (src), "+d" (dst), "=a" (tmp1), "=a" (tmp2)
- : "d" (reg0), "K" (-EFAULT) : "cc", "memory");
- return size;
-}
-
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
- asm volatile( \
- " sacf 256\n" \
- "0: l %1,0(%6)\n" \
- "1:"insn \
- "2: cs %1,%2,0(%6)\n" \
- "3: jl 1b\n" \
- " lhi %0,0\n" \
- "4: sacf 0\n" \
- EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
- : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
- "=m" (*uaddr) \
- : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
- "m" (*uaddr) : "cc");
-
-int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
-{
- int oldval = 0, newval, ret;
-
- switch (op) {
- case FUTEX_OP_SET:
- __futex_atomic_op("lr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_ADD:
- __futex_atomic_op("lr %2,%1\nar %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_OR:
- __futex_atomic_op("lr %2,%1\nor %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_ANDN:
- __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_XOR:
- __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- default:
- ret = -ENOSYS;
- }
- *old = oldval;
- return ret;
-}
-
-int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval)
-{
- int ret;
-
- asm volatile(
- " sacf 256\n"
- "0: cs %1,%4,0(%5)\n"
- "1: lr %0,%1\n"
- "2: sacf 0\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
- : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
- : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
- : "cc", "memory" );
- return ret;
-}
-
-struct uaccess_ops uaccess_std = {
- .copy_from_user = copy_from_user_std_check,
- .copy_from_user_small = copy_from_user_std,
- .copy_to_user = copy_to_user_std_check,
- .copy_to_user_small = copy_to_user_std,
- .copy_in_user = copy_in_user_std,
- .clear_user = clear_user_std,
- .strnlen_user = strnlen_user_std,
- .strncpy_from_user = strncpy_from_user_std,
- .futex_atomic_op = futex_atomic_op_std,
- .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
-};
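
Both removed futex helpers follow the classic load/modify/compare-and-swap
pattern: load the old value, compute the new one, and let cs retry until no
other CPU changed the word in between. A C model of the loop, using the GCC
builtin in place of the cs instruction (apply_op() is a hypothetical
stand-in for the per-opcode insn passed to the macro):

/* Hypothetical model of the __futex_atomic_op() l/cs retry loop. */
static int futex_op_model(int *uaddr, int oparg, int *old,
			  int (*apply_op)(int oldval, int oparg))
{
	int oldval, newval;

	do {
		oldval = *uaddr;			/* l  %1,0(%6) */
		newval = apply_op(oldval, oparg);	/* lr/ar/or/nr/xr */
	} while (!__sync_bool_compare_and_swap(uaddr, oldval, newval));
	*old = oldval;					/* cs succeeded */
	return 0;
}
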
diff --git a/arch/s390/lib/usercopy.c b/arch/s390/lib/usercopy.c
deleted file mode 100644
index 14b363fec8a..00000000000
--- a/arch/s390/lib/usercopy.c
+++ /dev/null
@@ -1,8 +0,0 @@
-#include <linux/module.h>
-#include <linux/bug.h>
-
-void copy_from_user_overflow(void)
-{
- WARN(1, "Buffer overflow detected!\n");
-}
-EXPORT_SYMBOL(copy_from_user_overflow);