Diffstat (limited to 'arch/s390/include')
67 files changed, 2731 insertions, 1610 deletions
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index f313f9cbcf4..57892a8a905 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,4 +1,7 @@
 generic-y += clkdev.h
+generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
 generic-y += trace_clock.h
diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h
index 4bbb5957ed1..bd93ff6661b 100644
--- a/arch/s390/include/asm/airq.h
+++ b/arch/s390/include/asm/airq.h
@@ -44,11 +44,21 @@ struct airq_iv {
 
 struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
 void airq_iv_release(struct airq_iv *iv);
-unsigned long airq_iv_alloc_bit(struct airq_iv *iv);
-void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit);
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num);
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num);
 unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
 			   unsigned long end);
 
+static inline unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
+{
+	return airq_iv_alloc(iv, 1);
+}
+
+static inline void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
+{
+	airq_iv_free(iv, bit, 1);
+}
+
 static inline unsigned long airq_iv_end(struct airq_iv *iv)
 {
 	return iv->end;
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c797832daa5..fa934fe080c 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,25 +15,61 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#define __CS_LOOP(ptr, op_val, op_string) ({ \
+#define __ATOMIC_NO_BARRIER	"\n"
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OR	"lao"
+#define __ATOMIC_AND	"lan"
+#define __ATOMIC_ADD	"laa"
+#define __ATOMIC_BARRIER "bcr	14,0\n"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
+({ \
+	int old_val; \
+ \
+	typecheck(atomic_t *, ptr); \
+	asm volatile( \
+		__barrier \
+		op_string "	%0,%2,%1\n" \
+		__barrier \
+		: "=d" (old_val), "+Q" ((ptr)->counter) \
+		: "d" (op_val) \
+		: "cc", "memory"); \
+	old_val; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OR	"or"
+#define __ATOMIC_AND	"nr"
+#define __ATOMIC_ADD	"ar"
+#define __ATOMIC_BARRIER "\n"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
+({ \
 	int old_val, new_val; \
+ \
+	typecheck(atomic_t *, ptr); \
 	asm volatile( \
 		"	l	%0,%2\n" \
 		"0:	lr	%1,%0\n" \
 		op_string "	%1,%3\n" \
 		"	cs	%0,%1,%2\n" \
 		"	jl	0b" \
-		: "=&d" (old_val), "=&d" (new_val), \
-		  "=Q" (((atomic_t *)(ptr))->counter) \
-		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+		: "d" (op_val) \
 		: "cc", "memory"); \
-	new_val; \
+	old_val; \
 })
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline int atomic_read(const atomic_t *v)
 {
 	int c;
@@ -53,32 +89,43 @@ static inline void atomic_set(atomic_t *v, int i)
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	return __CS_LOOP(v, i, "ar");
+	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
 }
-#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
-#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v)			atomic_add_return(1, _v)
-#define atomic_inc_return(_v)		atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
-	return __CS_LOOP(v, i, "sr");
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+		asm volatile(
+			"asi	%0,%1\n"
+			: "+Q" (v->counter)
+			: "i" (i)
+			: "cc", "memory");
+		return;
+	}
+#endif
+	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
 }
-#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
+
+#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)			atomic_add(1, _v)
+#define atomic_inc_return(_v)		atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
+#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
+#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
 #define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v)			atomic_sub_return(1, _v)
+#define atomic_dec(_v)			atomic_sub(1, _v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-	__CS_LOOP(v, ~mask, "nr");
+	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
 }
 
-static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-	__CS_LOOP(v, mask, "or");
+	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
 }
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -87,8 +134,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	asm volatile(
 		"	cs	%0,%2,%1"
-		: "+d" (old), "=Q" (v->counter)
-		: "d" (new), "Q" (v->counter)
+		: "+d" (old), "+Q" (v->counter)
+		: "d" (new)
 		: "cc", "memory");
 	return old;
 }
@@ -109,27 +156,62 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }
 
-#undef __CS_LOOP
+#undef __ATOMIC_LOOP
 
 #define ATOMIC64_INIT(i)  { (i) }
 
 #ifdef CONFIG_64BIT
 
-#define __CSG_LOOP(ptr, op_val, op_string) ({ \
+#define __ATOMIC64_NO_BARRIER	"\n"
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC64_OR	"laog"
+#define __ATOMIC64_AND	"lang"
+#define __ATOMIC64_ADD	"laag"
+#define __ATOMIC64_BARRIER "bcr	14,0\n"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
+({ \
+	long long old_val; \
+ \
+	typecheck(atomic64_t *, ptr); \
+	asm volatile( \
+		__barrier \
+		op_string "	%0,%2,%1\n" \
+		__barrier \
+		: "=d" (old_val), "+Q" ((ptr)->counter) \
+		: "d" (op_val) \
+		: "cc", "memory"); \
+	old_val; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC64_OR	"ogr"
+#define __ATOMIC64_AND	"ngr"
+#define __ATOMIC64_ADD	"agr"
+#define __ATOMIC64_BARRIER "\n"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
+({ \
 	long long old_val, new_val; \
+ \
+	typecheck(atomic64_t *, ptr); \
 	asm volatile( \
 		"	lg	%0,%2\n" \
 		"0:	lgr	%1,%0\n" \
 		op_string "	%1,%3\n" \
 		"	csg	%0,%1,%2\n" \
 		"	jl	0b" \
-		: "=&d" (old_val), "=&d" (new_val), \
-		  "=Q" (((atomic_t *)(ptr))->counter) \
-		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+		: "d" (op_val) \
 		: "cc", "memory"); \
-	new_val; \
+	old_val; \
 })
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline long long atomic64_read(const atomic64_t *v)
 {
 	long long c;
@@ -149,22 +231,32 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	return __CSG_LOOP(v, i, "agr");
+	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
 }
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
-	return __CSG_LOOP(v, i, "sgr");
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+		asm volatile(
+			"agsi	%0,%1\n"
+			: "+Q" (v->counter)
+			: "i" (i)
+			: "cc", "memory");
+		return;
+	}
+#endif
+	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
 }
 
 static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
-	__CSG_LOOP(v, ~mask, "ngr");
+	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
 }
 
 static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 {
-	__CSG_LOOP(v, mask, "ogr");
+	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -174,13 +266,13 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 {
 	asm volatile(
 		"	csg	%0,%2,%1"
-		: "+d" (old), "=Q" (v->counter)
-		: "d" (new), "Q" (v->counter)
+		: "+d" (old), "+Q" (v->counter)
+		: "d" (new)
 		: "cc", "memory");
 	return old;
 }
 
-#undef __CSG_LOOP
+#undef __ATOMIC64_LOOP
 
 #else /* CONFIG_64BIT */
 
@@ -216,8 +308,8 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
 		"	lm	%0,%N0,%1\n"
 		"0:	cds	%0,%2,%1\n"
 		"	jl	0b\n"
-		: "=&d" (rp_old), "=Q" (v->counter)
-		: "d" (rp_new), "Q" (v->counter)
+		: "=&d" (rp_old), "+Q" (v->counter)
+		: "d" (rp_new)
 		: "cc");
 	return rp_old.pair;
 }
@@ -230,8 +322,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 
 	asm volatile(
 		"	cds	%0,%2,%1"
-		: "+&d" (rp_old), "=Q" (v->counter)
-		: "d" (rp_new), "Q" (v->counter)
+		: "+&d" (rp_old), "+Q" (v->counter)
+		: "d" (rp_new)
 		: "cc");
 	return rp_old.pair;
 }
@@ -248,17 +340,6 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
 	return new;
 }
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-	long long old, new;
-
-	do {
-		old = atomic64_read(v);
-		new = old - i;
-	} while (atomic64_cmpxchg(v, old, new) != old);
-	return new;
-}
-
 static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
 {
 	long long old, new;
@@ -279,9 +360,14 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
 	} while (atomic64_cmpxchg(v, old, new) != old);
 }
 
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+	atomic64_add_return(i, v);
+}
+
 #endif /* CONFIG_64BIT */
 
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 {
 	long long c, old;
 
@@ -289,7 +375,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 	for (;;) {
 		if (unlikely(c == u))
 			break;
-		old = atomic64_cmpxchg(v, c, c + a);
+		old = atomic64_cmpxchg(v, c, c + i);
 		if (likely(old == c))
 			break;
 		c = old;
@@ -314,21 +400,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 	return dec;
 }
 
-#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
 #define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc(_v)		atomic64_add(1, _v)
 #define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
-#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
+#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
 #define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec(_v)		atomic64_sub(1, _v)
 #define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
 #define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
-
 #endif /* __ARCH_S390_ATOMIC__ */
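The caller-visible effect of the atomic.h rework: atomic_add() and atomic64_add() are now void functions, and on z196 or newer a small constant increment compiles to a single add-immediate (asi/agsi) or load-and-add (laa/laag) instruction instead of a compare-and-swap loop. A minimal sketch of how the reworked API is used (the counter and functions are hypothetical, not from this patch):

static atomic_t nr_pending = ATOMIC_INIT(0);

static void submit_one(void)
{
	atomic_add(1, &nr_pending);	/* void; single asi/laa on z196+ */
}

static int complete_one(void)
{
	/* the *_return variants keep full barrier semantics and return the new value */
	return atomic_add_return(-1, &nr_pending);
}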
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 16760eeb79b..19ff956b752 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -27,9 +27,25 @@
 #define smp_rmb()			rmb()
 #define smp_wmb()			wmb()
 #define smp_read_barrier_depends()	read_barrier_depends()
-#define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	smp_mb()
+
+#define smp_mb__before_atomic()		smp_mb()
+#define smp_mb__after_atomic()		smp_mb()
 
 #define set_mb(var, value)		do { var = value; mb(); } while (0)
 
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	barrier(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	barrier(); \
+	___p1; \
+})
+
 #endif /* __ASM_BARRIER_H */
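smp_store_release()/smp_load_acquire() give one-way ordering around a single access; on s390, which is already strongly ordered, both reduce to a compiler barrier plus the access itself. A sketch of the canonical pairing these primitives are meant for (flag and payload names are made up):

static int payload;
static int ready;

static void publisher(void)
{
	payload = 42;
	smp_store_release(&ready, 1);	/* payload store cannot pass the flag store */
}

static int subscriber(void)
{
	if (smp_load_acquire(&ready))	/* flag load ordered before the payload load */
		return payload;
	return -1;
}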
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 10135a38673..52054247767 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -1,10 +1,40 @@
 /*
- * S390 version
- *   Copyright IBM Corp. 1999
- *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Copyright IBM Corp. 1999,2013
 *
- * Derived from "include/asm-i386/bitops.h"
- *   Copyright (C) 1992, Linus Torvalds
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *
+ * The description below was taken in large parts from the powerpc
+ * bitops header file:
+ * Within a word, bits are numbered LSB first.  Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for an
+ * s390x system the bits end up numbered:
+ *   |63..............0|127............64|191...........128|255...........192|
+ * and on s390:
+ *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps, these work on similar bit array layouts, but
+ * byte-oriented:
+ *   |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit
+ * number field needs to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * We also have special functions which work with an MSB0 encoding:
+ * on an s390x system the bits are numbered:
+ *   |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
+ * number field needs to be reversed compared to the LSB0 encoded bit
+ * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
 *
 */
@@ -15,556 +45,353 @@
 #error only <linux/bitops.h> can be included directly
 #endif
 
+#include <linux/typecheck.h>
 #include <linux/compiler.h>
+#include <asm/barrier.h>
 
-/*
- * 32 bit bitops format:
- * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
- * bit 32 is the LSB of *(addr+4). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
- *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- *
- * 64 bit bitops format:
- * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
- * bit 64 is the LSB of *(addr+8). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
- *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
- *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
- *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
- *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- */
-
-/* bitmap tables from arch/s390/kernel/bitmap.c */
-extern const char _oi_bitmap[];
-extern const char _ni_bitmap[];
-extern const char _zb_findmap[];
-extern const char _sb_findmap[];
+#define __BITOPS_NO_BARRIER	"\n"
 
 #ifndef CONFIG_64BIT
 
 #define __BITOPS_OR		"or"
 #define __BITOPS_AND		"nr"
 #define __BITOPS_XOR		"xr"
+#define __BITOPS_BARRIER	"\n"
 
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
+({ \
+	unsigned long __old, __new; \
+ \
+	typecheck(unsigned long *, (__addr)); \
 	asm volatile( \
 		"	l	%0,%2\n" \
 		"0:	lr	%1,%0\n" \
 		__op_string "	%1,%3\n" \
 		"	cs	%0,%1,%2\n" \
 		"	jl	0b" \
-		: "=&d" (__old), "=&d" (__new), \
-		  "=Q" (*(unsigned long *) __addr) \
-		: "d" (__val), "Q" (*(unsigned long *) __addr) \
-		: "cc");
+		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+		: "d" (__val) \
+		: "cc", "memory"); \
+	__old; \
+})
 
 #else /* CONFIG_64BIT */
 
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __BITOPS_OR		"laog"
+#define __BITOPS_AND		"lang"
+#define __BITOPS_XOR		"laxg"
+#define __BITOPS_BARRIER	"bcr	14,0\n"
+
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
+({ \
+	unsigned long __old; \
+ \
+	typecheck(unsigned long *, (__addr)); \
+	asm volatile( \
+		__barrier \
+		__op_string "	%0,%2,%1\n" \
+		__barrier \
+		: "=d" (__old), "+Q" (*(__addr)) \
+		: "d" (__val) \
+		: "cc", "memory"); \
+	__old; \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 #define __BITOPS_OR		"ogr"
 #define __BITOPS_AND		"ngr"
 #define __BITOPS_XOR		"xgr"
+#define __BITOPS_BARRIER	"\n"
 
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
+({ \
+	unsigned long __old, __new; \
+ \
+	typecheck(unsigned long *, (__addr)); \
 	asm volatile( \
 		"	lg	%0,%2\n" \
 		"0:	lgr	%1,%0\n" \
 		__op_string "	%1,%3\n" \
 		"	csg	%0,%1,%2\n" \
 		"	jl	0b" \
-		: "=&d" (__old), "=&d" (__new), \
-		  "=Q" (*(unsigned long *) __addr) \
-		: "d" (__val), "Q" (*(unsigned long *) __addr) \
-		: "cc");
+		: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+		: "d" (__val) \
+		: "cc", "memory"); \
+	__old; \
+})
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
 #endif /* CONFIG_64BIT */
 
 #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
-#ifdef CONFIG_SMP
-/*
- * SMP safe set_bit routine based on compare and swap (CS)
- */
-static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline unsigned long *
+__bitops_word(unsigned long nr, volatile unsigned long *ptr)
+{
+	unsigned long addr;
+
+	addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
+	return (unsigned long *)addr;
+}
+
+static inline unsigned char *
+__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
+{
+	return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+}
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned long addr, old, new, mask;
+	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+	if (__builtin_constant_p(nr)) {
+		unsigned char *caddr = __bitops_byte(nr, ptr);
 
-	addr = (unsigned long) ptr;
-	/* calculate address for CS */
-	addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-	/* make OR mask */
+		asm volatile(
+			"oi	%0,%b1\n"
+			: "+Q" (*caddr)
+			: "i" (1 << (nr & 7))
+			: "cc", "memory");
+		return;
+	}
+#endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	/* Do the atomic update. */
-	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
+	__BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
 }
 
-/*
- * SMP safe clear_bit routine based on compare and swap (CS)
- */
-static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned long addr, old, new, mask;
+	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+	if (__builtin_constant_p(nr)) {
+		unsigned char *caddr = __bitops_byte(nr, ptr);
 
-	addr = (unsigned long) ptr;
-	/* calculate address for CS */
-	addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-	/* make AND mask */
+		asm volatile(
+			"ni	%0,%b1\n"
+			: "+Q" (*caddr)
+			: "i" (~(1 << (nr & 7)))
+			: "cc", "memory");
+		return;
+	}
+#endif
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	/* Do the atomic update. */
-	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
+	__BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
 }
 
-/*
- * SMP safe change_bit routine based on compare and swap (CS)
- */
-static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned long addr, old, new, mask;
+	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long mask;
 
-	addr = (unsigned long) ptr;
-	/* calculate address for CS */
-	addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-	/* make XOR mask */
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+	if (__builtin_constant_p(nr)) {
+		unsigned char *caddr = __bitops_byte(nr, ptr);
+
+		asm volatile(
+			"xi	%0,%b1\n"
+			: "+Q" (*caddr)
+			: "i" (1 << (nr & 7))
+			: "cc", "memory");
+		return;
+	}
+#endif
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	/* Do the atomic update. */
-	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
+	__BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
 }
 
-/*
- * SMP safe test_and_set_bit routine based on compare and swap (CS)
- */
 static inline int
-test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned long addr, old, new, mask;
+	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long old, mask;
 
-	addr = (unsigned long) ptr;
-	/* calculate address for CS */
-	addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-	/* make OR/test mask */
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	/* Do the atomic update. */
-	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
-	barrier();
+	old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
 	return (old & mask) != 0;
 }
 
-/*
- * SMP safe test_and_clear_bit routine based on compare and swap (CS)
- */
 static inline int
-test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned long addr, old, new, mask;
+	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long old, mask;
 
-	addr = (unsigned long) ptr;
-	/* calculate address for CS */
-	addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-	/* make AND/test mask */
 	mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-	/* Do the atomic update. */
-	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
-	barrier();
-	return (old ^ new) != 0;
+	old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
+	return (old & ~mask) != 0;
 }
 
-/*
- * SMP safe test_and_change_bit routine based on compare and swap (CS)
- */
 static inline int
-test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned long addr, old, new, mask;
+	unsigned long *addr = __bitops_word(nr, ptr);
+	unsigned long old, mask;
 
-	addr = (unsigned long) ptr;
-	/* calculate address for CS */
-	addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-	/* make XOR/test mask */
 	mask = 1UL << (nr & (BITS_PER_LONG - 1));
-	/* Do the atomic update. */
-	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
-	barrier();
+	old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
 	return (old & mask) != 0;
 }
-#endif /* CONFIG_SMP */
 
-/*
- * fast, non-SMP set_bit routine
- */
 static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned long addr;
-
-	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-	asm volatile(
-		"	oc	%O0(1,%R0),%1"
-		: "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
-}
-
-static inline void
-__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
-	unsigned long addr;
+	unsigned char *addr = __bitops_byte(nr, ptr);
 
-	addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-	*(unsigned char *) addr |= 1 << (nr & 7);
+	*addr |= 1 << (nr & 7);
 }
 
-#define set_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_set_bit((nr),(addr)) : \
- __set_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP clear_bit routine
- */
 static inline void __clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned long addr;
-
-	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-	asm volatile(
-		"	nc	%O0(1,%R0),%1"
-		: "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
-}
-
-static inline void
-__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
-	unsigned long addr;
+	unsigned char *addr = __bitops_byte(nr, ptr);
 
-	addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-	*(unsigned char *) addr &= ~(1 << (nr & 7));
+	*addr &= ~(1 << (nr & 7));
 }
 
-#define clear_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_clear_bit((nr),(addr)) : \
- __clear_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP change_bit routine
- */
 static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned long addr;
-
-	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-	asm volatile(
-		"	xc	%O0(1,%R0),%1"
-		: "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
-}
-
-static inline void
-__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
-	unsigned long addr;
+	unsigned char *addr = __bitops_byte(nr, ptr);
 
-	addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-	*(unsigned char *) addr ^= 1 << (nr & 7);
+	*addr ^= 1 << (nr & 7);
 }
 
-#define change_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_change_bit((nr),(addr)) : \
- __change_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP test_and_set_bit routine
- */
 static inline int
-test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned long addr;
+	unsigned char *addr = __bitops_byte(nr, ptr);
 	unsigned char ch;
 
-	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-	ch = *(unsigned char *) addr;
-	asm volatile(
-		"	oc	%O0(1,%R0),%1"
-		: "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
-		: "cc", "memory");
+	ch = *addr;
+	*addr |= 1 << (nr & 7);
 	return (ch >> (nr & 7)) & 1;
 }
-#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)
 
-/*
- * fast, non-SMP test_and_clear_bit routine
- */
 static inline int
-test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned long addr;
+	unsigned char *addr = __bitops_byte(nr, ptr);
 	unsigned char ch;
 
-	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-	ch = *(unsigned char *) addr;
-	asm volatile(
-		"	nc	%O0(1,%R0),%1"
-		: "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
-		: "cc", "memory");
+	ch = *addr;
+	*addr &= ~(1 << (nr & 7));
 	return (ch >> (nr & 7)) & 1;
 }
-#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)
 
-/*
- * fast, non-SMP test_and_change_bit routine
- */
 static inline int
-test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-	unsigned long addr;
+	unsigned char *addr = __bitops_byte(nr, ptr);
 	unsigned char ch;
 
-	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-	ch = *(unsigned char *) addr;
-	asm volatile(
-		"	xc	%O0(1,%R0),%1"
-		: "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
-		: "cc", "memory");
+	ch = *addr;
+	*addr ^= 1 << (nr & 7);
 	return (ch >> (nr & 7)) & 1;
 }
-#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)
-
-#ifdef CONFIG_SMP
-#define set_bit		set_bit_cs
-#define clear_bit	clear_bit_cs
-#define change_bit	change_bit_cs
-#define test_and_set_bit	test_and_set_bit_cs
-#define test_and_clear_bit	test_and_clear_bit_cs
-#define test_and_change_bit	test_and_change_bit_cs
-#else
-#define set_bit		set_bit_simple
-#define clear_bit	clear_bit_simple
-#define change_bit	change_bit_simple
-#define test_and_set_bit	test_and_set_bit_simple
-#define test_and_clear_bit	test_and_clear_bit_simple
-#define test_and_change_bit	test_and_change_bit_simple
-#endif
-
-/*
- * This routine doesn't need to be atomic.
- */
-
-static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
+static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
 {
-	unsigned long addr;
-	unsigned char ch;
+	const volatile unsigned char *addr;
 
-	addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-	ch = *(volatile unsigned char *) addr;
-	return (ch >> (nr & 7)) & 1;
-}
-
-static inline int
-__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
-	return (((volatile char *) addr)
-		[(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
+	addr = ((const volatile unsigned char *)ptr);
+	addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
+	return (*addr >> (nr & 7)) & 1;
 }
 
-#define test_bit(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)) )
-
 /*
- * Optimized find bit helper functions.
+ * Functions which use MSB0 bit numbering.
+ * On an s390x system the bits are numbered:
+ *   |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
 */
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+				unsigned long offset);
 
-/**
- * __ffz_word_loop - find byte offset of first long != -1UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
- */
-static inline unsigned long __ffz_word_loop(const unsigned long *addr,
-					    unsigned long size)
+static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
 {
-	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
-	unsigned long bytes = 0;
-
-	asm volatile(
-#ifndef CONFIG_64BIT
-		"	ahi	%1,-1\n"
-		"	sra	%1,5\n"
-		"	jz	1f\n"
-		"0:	c	%2,0(%0,%3)\n"
-		"	jne	1f\n"
-		"	la	%0,4(%0)\n"
-		"	brct	%1,0b\n"
-		"1:\n"
-#else
-		"	aghi	%1,-1\n"
-		"	srag	%1,%1,6\n"
-		"	jz	1f\n"
-		"0:	cg	%2,0(%0,%3)\n"
-		"	jne	1f\n"
-		"	la	%0,8(%0)\n"
-		"	brct	%1,0b\n"
-		"1:\n"
-#endif
-		: "+&a" (bytes), "+&d" (size)
-		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
-		: "cc" );
-	return bytes;
-}
-
-/**
- * __ffs_word_loop - find byte offset of first long != 0UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
- */
-static inline unsigned long __ffs_word_loop(const unsigned long *addr,
-					    unsigned long size)
-{
-	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
-	unsigned long bytes = 0;
-
-	asm volatile(
-#ifndef CONFIG_64BIT
-		"	ahi	%1,-1\n"
-		"	sra	%1,5\n"
-		"	jz	1f\n"
-		"0:	c	%2,0(%0,%3)\n"
-		"	jne	1f\n"
-		"	la	%0,4(%0)\n"
-		"	brct	%1,0b\n"
-		"1:\n"
-#else
-		"	aghi	%1,-1\n"
-		"	srag	%1,%1,6\n"
-		"	jz	1f\n"
-		"0:	cg	%2,0(%0,%3)\n"
-		"	jne	1f\n"
-		"	la	%0,8(%0)\n"
-		"	brct	%1,0b\n"
-		"1:\n"
-#endif
-		: "+&a" (bytes), "+&a" (size)
-		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
-		: "cc" );
-	return bytes;
+	return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-/**
- * __ffz_word - add number of the first unset bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for unset bits
- */
-static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
+static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
 {
-#ifdef CONFIG_64BIT
-	if ((word & 0xffffffff) == 0xffffffff) {
-		word >>= 32;
-		nr += 32;
-	}
-#endif
-	if ((word & 0xffff) == 0xffff) {
-		word >>= 16;
-		nr += 16;
-	}
-	if ((word & 0xff) == 0xff) {
-		word >>= 8;
-		nr += 8;
-	}
-	return nr + _zb_findmap[(unsigned char) word];
+	return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-/**
- * __ffs_word - add number of the first set bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for set bits
- */
-static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
+static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
 {
-#ifdef CONFIG_64BIT
-	if ((word & 0xffffffff) == 0) {
-		word >>= 32;
-		nr += 32;
-	}
-#endif
-	if ((word & 0xffff) == 0) {
-		word >>= 16;
-		nr += 16;
-	}
-	if ((word & 0xff) == 0) {
-		word >>= 8;
-		nr += 8;
-	}
-	return nr + _sb_findmap[(unsigned char) word];
+	return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-
-/**
- * __load_ulong_be - load big endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_be(const unsigned long *p,
-					    unsigned long offset)
+static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
 {
-	p = (unsigned long *)((unsigned long) p + offset);
-	return *p;
+	return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-/**
- * __load_ulong_le - load little endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_le(const unsigned long *p,
-					    unsigned long offset)
+static inline int test_bit_inv(unsigned long nr,
+			       const volatile unsigned long *ptr)
 {
-	unsigned long word;
-
-	p = (unsigned long *)((unsigned long) p + offset);
-#ifndef CONFIG_64BIT
-	asm volatile(
-		"	ic	%0,%O1(%R1)\n"
-		"	icm	%0,2,%O1+1(%R1)\n"
-		"	icm	%0,4,%O1+2(%R1)\n"
-		"	icm	%0,8,%O1+3(%R1)"
-		: "=&d" (word) : "Q" (*p) : "cc");
-#else
-	asm volatile(
-		"	lrvg	%0,%1"
-		: "=d" (word) : "m" (*p) );
-#endif
-	return word;
+	return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-/*
- * The various find bit functions.
- */
-
-/*
- * ffz - find first zero in word.
- * @word: The word to search
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+
+/**
+ * __flogr - find leftmost one
+ * @word - The word to search
 *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
-	return __ffz_word(0, word);
+ * Returns the bit number of the most significant bit set,
+ * where the most significant bit has bit number 0.
+ * If no bit is set this function returns 64.
+ */
+static inline unsigned char __flogr(unsigned long word)
+{
+	if (__builtin_constant_p(word)) {
+		unsigned long bit = 0;
+
+		if (!word)
+			return 64;
+		if (!(word & 0xffffffff00000000UL)) {
+			word <<= 32;
+			bit += 32;
+		}
+		if (!(word & 0xffff000000000000UL)) {
+			word <<= 16;
+			bit += 16;
+		}
+		if (!(word & 0xff00000000000000UL)) {
+			word <<= 8;
+			bit += 8;
+		}
+		if (!(word & 0xf000000000000000UL)) {
+			word <<= 4;
+			bit += 4;
+		}
+		if (!(word & 0xc000000000000000UL)) {
+			word <<= 2;
+			bit += 2;
+		}
+		if (!(word & 0x8000000000000000UL)) {
+			word <<= 1;
+			bit += 1;
+		}
+		return bit;
+	} else {
+		register unsigned long bit asm("4") = word;
+		register unsigned long out asm("5");
+
+		asm volatile(
+			"	flogr	%[bit],%[bit]\n"
+			: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
+		return bit;
+	}
 }
 
 /**
@@ -573,337 +400,83 @@ static inline unsigned long ffz(unsigned long word)
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
-static inline unsigned long __ffs (unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
 {
-	return __ffs_word(0, word);
+	return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
 }
 
 /**
 * ffs - find first bit set
- * @x: the word to search
+ * @word: the word to search
 *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * This is defined the same way as the libc and
+ * compiler builtin ffs routines (man ffs).
 */
-static inline int ffs(int x)
+static inline int ffs(int word)
 {
-	if (!x)
-		return 0;
-	return __ffs_word(1, x);
+	unsigned long mask = 2 * BITS_PER_LONG - 1;
+	unsigned int val = (unsigned int)word;
+
+	return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
 }
 
 /**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
 *
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
+ * Undefined if no set bit exists, so code should check against 0 first.
 */
-static inline unsigned long find_first_zero_bit(const unsigned long *addr,
-						unsigned long size)
+static inline unsigned long __fls(unsigned long word)
 {
-	unsigned long bytes, bits;
-
-	if (!size)
-		return 0;
-	bytes = __ffz_word_loop(addr, size);
-	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
-	return (bits < size) ? bits : size;
+	return __flogr(word) ^ (BITS_PER_LONG - 1);
 }
-#define find_first_zero_bit find_first_zero_bit
 
 /**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * fls64 - find last set bit in a 64-bit word
+ * @word: the word to search
 *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-static inline unsigned long find_first_bit(const unsigned long * addr,
-					   unsigned long size)
-{
-	unsigned long bytes, bits;
-
-	if (!size)
-		return 0;
-	bytes = __ffs_word_loop(addr, size);
-	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
-	return (bits < size) ? bits : size;
-}
-#define find_first_bit find_first_bit
-
-/*
- * Big endian variant whichs starts bit counting from left using
- * the flogr (find leftmost one) instruction.
- */
-static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
-{
-	register unsigned long bit asm("2") = val;
-	register unsigned long out asm("3");
-
-	asm volatile (
-		"	.insn	rre,0xb9830000,%[bit],%[bit]\n"
-		: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
-	return nr + bit;
-}
-
-/*
- * 64 bit special left bitops format:
- * order in memory:
- *    00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
- *    10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
- *    20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
- *    30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
- * after that follows the next long with bit numbers
- *    40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
- *    50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
- *    60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
- *    70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
- * The reason for this bit ordering is the fact that
- * the hardware sets bits in a bitmap starting at bit 0
- * and we don't want to scan the bitmap from the 'wrong
- * end'.
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
 */
-static inline unsigned long find_first_bit_left(const unsigned long *addr,
-						unsigned long size)
+static inline int fls64(unsigned long word)
 {
-	unsigned long bytes, bits;
-
-	if (!size)
-		return 0;
-	bytes = __ffs_word_loop(addr, size);
-	bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
-	return (bits < size) ? bits : size;
-}
-
-static inline int find_next_bit_left(const unsigned long *addr,
-				     unsigned long size,
-				     unsigned long offset)
-{
-	const unsigned long *p;
-	unsigned long bit, set;
-
-	if (offset >= size)
-		return size;
-	bit = offset & (BITS_PER_LONG - 1);
-	offset -= bit;
-	size -= offset;
-	p = addr + offset / BITS_PER_LONG;
-	if (bit) {
-		set = __flo_word(0, *p & (~0UL >> bit));
-		if (set >= size)
-			return size + offset;
-		if (set < BITS_PER_LONG)
-			return set + offset;
-		offset += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-		p++;
-	}
-	return offset + find_first_bit_left(p, size);
-}
+	unsigned long mask = 2 * BITS_PER_LONG - 1;
 
-#define for_each_set_bit_left(bit, addr, size) \
-	for ((bit) = find_first_bit_left((addr), (size)); \
-	     (bit) < (size); \
-	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_left_cont(bit, addr, size) \
-	for ((bit) = find_next_bit_left((addr), (size), (bit)); \
-	     (bit) < (size); \
-	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline int find_next_zero_bit (const unsigned long * addr,
-				      unsigned long size,
-				      unsigned long offset)
-{
-	const unsigned long *p;
-	unsigned long bit, set;
-
-	if (offset >= size)
-		return size;
-	bit = offset & (BITS_PER_LONG - 1);
-	offset -= bit;
-	size -= offset;
-	p = addr + offset / BITS_PER_LONG;
-	if (bit) {
-		/*
-		 * __ffz_word returns BITS_PER_LONG
-		 * if no zero bit is present in the word.
-		 */
-		set = __ffz_word(bit, *p >> bit);
-		if (set >= size)
-			return size + offset;
-		if (set < BITS_PER_LONG)
-			return set + offset;
-		offset += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-		p++;
-	}
-	return offset + find_first_zero_bit(p, size);
+	return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
 }
-#define find_next_zero_bit find_next_zero_bit
 
 /**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
+ * fls - find last (most-significant) bit set
+ * @word: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
-static inline int find_next_bit (const unsigned long * addr,
-				 unsigned long size,
-				 unsigned long offset)
+static inline int fls(int word)
 {
-	const unsigned long *p;
-	unsigned long bit, set;
-
-	if (offset >= size)
-		return size;
-	bit = offset & (BITS_PER_LONG - 1);
-	offset -= bit;
-	size -= offset;
-	p = addr + offset / BITS_PER_LONG;
-	if (bit) {
-		/*
-		 * __ffs_word returns BITS_PER_LONG
-		 * if no one bit is present in the word.
-		 */
-		set = __ffs_word(0, *p & (~0UL << bit));
-		if (set >= size)
-			return size + offset;
-		if (set < BITS_PER_LONG)
-			return set + offset;
-		offset += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-		p++;
-	}
-	return offset + find_first_bit(p, size);
+	return fls64((unsigned int)word);
 }
-#define find_next_bit find_next_bit
 
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-	return find_first_bit(b, 140);
-}
+#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
 
-#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffs.h>
 #include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/fls64.h>
+
+#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/lock.h>
-
-/*
- * ATTENTION: intel byte ordering convention for ext2 and minix !!
- * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
- * bit 32 is the LSB of (addr+4).
- * That combined with the little endian byte order of Intel gives the
- * following bit order in memory:
- *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
- *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
- */
-
-static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
-{
-	unsigned long bytes, bits;
-
-	if (!size)
-		return 0;
-	bytes = __ffz_word_loop(vaddr, size);
-	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
-	return (bits < size) ? bits : size;
-}
-#define find_first_zero_bit_le find_first_zero_bit_le
-
-static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
-					unsigned long offset)
-{
-	unsigned long *addr = vaddr, *p;
-	unsigned long bit, set;
-
-	if (offset >= size)
-		return size;
-	bit = offset & (BITS_PER_LONG - 1);
-	offset -= bit;
-	size -= offset;
-	p = addr + offset / BITS_PER_LONG;
-	if (bit) {
-		/*
-		 * s390 version of ffz returns BITS_PER_LONG
-		 * if no zero bit is present in the word.
-		 */
-		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
-		if (set >= size)
-			return size + offset;
-		if (set < BITS_PER_LONG)
-			return set + offset;
-		offset += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-		p++;
-	}
-	return offset + find_first_zero_bit_le(p, size);
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
-{
-	unsigned long bytes, bits;
-
-	if (!size)
-		return 0;
-	bytes = __ffs_word_loop(vaddr, size);
-	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
-	return (bits < size) ? bits : size;
-}
-#define find_first_bit_le find_first_bit_le
-
-static inline int find_next_bit_le(void *vaddr, unsigned long size,
-				   unsigned long offset)
-{
-	unsigned long *addr = vaddr, *p;
-	unsigned long bit, set;
-
-	if (offset >= size)
-		return size;
-	bit = offset & (BITS_PER_LONG - 1);
-	offset -= bit;
-	size -= offset;
-	p = addr + offset / BITS_PER_LONG;
-	if (bit) {
-		/*
-		 * s390 version of ffz returns BITS_PER_LONG
-		 * if no zero bit is present in the word.
-		 */
-		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
-		if (set >= size)
-			return size + offset;
-		if (set < BITS_PER_LONG)
-			return set + offset;
-		offset += BITS_PER_LONG;
-		size -= BITS_PER_LONG;
-		p++;
-	}
-	return offset + find_first_bit_le(p, size);
-}
-#define find_next_bit_le find_next_bit_le
-
+#include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/le.h>
-
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
 
 #endif /* _S390_BITOPS_H */
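The *_inv variants encode MSB0 bit numbering simply by XOR-ing the bit number with BITS_PER_LONG - 1, and the flogr-based fls()/__ffs() family replaces the old table-driven helpers. A few illustrative assertions (a sketch, not part of the patch):

static void bitops_examples(void)
{
	static unsigned long vec[4];	/* hypothetical MSB0 hardware bitmap */

	set_bit_inv(0, vec);		/* leftmost bit of the first word */
	WARN_ON(!test_bit(BITS_PER_LONG - 1, vec));	/* same bit, LSB0 view */
	clear_bit_inv(0, vec);

	/* flogr-based helpers keep the usual semantics */
	WARN_ON(fls(0) != 0 || fls(1) != 1 || fls(0x80000000) != 32);
	WARN_ON(__ffs(0x10UL) != 4);
}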
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index f201af8be58..b80e456d642 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -219,7 +219,9 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
 #define to_ccwdev(n) container_of(n, struct ccw_device, dev)
 #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
 
-extern struct ccw_device *ccw_device_probe_console(void);
+extern struct ccw_device *ccw_device_create_console(struct ccw_driver *);
+extern void ccw_device_destroy_console(struct ccw_device *);
+extern int ccw_device_enable_console(struct ccw_device *);
 extern void ccw_device_wait_idle(struct ccw_device *);
 extern int ccw_device_force_console(struct ccw_device *);
 
@@ -227,5 +229,5 @@ int ccw_device_siosl(struct ccw_device *);
 
 extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
 
-extern void *ccw_device_get_chp_desc(struct ccw_device *, int);
+struct channel_path_desc *ccw_device_get_chp_desc(struct ccw_device *, int);
 #endif /* _S390_CCWDEV_H_ */
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index 23723ce5ca7..057ce0ca637 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -10,6 +10,8 @@ struct ccw_driver;
 * @count: number of attached slave devices
 * @dev: embedded device structure
 * @cdev: variable number of slave devices, allocated as needed
+ * @ungroup_work: work to be done when a ccwgroup notifier has action
+ *	type %BUS_NOTIFY_UNBIND_DRIVER
 */
 struct ccwgroup_device {
 	enum {
@@ -22,6 +24,7 @@ struct ccwgroup_device {
 	/* public: */
 	unsigned int count;
 	struct device	dev;
+	struct work_struct ungroup_work;
 	struct ccw_device *cdev[0];
 };
 
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index 4f57a4f3909..74036485635 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -44,22 +44,15 @@ csum_partial(const void *buff, int len, __wsum sum)
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 *
- * Copy from userspace and compute checksum. If we catch an exception
- * then zero the rest of the buffer.
+ * Copy from userspace and compute checksum.
 */
 static inline __wsum
 csum_partial_copy_from_user(const void __user *src, void *dst,
 					  int len, __wsum sum,
 					  int *err_ptr)
 {
-	int missing;
-
-	missing = copy_from_user(dst, src, len);
-	if (missing) {
-		memset(dst + len - missing, 0, missing);
+	if (unlikely(copy_from_user(dst, src, len)))
 		*err_ptr = -EFAULT;
-	}
-
 	return csum_partial(dst, len, sum);
 }
 
diff --git a/arch/s390/include/asm/chpid.h b/arch/s390/include/asm/chpid.h
index 38c405ef89c..7298eec9854 100644
--- a/arch/s390/include/asm/chpid.h
+++ b/arch/s390/include/asm/chpid.h
@@ -8,6 +8,17 @@
 #include <uapi/asm/chpid.h>
 #include <asm/cio.h>
 
+struct channel_path_desc {
+	u8 flags;
+	u8 lsn;
+	u8 desc;
+	u8 chpid;
+	u8 swla;
+	u8 zeroes;
+	u8 chla;
+	u8 chpp;
+} __packed;
+
 static inline void chp_id_init(struct chp_id *chpid)
 {
 	memset(chpid, 0, sizeof(struct chp_id));
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index d42625053c3..09633920776 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -199,7 +199,7 @@ struct esw_eadm {
 /**
 * struct irb - interruption response block
 * @scsw: subchannel status word
- * @esw: extened status word
+ * @esw: extended status word
 * @ecw: extended control word
 *
 * The irb that is handed to the device driver when an interrupt occurs. For
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 0f636cbdf34..4236408070e 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -185,11 +185,12 @@ static inline unsigned long long __cmpxchg64(void *ptr,
 {
 	register_pair rp_old = {.pair = old};
 	register_pair rp_new = {.pair = new};
+	unsigned long long *ullptr = ptr;
 
 	asm volatile(
 		"	cds	%0,%2,%1"
-		: "+&d" (rp_old), "=Q" (ptr)
-		: "d" (rp_new), "Q" (ptr)
+		: "+d" (rp_old), "+Q" (*ullptr)
+		: "d" (rp_new)
 		: "memory", "cc");
 	return rp_old.pair;
 }
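The checksum.h change above shifts responsibility to the caller: on a userspace fault the destination buffer is no longer zero-filled, only *err_ptr is set. A sketch of the expected calling pattern (the wrapper function is hypothetical):

static __wsum copy_and_checksum(const void __user *src, void *dst, int len)
{
	int err = 0;
	__wsum sum;

	sum = csum_partial_copy_from_user(src, dst, len, 0, &err);
	if (err)	/* -EFAULT: buffer contents are now unspecified */
		return 0;
	return sum;
}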
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index c1e7c646727..d350ed9d0fb 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -8,7 +8,11 @@
 #include <linux/thread_info.h>
 
 #define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64))
-#define __SC_DELOUSE(t,v) (t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v))
+
+#define __SC_DELOUSE(t,v) ({ \
+	BUILD_BUG_ON(sizeof(t) > 4 && !__TYPE_IS_PTR(t)); \
+	(t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)); \
+})
 
 #define PSW32_MASK_PER		0x40000000UL
 #define PSW32_MASK_DAT		0x04000000UL
@@ -22,6 +26,7 @@
 #define PSW32_MASK_ASC		0x0000C000UL
 #define PSW32_MASK_CC		0x00003000UL
 #define PSW32_MASK_PM		0x00000f00UL
+#define PSW32_MASK_RI		0x00000080UL
 
 #define PSW32_MASK_USER		0x0000FF00UL
 
@@ -35,7 +40,10 @@
 #define PSW32_ASC_SECONDARY	0x00008000UL
 #define PSW32_ASC_HOME		0x0000C000UL
 
-extern u32 psw32_user_bits;
+#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
+			 PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
+			 PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \
+			 PSW32_ASC_PRIMARY)
 
 #define COMPAT_USER_HZ		100
 #define COMPAT_UTS_MACHINE	"s390\0\0\0\0"
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index c879fad404c..cb700d54bd8 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -56,6 +56,96 @@ struct cpumf_ctr_info {
 	u32   reserved2[12];
 } __packed;
 
+/* QUERY SAMPLING INFORMATION block */
+struct hws_qsi_info_block {	    /* Bit(s) */
+	unsigned int b0_13:14;	    /* 0-13: zeros */
+	unsigned int as:1;	    /* 14: basic-sampling authorization */
+	unsigned int ad:1;	    /* 15: diag-sampling authorization */
+	unsigned int b16_21:6;	    /* 16-21: zeros */
+	unsigned int es:1;	    /* 22: basic-sampling enable control */
+	unsigned int ed:1;	    /* 23: diag-sampling enable control */
+	unsigned int b24_29:6;	    /* 24-29: zeros */
+	unsigned int cs:1;	    /* 30: basic-sampling activation control */
+	unsigned int cd:1;	    /* 31: diag-sampling activation control */
+	unsigned int bsdes:16;	    /* 4-5: size of basic sampling entry */
+	unsigned int dsdes:16;	    /* 6-7: size of diagnostic sampling entry */
+	unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */
+	unsigned long max_sampl_rate; /* 16-23: maximum sampling interval */
+	unsigned long tear;	    /* 24-31: TEAR contents */
+	unsigned long dear;	    /* 32-39: DEAR contents */
+	unsigned int rsvrd0;	    /* 40-43: reserved */
+	unsigned int cpu_speed;     /* 44-47: CPU speed */
+	unsigned long long rsvrd1;  /* 48-55: reserved */
+	unsigned long long rsvrd2;  /* 56-63: reserved */
+} __packed;
+
+/* SET SAMPLING CONTROLS request block */
+struct hws_lsctl_request_block {
+	unsigned int s:1;	    /* 0: maximum buffer indicator */
+	unsigned int h:1;	    /* 1: part. level reserved for VM use */
+	unsigned long long b2_53:52;/* 2-53: zeros */
+	unsigned int es:1;	    /* 54: basic-sampling enable control */
+	unsigned int ed:1;	    /* 55: diag-sampling enable control */
+	unsigned int b56_61:6;	    /* 56-61: - zeros */
+	unsigned int cs:1;	    /* 62: basic-sampling activation control */
+	unsigned int cd:1;	    /* 63: diag-sampling activation control */
+	unsigned long interval;     /* 8-15: sampling interval */
+	unsigned long tear;	    /* 16-23: TEAR contents */
+	unsigned long dear;	    /* 24-31: DEAR contents */
+	/* 32-63: */
+	unsigned long rsvrd1;	    /* reserved */
+	unsigned long rsvrd2;	    /* reserved */
+	unsigned long rsvrd3;	    /* reserved */
+	unsigned long rsvrd4;	    /* reserved */
+} __packed;
+
+struct hws_basic_entry {
+	unsigned int def:16;	    /* 0-15  Data Entry Format */
+	unsigned int R:4;	    /* 16-19 reserved */
+	unsigned int U:4;	    /* 20-23 Number of unique instruct. */
+	unsigned int z:2;	    /* zeros */
+	unsigned int T:1;	    /* 26 PSW DAT mode */
+	unsigned int W:1;	    /* 27 PSW wait state */
+	unsigned int P:1;	    /* 28 PSW Problem state */
+	unsigned int AS:2;	    /* 29-30 PSW address-space control */
+	unsigned int I:1;	    /* 31 entry valid or invalid */
+	unsigned int:16;
+	unsigned int prim_asn:16;   /* primary ASN */
+	unsigned long long ia;	    /* Instruction Address */
+	unsigned long long gpp;     /* Guest Program Parameter */
+	unsigned long long hpp;     /* Host Program Parameter */
+} __packed;
+
+struct hws_diag_entry {
+	unsigned int def:16;	    /* 0-15  Data Entry Format */
+	unsigned int R:14;	    /* 16-19 and 20-30 reserved */
+	unsigned int I:1;	    /* 31 entry valid or invalid */
+	u8	     data[];	    /* Machine-dependent sample data */
+} __packed;
+
+struct hws_combined_entry {
+	struct hws_basic_entry	basic;	/* Basic-sampling data entry */
+	struct hws_diag_entry	diag;	/* Diagnostic-sampling data entry */
+} __packed;
+
+struct hws_trailer_entry {
+	union {
+		struct {
+			unsigned int f:1;	/* 0 - Block Full Indicator */
+			unsigned int a:1;	/* 1 - Alert request control */
+			unsigned int t:1;	/* 2 - Timestamp format */
+			unsigned long long:61;	/* 3 - 63: Reserved */
+		};
+		unsigned long long flags;	/* 0 - 63: All indicators */
+	};
+	unsigned long long overflow;	 /* 64 - sample Overflow count */
+	unsigned char timestamp[16];	 /* 16 - 31 timestamp */
+	unsigned long long reserved1;	 /* 32 - Reserved */
+	unsigned long long reserved2;	 /*	 */
+	unsigned long long progusage1;	 /* 48 - reserved for programming use */
+	unsigned long long progusage2;	 /*	 */
+} __packed;
+
 /* Query counter information */
 static inline int qctri(struct cpumf_ctr_info *info)
 {
@@ -99,4 +189,95 @@ static inline int ecctr(u64 ctr, u64 *val)
 	return cc;
 }
 
+/* Query sampling information */
+static inline int qsi(struct hws_qsi_info_block *info)
+{
+	int cc;
+	cc = 1;
+
+	asm volatile(
+		"0:	.insn	s,0xb2860000,0(%1)\n"
+		"1:	lhi	%0,0\n"
+		"2:\n"
+		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+		: "=d" (cc), "+a" (info)
+		: "m" (*info)
+		: "cc", "memory");
+
+	return cc ? -EINVAL : 0;
+}
+
+/* Load sampling controls */
+static inline int lsctl(struct hws_lsctl_request_block *req)
+{
+	int cc;
+
+	cc = 1;
+	asm volatile(
+		"0:	.insn	s,0xb2870000,0(%1)\n"
+		"1:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"2:\n"
+		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+		: "+d" (cc), "+a" (req)
+		: "m" (*req)
+		: "cc", "memory");
+
+	return cc ? -EINVAL : 0;
+}
+
+/* Sampling control helper functions */
+
+#include <linux/time.h>
+
+static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi,
+						unsigned long freq)
+{
+	return (USEC_PER_SEC / freq) * qsi->cpu_speed;
+}
+
+static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
+						unsigned long rate)
+{
+	return USEC_PER_SEC * qsi->cpu_speed / rate;
+}
+
+#define SDB_TE_ALERT_REQ_MASK	0x4000000000000000UL
+#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL
+
+/* Return TOD timestamp contained in a trailer entry */
+static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
+{
+	/* TOD in STCKE format */
+	if (te->t)
+		return *((unsigned long long *) &te->timestamp[1]);
+
+	/* TOD in STCK format */
+	return *((unsigned long long *) &te->timestamp[0]);
+}
+
+/* Return pointer to trailer entry of a sample data block */
+static inline unsigned long *trailer_entry_ptr(unsigned long v)
+{
+	void *ret;
+
+	ret = (void *) v;
+	ret += PAGE_SIZE;
+	ret -= sizeof(struct hws_trailer_entry);
+
+	return (unsigned long *) ret;
+}
+
+/* Return if the entry in the sample data block table (sdbt)
+ * is a link to the next sdbt */
+static inline int is_link_entry(unsigned long *s)
+{
+	return *s & 0x1ul ? 1 : 0;
+}
+
+/* Return pointer to the linked sdbt */
+static inline unsigned long *get_next_sdbt(unsigned long *s)
+{
+	return (unsigned long *) (*s & ~0x1ul);
+}
 #endif /* _ASM_S390_CPU_MF_H */
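The sampling structures above describe the sample-data-block tables used by the CPU-measurement sampling facility: each table entry either addresses a 4 KB sample-data block or, when bit 63 is set, links to the next table. A hedged sketch of walking a table with the new helpers (the function itself is illustrative, not from this patch):

static struct hws_trailer_entry *first_trailer(unsigned long *sdbt)
{
	while (is_link_entry(sdbt))	/* follow table-to-table links */
		sdbt = get_next_sdbt(sdbt);
	/* the trailer entry sits at the end of the 4 KB data block */
	return (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
}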
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h
index 7e1c917bbba..09d1dd46bd5 100644
--- a/arch/s390/include/asm/css_chars.h
+++ b/arch/s390/include/asm/css_chars.h
@@ -29,6 +29,8 @@ struct css_general_char {
 	u32 fcx : 1;	 /* bit  88 */
 	u32 : 19;
 	u32 alt_ssi : 1; /* bit 108 */
+	u32:1;
+	u32 narf:1;	 /* bit 110 */
 } __packed;
 
 extern struct css_general_char css_general_characteristics;
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index debfda33d1f..31ab9f346d7 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -7,70 +7,76 @@
 #ifndef __ASM_CTL_REG_H
 #define __ASM_CTL_REG_H
 
-#ifdef CONFIG_64BIT
-
-#define __ctl_load(array, low, high) ({ \
-	typedef struct { char _[sizeof(array)]; } addrtype; \
-	asm volatile( \
-		"	lctlg	%1,%2,%0\n" \
-		: : "Q" (*(addrtype *)(&array)), \
-		    "i" (low), "i" (high)); \
-	})
-
-#define __ctl_store(array, low, high) ({ \
-	typedef struct { char _[sizeof(array)]; } addrtype; \
-	asm volatile( \
-		"	stctg	%1,%2,%0\n" \
-		: "=Q" (*(addrtype *)(&array)) \
-		: "i" (low), "i" (high)); \
-	})
-
-#else /* CONFIG_64BIT */
-
-#define __ctl_load(array, low, high) ({ \
-	typedef struct { char _[sizeof(array)]; } addrtype; \
-	asm volatile( \
-		"	lctl	%1,%2,%0\n" \
-		: : "Q" (*(addrtype *)(&array)), \
-		    "i" (low), "i" (high)); \
-})
-
-#define __ctl_store(array, low, high) ({ \
-	typedef struct { char _[sizeof(array)]; } addrtype; \
-	asm volatile( \
-		"	stctl	%1,%2,%0\n" \
-		: "=Q" (*(addrtype *)(&array)) \
-		: "i" (low), "i" (high)); \
-	})
-
-#endif /* CONFIG_64BIT */
-
-#define __ctl_set_bit(cr, bit) ({ \
-	unsigned long __dummy; \
-	__ctl_store(__dummy, cr, cr); \
-	__dummy |= 1UL << (bit); \
-	__ctl_load(__dummy, cr, cr); \
-})
+#include <linux/bug.h>
 
-#define __ctl_clear_bit(cr, bit) ({ \
-	unsigned long __dummy; \
-	__ctl_store(__dummy, cr, cr); \
-	__dummy &= ~(1UL << (bit)); \
-	__ctl_load(__dummy, cr, cr); \
-})
+#ifdef CONFIG_64BIT
+# define __CTL_LOAD	"lctlg"
+# define __CTL_STORE	"stctg"
+#else
+# define __CTL_LOAD	"lctl"
+# define __CTL_STORE	"stctl"
+#endif
+
+#define __ctl_load(array, low, high) { \
+	typedef struct { char _[sizeof(array)]; } addrtype; \
+ \
+	BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+	asm volatile( \
+		__CTL_LOAD " %1,%2,%0\n" \
+		: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+}
+
+#define __ctl_store(array, low, high) { \
+	typedef struct { char _[sizeof(array)]; } addrtype; \
+ \
+	BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+	asm volatile( \
+		__CTL_STORE " %1,%2,%0\n" \
+		: "=Q" (*(addrtype *)(&array)) \
+		: "i" (low), "i" (high)); \
+}
+
+static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+	unsigned long reg;
+
+	__ctl_store(reg, cr, cr);
+	reg |= 1UL << bit;
+	__ctl_load(reg, cr, cr);
+}
+
+static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+	unsigned long reg;
+
+	__ctl_store(reg, cr, cr);
+	reg &= ~(1UL << bit);
+	__ctl_load(reg, cr, cr);
+}
+
+void smp_ctl_set_bit(int cr, int bit);
+void smp_ctl_clear_bit(int cr, int bit);
+
+union ctlreg0 {
+	unsigned long val;
+	struct {
+#ifdef CONFIG_64BIT
+		unsigned long	   : 32;
+#endif
+		unsigned long	   : 3;
+		unsigned long lap  : 1; /* Low-address-protection control */
+		unsigned long	   : 4;
+		unsigned long edat : 1; /* Enhanced-DAT-enablement control */
+		unsigned long	   : 23;
+	};
+};
 
 #ifdef CONFIG_SMP
-
-extern void smp_ctl_set_bit(int cr, int bit);
-extern void smp_ctl_clear_bit(int cr, int bit);
-#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-
+# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
 #else
-
-#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
-
-#endif /* CONFIG_SMP */
+# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
+#endif
 
 #endif /* __ASM_CTL_REG_H */
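With __ctl_load()/__ctl_store() now guarded by a BUILD_BUG_ON() on the array size and __ctl_set_bit()/__ctl_clear_bit() turned into proper inline functions, typical usage looks like the sketch below; reading the EDAT bit through the new union ctlreg0 is shown as an assumed usage pattern, not something this patch itself does:

static int edat_available(void)
{
	union ctlreg0 cr0;

	__ctl_store(cr0.val, 0, 0);	/* size check happens at compile time */
	return cr0.edat;
}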
"lctl" +# define __CTL_STORE "stctl" +#endif + +#define __ctl_load(array, low, high) { \ + typedef struct { char _[sizeof(array)]; } addrtype; \ + \ + BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ + asm volatile( \ + __CTL_LOAD " %1,%2,%0\n" \ + : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ +} + +#define __ctl_store(array, low, high) { \ + typedef struct { char _[sizeof(array)]; } addrtype; \ + \ + BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ + asm volatile( \ + __CTL_STORE " %1,%2,%0\n" \ + : "=Q" (*(addrtype *)(&array)) \ + : "i" (low), "i" (high)); \ +} + +static inline void __ctl_set_bit(unsigned int cr, unsigned int bit) +{ + unsigned long reg; + + __ctl_store(reg, cr, cr); + reg |= 1UL << bit; + __ctl_load(reg, cr, cr); +} + +static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit) +{ + unsigned long reg; + + __ctl_store(reg, cr, cr); + reg &= ~(1UL << bit); + __ctl_load(reg, cr, cr); +} + +void smp_ctl_set_bit(int cr, int bit); +void smp_ctl_clear_bit(int cr, int bit); + +union ctlreg0 { + unsigned long val; + struct { +#ifdef CONFIG_64BIT + unsigned long : 32; +#endif + unsigned long : 3; + unsigned long lap : 1; /* Low-address-protection control */ + unsigned long : 4; + unsigned long edat : 1; /* Enhanced-DAT-enablement control */ + unsigned long : 23; + }; +}; #ifdef CONFIG_SMP - -extern void smp_ctl_set_bit(int cr, int bit); -extern void smp_ctl_clear_bit(int cr, int bit); -#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit) -#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit) - +# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit) +# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit) #else - -#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit) -#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit) - -#endif /* CONFIG_SMP */ +# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit) +# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit) +#endif #endif /* __ASM_CTL_REG_H */ diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h index 188c5052a20..530c15eb01e 100644 --- a/arch/s390/include/asm/debug.h +++ b/arch/s390/include/asm/debug.h @@ -107,6 +107,11 @@ void debug_set_level(debug_info_t* id, int new_level); void debug_set_critical(void); void debug_stop_all(void); +static inline bool debug_level_enabled(debug_info_t* id, int level) +{ + return level <= id->level; +} + static inline debug_entry_t* debug_event(debug_info_t* id, int level, void* data, int length) { diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h new file mode 100644 index 00000000000..04a83f5773c --- /dev/null +++ b/arch/s390/include/asm/dis.h @@ -0,0 +1,52 @@ +/* + * Disassemble s390 instructions. + * + * Copyright IBM Corp. 
+
+void show_code(struct pt_regs *regs);
+void print_fn_code(unsigned char *code, unsigned long len);
+int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
+struct s390_insn *find_insn(unsigned char *code);
+
+static inline int is_known_insn(unsigned char *code)
+{
+	return !!find_insn(code);
+}
+
+#endif /* __ASM_S390_DIS_H__ */
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index dc9200ca32e..67026300c88 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -111,18 +111,7 @@ struct scm_driver {
 int scm_driver_register(struct scm_driver *scmdrv);
 void scm_driver_unregister(struct scm_driver *scmdrv);
 
-int scm_start_aob(struct aob *aob);
+int eadm_start_aob(struct aob *aob);
 void scm_irq_handler(struct aob *aob, int error);
 
-struct eadm_ops {
-	int (*eadm_start) (struct aob *aob);
-	struct module *owner;
-};
-
-int scm_get_ref(void);
-void scm_put_ref(void);
-
-void register_eadm_ops(struct eadm_ops *ops);
-void unregister_eadm_ops(struct eadm_ops *ops);
-
 #endif /* _ASM_S390_EADM_H */
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index ef617099507..7ecb92b469b 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -12,9 +12,9 @@
 #define TCW_FORMAT_DEFAULT		0
 #define TCW_TIDAW_FORMAT_DEFAULT	0
-#define TCW_FLAGS_INPUT_TIDA		1 << (23 - 5)
-#define TCW_FLAGS_TCCB_TIDA		1 << (23 - 6)
-#define TCW_FLAGS_OUTPUT_TIDA		1 << (23 - 7)
+#define TCW_FLAGS_INPUT_TIDA		(1 << (23 - 5))
+#define TCW_FLAGS_TCCB_TIDA		(1 << (23 - 6))
+#define TCW_FLAGS_OUTPUT_TIDA		(1 << (23 - 7))
 #define TCW_FLAGS_TIDAW_FORMAT(x)	((x) & 3) << (23 - 9)
 #define TCW_FLAGS_GET_TIDAW_FORMAT(x)	(((x) >> (23 - 9)) & 3)
 
@@ -54,11 +54,11 @@ struct tcw {
 	u32 intrg;
 } __attribute__ ((packed, aligned(64)));
 
-#define TIDAW_FLAGS_LAST		1 << (7 - 0)
-#define TIDAW_FLAGS_SKIP		1 << (7 - 1)
-#define TIDAW_FLAGS_DATA_INT		1 << (7 - 2)
-#define TIDAW_FLAGS_TTIC		1 << (7 - 3)
-#define TIDAW_FLAGS_INSERT_CBC		1 << (7 - 4)
+#define TIDAW_FLAGS_LAST		(1 << (7 - 0))
+#define TIDAW_FLAGS_SKIP		(1 << (7 - 1))
+#define TIDAW_FLAGS_DATA_INT		(1 << (7 - 2))
+#define TIDAW_FLAGS_TTIC		(1 << (7 - 3))
+#define TIDAW_FLAGS_INSERT_CBC		(1 << (7 - 4))
 
 /**
  * struct tidaw - Transport-Indirect-Addressing Word (TIDAW)
@@ -106,9 +106,9 @@ struct tsa_ddpc {
 	u8 sense[32];
 } __attribute__ ((packed));
 
-#define 
TSA_INTRG_FLAGS_CU_STATE_VALID 1 << (7 - 0) -#define TSA_INTRG_FLAGS_DEV_STATE_VALID 1 << (7 - 1) -#define TSA_INTRG_FLAGS_OP_STATE_VALID 1 << (7 - 2) +#define TSA_INTRG_FLAGS_CU_STATE_VALID (1 << (7 - 0)) +#define TSA_INTRG_FLAGS_DEV_STATE_VALID (1 << (7 - 1)) +#define TSA_INTRG_FLAGS_OP_STATE_VALID (1 << (7 - 2)) /** * struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA) @@ -140,10 +140,10 @@ struct tsa_intrg { #define TSB_FORMAT_DDPC 2 #define TSB_FORMAT_INTRG 3 -#define TSB_FLAGS_DCW_OFFSET_VALID 1 << (7 - 0) -#define TSB_FLAGS_COUNT_VALID 1 << (7 - 1) -#define TSB_FLAGS_CACHE_MISS 1 << (7 - 2) -#define TSB_FLAGS_TIME_VALID 1 << (7 - 3) +#define TSB_FLAGS_DCW_OFFSET_VALID (1 << (7 - 0)) +#define TSB_FLAGS_COUNT_VALID (1 << (7 - 1)) +#define TSB_FLAGS_CACHE_MISS (1 << (7 - 2)) +#define TSB_FLAGS_TIME_VALID (1 << (7 - 3)) #define TSB_FLAGS_FORMAT(x) ((x) & 7) #define TSB_FORMAT(t) ((t)->flags & 7) @@ -179,9 +179,9 @@ struct tsb { #define DCW_INTRG_RCQ_PRIMARY 1 #define DCW_INTRG_RCQ_SECONDARY 2 -#define DCW_INTRG_FLAGS_MPM 1 < (7 - 0) -#define DCW_INTRG_FLAGS_PPR 1 < (7 - 1) -#define DCW_INTRG_FLAGS_CRIT 1 < (7 - 2) +#define DCW_INTRG_FLAGS_MPM (1 << (7 - 0)) +#define DCW_INTRG_FLAGS_PPR (1 << (7 - 1)) +#define DCW_INTRG_FLAGS_CRIT (1 << (7 - 2)) /** * struct dcw_intrg_data - Interrogate DCW data @@ -216,7 +216,7 @@ struct dcw_intrg_data { u8 prog_data[0]; } __attribute__ ((packed)); -#define DCW_FLAGS_CC 1 << (7 - 1) +#define DCW_FLAGS_CC (1 << (7 - 1)) #define DCW_CMD_WRITE 0x01 #define DCW_CMD_READ 0x02 diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index 51bcaa0fdee..a4811aa0304 100644 --- a/arch/s390/include/asm/futex.h +++ b/arch/s390/include/asm/futex.h @@ -1,23 +1,63 @@ #ifndef _ASM_S390_FUTEX_H #define _ASM_S390_FUTEX_H -#include <linux/futex.h> #include <linux/uaccess.h> +#include <linux/futex.h> +#include <asm/mmu_context.h> #include <asm/errno.h> -static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ + asm volatile( \ + " sacf 256\n" \ + "0: l %1,0(%6)\n" \ + "1:"insn \ + "2: cs %1,%2,0(%6)\n" \ + "3: jl 1b\n" \ + " lhi %0,0\n" \ + "4: sacf 768\n" \ + EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ + : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ + "=m" (*uaddr) \ + : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ + "m" (*uaddr) : "cc"); + +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; int oparg = (encoded_op << 8) >> 20; int cmparg = (encoded_op << 20) >> 20; - int oldval, ret; + int oldval = 0, newval, ret; + load_kernel_asce(); if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; pagefault_disable(); - ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval); + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("lr %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("lr %2,%1\nar %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("lr %2,%1\nor %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("lr %2,%1\nnr %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("lr %2,%1\nxr %2,%5\n", + ret, oldval, newval, uaddr, oparg); + break; + default: + ret = -ENOSYS; + } pagefault_enable(); if (!ret) { @@ -37,7 +77,20 @@ static inline int 
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { - return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval); + int ret; + + load_kernel_asce(); + asm volatile( + " sacf 256\n" + "0: cs %1,%4,0(%5)\n" + "1: la %0,0\n" + "2: sacf 768\n" + EX_TABLE(0b,2b) EX_TABLE(1b,2b) + : "=d" (ret), "+d" (oldval), "=m" (*uaddr) + : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) + : "cc", "memory"); + *uval = oldval; + return ret; } #endif /* _ASM_S390_FUTEX_H */ diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index a908d2941c5..b7eabaaeffb 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h @@ -18,8 +18,6 @@ #define __ARCH_HAS_DO_SOFTIRQ #define __ARCH_IRQ_EXIT_IRQS_DISABLED -#define HARDIRQ_BITS 8 - static inline void ack_bad_irq(unsigned int irq) { printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 2bd6cb897b9..2fcccc0c997 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h @@ -7,6 +7,7 @@ #ifndef _ASM_S390_IPL_H #define _ASM_S390_IPL_H +#include <asm/lowcore.h> #include <asm/types.h> #include <asm/cio.h> #include <asm/setup.h> @@ -86,7 +87,14 @@ struct ipl_parameter_block { */ extern u32 ipl_flags; extern u32 dump_prefix_page; -extern unsigned int zfcpdump_prefix_array[]; + +struct dump_save_areas { + struct save_area **areas; + int count; +}; + +extern struct dump_save_areas dump_save_areas; +struct save_area *dump_save_area_create(int cpu); extern void do_reipl(void); extern void do_halt(void); diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index 5f8bcc5fe42..c4dd400a279 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h @@ -16,6 +16,20 @@ /* This number is used when no interrupt has been assigned */ #define NO_IRQ 0 +/* External interruption codes */ +#define EXT_IRQ_INTERRUPT_KEY 0x0040 +#define EXT_IRQ_CLK_COMP 0x1004 +#define EXT_IRQ_CPU_TIMER 0x1005 +#define EXT_IRQ_WARNING_TRACK 0x1007 +#define EXT_IRQ_MALFUNC_ALERT 0x1200 +#define EXT_IRQ_EMERGENCY_SIG 0x1201 +#define EXT_IRQ_EXTERNAL_CALL 0x1202 +#define EXT_IRQ_TIMING_ALERT 0x1406 +#define EXT_IRQ_MEASURE_ALERT 0x1407 +#define EXT_IRQ_SERVICE_SIG 0x2401 +#define EXT_IRQ_CP_SERVICE 0x2603 +#define EXT_IRQ_IUCV 0x4000 + #ifndef __ASSEMBLY__ #include <linux/hardirq.h> @@ -53,6 +67,7 @@ enum interruption_class { IRQIO_PCI, IRQIO_MSI, IRQIO_VIR, + IRQIO_VAI, NMI_NMI, CPU_RST, NR_ARCH_IRQS @@ -76,8 +91,8 @@ struct ext_code { typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long); -int register_external_interrupt(u16 code, ext_int_handler_t handler); -int unregister_external_interrupt(u16 code, ext_int_handler_t handler); +int register_external_irq(u16 code, ext_int_handler_t handler); +int unregister_external_irq(u16 code, ext_int_handler_t handler); enum irq_subclass { IRQ_SUBCLASS_MEASUREMENT_ALERT = 5, diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h index 6c32190dc73..346b1c85ffb 100644 --- a/arch/s390/include/asm/jump_label.h +++ b/arch/s390/include/asm/jump_label.h @@ -15,7 +15,7 @@ static __always_inline bool arch_static_branch(struct static_key *key) { - asm goto("0: brcl 0,0\n" + asm_volatile_goto("0: brcl 0,0\n" ".pushsection __jump_table, \"aw\"\n" ASM_ALIGN "\n" ASM_PTR " 0b, %l[label], %0\n" diff --git 
a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index e87ecaa2c56..4181d7baabb 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -16,35 +16,48 @@ #include <linux/hrtimer.h> #include <linux/interrupt.h> #include <linux/kvm_host.h> +#include <linux/kvm.h> #include <asm/debug.h> #include <asm/cpu.h> +#include <asm/isc.h> #define KVM_MAX_VCPUS 64 #define KVM_USER_MEM_SLOTS 32 +/* + * These seem to be used for allocating ->chip in the routing table, + * which we don't use. 4096 is an out-of-thin-air value. If we need + * to look at ->chip later on, we'll need to revisit this. + */ +#define KVM_NR_IRQCHIPS 1 +#define KVM_IRQCHIP_NUM_PINS 4096 + +#define SIGP_CTRL_C 0x00800000 + struct sca_entry { - atomic_t scn; + atomic_t ctrl; __u32 reserved; __u64 sda; __u64 reserved2[2]; } __attribute__((packed)); +union ipte_control { + unsigned long val; + struct { + unsigned long k : 1; + unsigned long kh : 31; + unsigned long kg : 32; + }; +}; struct sca_block { - __u64 ipte_control; + union ipte_control ipte_control; __u64 reserved[5]; __u64 mcn; __u64 reserved2; struct sca_entry cpu[64]; } __attribute__((packed)); -#define KVM_NR_PAGE_SIZES 2 -#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 8) -#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x)) -#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x)) -#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) -#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) - #define CPUSTAT_STOPPED 0x80000000 #define CPUSTAT_WAIT 0x10000000 #define CPUSTAT_ECALL_PEND 0x08000000 @@ -61,6 +74,7 @@ struct sca_block { #define CPUSTAT_ZARCH 0x00000800 #define CPUSTAT_MCDS 0x00000100 #define CPUSTAT_SM 0x00000080 +#define CPUSTAT_IBS 0x00000040 #define CPUSTAT_G 0x00000008 #define CPUSTAT_GED 0x00000004 #define CPUSTAT_J 0x00000002 @@ -68,7 +82,9 @@ struct sca_block { struct kvm_s390_sie_block { atomic_t cpuflags; /* 0x0000 */ - __u32 prefix; /* 0x0004 */ + __u32 : 1; /* 0x0004 */ + __u32 prefix : 18; + __u32 : 13; __u8 reserved08[4]; /* 0x0008 */ #define PROG_IN_SIE (1<<0) __u32 prog0c; /* 0x000c */ @@ -82,12 +98,27 @@ struct kvm_s390_sie_block { __u8 reserved40[4]; /* 0x0040 */ #define LCTL_CR0 0x8000 #define LCTL_CR6 0x0200 +#define LCTL_CR9 0x0040 +#define LCTL_CR10 0x0020 +#define LCTL_CR11 0x0010 #define LCTL_CR14 0x0002 __u16 lctl; /* 0x0044 */ __s16 icpua; /* 0x0046 */ -#define ICTL_LPSW 0x00400000 +#define ICTL_PINT 0x20000000 +#define ICTL_LPSW 0x00400000 +#define ICTL_STCTL 0x00040000 +#define ICTL_ISKE 0x00004000 +#define ICTL_SSKE 0x00002000 +#define ICTL_RRBE 0x00001000 +#define ICTL_TPROT 0x00000200 __u32 ictl; /* 0x0048 */ __u32 eca; /* 0x004c */ +#define ICPT_INST 0x04 +#define ICPT_PROGI 0x08 +#define ICPT_INSTPROGI 0x0C +#define ICPT_OPEREXC 0x2C +#define ICPT_PARTEXEC 0x38 +#define ICPT_IOINST 0x40 __u8 icptcode; /* 0x0050 */ __u8 reserved51; /* 0x0051 */ __u16 ihcpu; /* 0x0052 */ @@ -106,16 +137,48 @@ struct kvm_s390_sie_block { psw_t gpsw; /* 0x0090 */ __u64 gg14; /* 0x00a0 */ __u64 gg15; /* 0x00a8 */ - __u8 reservedb0[30]; /* 0x00b0 */ - __u16 iprcc; /* 0x00ce */ - __u8 reservedd0[48]; /* 0x00d0 */ + __u8 reservedb0[20]; /* 0x00b0 */ + __u16 extcpuaddr; /* 0x00c4 */ + __u16 eic; /* 0x00c6 */ + __u32 reservedc8; /* 0x00c8 */ + __u16 pgmilc; /* 0x00cc */ + __u16 iprcc; /* 0x00ce */ + __u32 dxc; /* 0x00d0 */ + __u16 mcn; /* 0x00d4 */ + __u8 perc; /* 0x00d6 */ + __u8 peratmid; /* 0x00d7 */ + __u64 peraddr; /* 0x00d8 */ + __u8 eai; /* 0x00e0 */ + __u8 peraid; /* 0x00e1 */ + __u8 
oai; /* 0x00e2 */ + __u8 armid; /* 0x00e3 */ + __u8 reservede4[4]; /* 0x00e4 */ + __u64 tecmc; /* 0x00e8 */ + __u8 reservedf0[16]; /* 0x00f0 */ __u64 gcr[16]; /* 0x0100 */ __u64 gbea; /* 0x0180 */ __u8 reserved188[24]; /* 0x0188 */ __u32 fac; /* 0x01a0 */ - __u8 reserved1a4[92]; /* 0x01a4 */ + __u8 reserved1a4[20]; /* 0x01a4 */ + __u64 cbrlo; /* 0x01b8 */ + __u8 reserved1c0[30]; /* 0x01c0 */ + __u64 pp; /* 0x01de */ + __u8 reserved1e6[2]; /* 0x01e6 */ + __u64 itdba; /* 0x01e8 */ + __u8 reserved1f0[16]; /* 0x01f0 */ } __attribute__((packed)); +struct kvm_s390_itdb { + __u8 data[256]; +} __packed; + +struct sie_page { + struct kvm_s390_sie_block sie_block; + __u8 reserved200[1024]; /* 0x0200 */ + struct kvm_s390_itdb itdb; /* 0x0600 */ + __u8 reserved700[2304]; /* 0x0700 */ +} __packed; + struct kvm_vcpu_stat { u32 exit_userspace; u32 exit_null; @@ -126,6 +189,8 @@ struct kvm_vcpu_stat { u32 exit_instruction; u32 instruction_lctl; u32 instruction_lctlg; + u32 instruction_stctl; + u32 instruction_stctg; u32 exit_program_interruption; u32 exit_instr_and_program; u32 deliver_external_call; @@ -144,11 +209,13 @@ struct kvm_vcpu_stat { u32 instruction_stpx; u32 instruction_stap; u32 instruction_storage_key; + u32 instruction_ipte_interlock; u32 instruction_stsch; u32 instruction_chsc; u32 instruction_stsi; u32 instruction_stfl; u32 instruction_tprot; + u32 instruction_essa; u32 instruction_sigp_sense; u32 instruction_sigp_sense_running; u32 instruction_sigp_external_call; @@ -162,46 +229,58 @@ struct kvm_vcpu_stat { u32 diagnose_9c; }; -struct kvm_s390_io_info { - __u16 subchannel_id; /* 0x0b8 */ - __u16 subchannel_nr; /* 0x0ba */ - __u32 io_int_parm; /* 0x0bc */ - __u32 io_int_word; /* 0x0c0 */ -}; - -struct kvm_s390_ext_info { - __u32 ext_params; - __u64 ext_params2; -}; - -#define PGM_OPERATION 0x01 -#define PGM_PRIVILEGED_OP 0x02 -#define PGM_EXECUTE 0x03 -#define PGM_PROTECTION 0x04 -#define PGM_ADDRESSING 0x05 -#define PGM_SPECIFICATION 0x06 -#define PGM_DATA 0x07 - -struct kvm_s390_pgm_info { - __u16 code; -}; - -struct kvm_s390_prefix_info { - __u32 address; -}; - -struct kvm_s390_extcall_info { - __u16 code; -}; - -struct kvm_s390_emerg_info { - __u16 code; -}; - -struct kvm_s390_mchk_info { - __u64 cr14; - __u64 mcic; -}; +#define PGM_OPERATION 0x01 +#define PGM_PRIVILEGED_OP 0x02 +#define PGM_EXECUTE 0x03 +#define PGM_PROTECTION 0x04 +#define PGM_ADDRESSING 0x05 +#define PGM_SPECIFICATION 0x06 +#define PGM_DATA 0x07 +#define PGM_FIXED_POINT_OVERFLOW 0x08 +#define PGM_FIXED_POINT_DIVIDE 0x09 +#define PGM_DECIMAL_OVERFLOW 0x0a +#define PGM_DECIMAL_DIVIDE 0x0b +#define PGM_HFP_EXPONENT_OVERFLOW 0x0c +#define PGM_HFP_EXPONENT_UNDERFLOW 0x0d +#define PGM_HFP_SIGNIFICANCE 0x0e +#define PGM_HFP_DIVIDE 0x0f +#define PGM_SEGMENT_TRANSLATION 0x10 +#define PGM_PAGE_TRANSLATION 0x11 +#define PGM_TRANSLATION_SPEC 0x12 +#define PGM_SPECIAL_OPERATION 0x13 +#define PGM_OPERAND 0x15 +#define PGM_TRACE_TABEL 0x16 +#define PGM_SPACE_SWITCH 0x1c +#define PGM_HFP_SQUARE_ROOT 0x1d +#define PGM_PC_TRANSLATION_SPEC 0x1f +#define PGM_AFX_TRANSLATION 0x20 +#define PGM_ASX_TRANSLATION 0x21 +#define PGM_LX_TRANSLATION 0x22 +#define PGM_EX_TRANSLATION 0x23 +#define PGM_PRIMARY_AUTHORITY 0x24 +#define PGM_SECONDARY_AUTHORITY 0x25 +#define PGM_LFX_TRANSLATION 0x26 +#define PGM_LSX_TRANSLATION 0x27 +#define PGM_ALET_SPECIFICATION 0x28 +#define PGM_ALEN_TRANSLATION 0x29 +#define PGM_ALE_SEQUENCE 0x2a +#define PGM_ASTE_VALIDITY 0x2b +#define PGM_ASTE_SEQUENCE 0x2c +#define PGM_EXTENDED_AUTHORITY 0x2d +#define 
PGM_LSTE_SEQUENCE		0x2e
+#define PGM_ASTE_INSTANCE		0x2f
+#define PGM_STACK_FULL			0x30
+#define PGM_STACK_EMPTY			0x31
+#define PGM_STACK_SPECIFICATION		0x32
+#define PGM_STACK_TYPE			0x33
+#define PGM_STACK_OPERATION		0x34
+#define PGM_ASCE_TYPE			0x38
+#define PGM_REGION_FIRST_TRANS		0x39
+#define PGM_REGION_SECOND_TRANS		0x3a
+#define PGM_REGION_THIRD_TRANS		0x3b
+#define PGM_MONITOR			0x40
+#define PGM_PER				0x80
+#define PGM_CRYPTO_OPERATION		0x119
 
 struct kvm_s390_interrupt_info {
 	struct list_head list;
@@ -220,7 +299,6 @@ struct kvm_s390_interrupt_info {
 /* for local_interrupt.action_flags */
 #define ACTION_STORE_ON_STOP		(1<<0)
 #define ACTION_STOP_ON_STOP		(1<<1)
-#define ACTION_RELOADVCPU_ON_STOP	(1<<2)
 
 struct kvm_s390_local_interrupt {
 	spinlock_t lock;
@@ -238,11 +316,49 @@ struct kvm_s390_float_interrupt {
 	struct list_head list;
 	atomic_t active;
 	int next_rr_cpu;
-	unsigned long idle_mask[(KVM_MAX_VCPUS + sizeof(long) - 1)
-				/ sizeof(long)];
-	struct kvm_s390_local_interrupt *local_int[KVM_MAX_VCPUS];
+	unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+	unsigned int irq_count;
+};
+
+struct kvm_hw_wp_info_arch {
+	unsigned long addr;
+	unsigned long phys_addr;
+	int len;
+	char *old_data;
 };
 
+struct kvm_hw_bp_info_arch {
+	unsigned long addr;
+	int len;
+};
+
+/*
+ * Only the upper 16 bits of kvm_guest_debug->control are arch specific.
+ * Further KVM_GUESTDBG flags which can be used from userspace can be found in
+ * arch/s390/include/uapi/asm/kvm.h
+ */
+#define KVM_GUESTDBG_EXIT_PENDING 0x10000000
+
+#define guestdbg_enabled(vcpu) \
+		(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
+#define guestdbg_sstep_enabled(vcpu) \
+		(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+#define guestdbg_hw_bp_enabled(vcpu) \
+		(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
+		(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
+
+struct kvm_guestdbg_info_arch {
+	unsigned long cr0;
+	unsigned long cr9;
+	unsigned long cr10;
+	unsigned long cr11;
+	struct kvm_hw_bp_info_arch *hw_bp_info;
+	struct kvm_hw_wp_info_arch *hw_wp_info;
+	int nr_hw_bp;
+	int nr_hw_wp;
+	unsigned long last_bp;
+};
 
 struct kvm_vcpu_arch {
 	struct kvm_s390_sie_block *sie_block;
@@ -252,11 +368,17 @@ struct kvm_vcpu_arch {
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer    ckc_timer;
 	struct tasklet_struct tasklet;
+	struct kvm_s390_pgm_info pgm;
 	union  {
 		struct cpuid	cpu_id;
 		u64		stidp_data;
 	};
 	struct gmap *gmap;
+	struct kvm_guestdbg_info_arch guestdbg;
+#define KVM_S390_PFAULT_TOKEN_INVALID	(-1UL)
+	unsigned long pfault_token;
+	unsigned long pfault_select;
+	unsigned long pfault_compare;
 };
 
 struct kvm_vm_stat {
@@ -266,12 +388,39 @@ struct kvm_vm_stat {
 struct kvm_arch_memory_slot {
 };
 
+struct s390_map_info {
+	struct list_head list;
+	__u64 guest_addr;
+	__u64 addr;
+	struct page *page;
+};
+
+struct s390_io_adapter {
+	unsigned int id;
+	int isc;
+	bool maskable;
+	bool masked;
+	bool swap;
+	struct rw_semaphore maps_lock;
+	struct list_head maps;
+	atomic_t nr_maps;
+};
+
+#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
+#define MAX_S390_ADAPTER_MAPS 256
+
 struct kvm_arch{
 	struct sca_block *sca;
 	debug_info_t *dbf;
 	struct kvm_s390_float_interrupt float_int;
+	struct kvm_device *flic;
 	struct gmap *gmap;
 	int css_support;
+	int use_irqchip;
+	int use_cmma;
+	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
+	wait_queue_head_t ipte_wq;
+	spinlock_t start_stop_lock;
 };
 
 #define KVM_HVA_ERR_BAD		(-1UL)
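A hypothetical consumer of the guestdbg_* predicates defined above (not part of the patch): an interception handler could use them to decide whether control must return to userspace. KVM_GUESTDBG_ENABLE and KVM_GUESTDBG_SINGLESTEP come from the generic KVM UAPI; debug_exit_wanted() and the single-step handling shown are assumptions for illustration:

/* Illustrative only: should this intercept exit to userspace? */
static int debug_exit_wanted(struct kvm_vcpu *vcpu)
{
	if (!guestdbg_enabled(vcpu))
		return 0;			/* debugging is off */
	if (guestdbg_sstep_enabled(vcpu))
		/* single-stepping: request an exit after this instruction */
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
	return guestdbg_exit_pending(vcpu);
}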
@@ -282,6 +431,24 @@ static inline bool kvm_is_error_hva(unsigned long addr)
 	return IS_ERR_VALUE(addr);
 }
 
+#define ASYNC_PF_PER_VCPU	64
+struct kvm_vcpu;
+struct kvm_async_pf;
+struct kvm_arch_async_pf {
+	unsigned long pfault_token;
+};
+
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
+
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+			       struct kvm_async_pf *work);
+
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+				     struct kvm_async_pf *work);
+
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+				 struct kvm_async_pf *work);
+
 extern int sie64a(struct kvm_s390_sie_block *, u64 *);
 extern char sie_exit;
 #endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index bbf8141408c..4349197ab9d 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -56,13 +56,14 @@ struct _lowcore {
 	__u16	pgm_code;		/* 0x008e */
 	__u32	trans_exc_code;		/* 0x0090 */
 	__u16	mon_class_num;		/* 0x0094 */
-	__u16	per_perc_atmid;		/* 0x0096 */
+	__u8	per_code;		/* 0x0096 */
+	__u8	per_atmid;		/* 0x0097 */
 	__u32	per_address;		/* 0x0098 */
 	__u32	monitor_code;		/* 0x009c */
 	__u8	exc_access_id;		/* 0x00a0 */
 	__u8	per_access_id;		/* 0x00a1 */
 	__u8	op_access_id;		/* 0x00a2 */
-	__u8	ar_access_id;		/* 0x00a3 */
+	__u8	ar_mode_id;		/* 0x00a3 */
 	__u8	pad_0x00a4[0x00b8-0x00a4];	/* 0x00a4 */
 	__u16	subchannel_id;		/* 0x00b8 */
 	__u16	subchannel_nr;		/* 0x00ba */
@@ -93,7 +94,9 @@ struct _lowcore {
 	__u32	save_area_sync[8];	/* 0x0200 */
 	__u32	save_area_async[8];	/* 0x0220 */
 	__u32	save_area_restart[1];	/* 0x0240 */
-	__u8	pad_0x0244[0x0248-0x0244];	/* 0x0244 */
+
+	/* CPU flags. */
+	__u32	cpu_flags;		/* 0x0244 */
 
 	/* Return psws. */
 	psw_t	return_psw;		/* 0x0248 */
@@ -139,12 +142,9 @@ struct _lowcore {
 	__u32	percpu_offset;		/* 0x02f0 */
 	__u32	machine_flags;		/* 0x02f4 */
 	__u32	ftrace_func;		/* 0x02f8 */
-	__u8	pad_0x02fc[0x0300-0x02fc];	/* 0x02fc */
-
-	/* Interrupt response block */
-	__u8	irb[64];		/* 0x0300 */
+	__u32	spinlock_lockval;	/* 0x02fc */
 
-	__u8	pad_0x0340[0x0e00-0x0340];	/* 0x0340 */
+	__u8	pad_0x0300[0x0e00-0x0300];	/* 0x0300 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information
@@ -196,12 +196,13 @@ struct _lowcore {
 	__u16	pgm_code;		/* 0x008e */
 	__u32	data_exc_code;		/* 0x0090 */
 	__u16	mon_class_num;		/* 0x0094 */
-	__u16	per_perc_atmid;		/* 0x0096 */
+	__u8	per_code;		/* 0x0096 */
+	__u8	per_atmid;		/* 0x0097 */
 	__u64	per_address;		/* 0x0098 */
 	__u8	exc_access_id;		/* 0x00a0 */
 	__u8	per_access_id;		/* 0x00a1 */
 	__u8	op_access_id;		/* 0x00a2 */
-	__u8	ar_access_id;		/* 0x00a3 */
+	__u8	ar_mode_id;		/* 0x00a3 */
 	__u8	pad_0x00a4[0x00a8-0x00a4];	/* 0x00a4 */
 	__u64	trans_exc_code;		/* 0x00a8 */
 	__u64	monitor_code;		/* 0x00b0 */
@@ -237,7 +238,9 @@ struct _lowcore {
 	__u64	save_area_sync[8];	/* 0x0200 */
 	__u64	save_area_async[8];	/* 0x0240 */
 	__u64	save_area_restart[1];	/* 0x0280 */
-	__u8	pad_0x0288[0x0290-0x0288];	/* 0x0288 */
+
+	/* CPU flags. */
+	__u64	cpu_flags;		/* 0x0288 */
 
 	/* Return psws. */
 	psw_t	return_psw;		/* 0x0290 */
@@ -285,15 +288,13 @@ struct _lowcore {
 	__u64	machine_flags;		/* 0x0388 */
 	__u64	ftrace_func;		/* 0x0390 */
 	__u64	gmap;			/* 0x0398 */
-	__u8	pad_0x03a0[0x0400-0x03a0];	/* 0x03a0 */
-
-	/* Interrupt response block. */
-	__u8	irb[64];		/* 0x0400 */
+	__u32	spinlock_lockval;	/* 0x03a0 */
+	__u8	pad_0x03a4[0x0400-0x03a4];	/* 0x03a4 */
 
 	/* Per cpu primary space access list */
-	__u32	paste[16];		/* 0x0440 */
+	__u32	paste[16];		/* 0x0400 */
 
-	__u8	pad_0x0480[0x0e00-0x0480];	/* 0x0480 */
+	__u8	pad_0x0440[0x0e00-0x0440];	/* 0x0440 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index ff132ac64dd..a5e656260a7 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -1,9 +1,11 @@
 #ifndef __MMU_H
 #define __MMU_H
 
+#include <linux/cpumask.h>
 #include <linux/errno.h>
 
 typedef struct {
+	cpumask_t cpu_attach_mask;
 	atomic_t attach_count;
 	unsigned int flush_mm;
 	spinlock_t list_lock;
@@ -14,6 +16,8 @@ typedef struct {
 	unsigned long vdso_base;
 	/* The mmu context has extended page tables. */
 	unsigned int has_pgste:1;
+	/* The mmu context uses storage keys. */
+	unsigned int use_skey:1;
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name) \
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 9f973d8de90..3815bfea1b2 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,6 +15,7 @@ static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.attach_count, 0);
 	mm->context.flush_mm = 0;
 	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
@@ -22,6 +23,7 @@ static inline int init_new_context(struct task_struct *tsk,
 	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
 	mm->context.has_pgste = 0;
+	mm->context.use_skey = 0;
 	mm->context.asce_limit = STACK_TOP_MAX;
 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
 	return 0;
@@ -29,38 +31,69 @@ static inline int init_new_context(struct task_struct *tsk,
 
 #define destroy_context(mm)             do { } while (0)
 
-#ifndef CONFIG_64BIT
-#define LCTL_OPCODE "lctl"
-#else
-#define LCTL_OPCODE "lctlg"
-#endif
+static inline void set_user_asce(struct mm_struct *mm)
+{
+	S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+	if (current->thread.mm_segment.ar4)
+		__ctl_load(S390_lowcore.user_asce, 7, 7);
+	set_cpu_flag(CIF_ASCE);
+}
 
-static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
+static inline void clear_user_asce(void)
 {
-	pgd_t *pgd = mm->pgd;
-
-	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-	if (s390_user_mode != HOME_SPACE_MODE) {
-		/* Load primary space page table origin. */
-		asm volatile(LCTL_OPCODE" 1,1,%0\n"
-			     : : "m" (S390_lowcore.user_asce) );
-	} else
-		/* Load home space page table origin. 
*/ - asm volatile(LCTL_OPCODE" 13,13,%0" - : : "m" (S390_lowcore.user_asce) ); - set_fs(current->thread.mm_segment); + S390_lowcore.user_asce = S390_lowcore.kernel_asce; + + __ctl_load(S390_lowcore.user_asce, 1, 1); + __ctl_load(S390_lowcore.user_asce, 7, 7); +} + +static inline void load_kernel_asce(void) +{ + unsigned long asce; + + __ctl_store(asce, 1, 1); + if (asce != S390_lowcore.kernel_asce) + __ctl_load(S390_lowcore.kernel_asce, 1, 1); + set_cpu_flag(CIF_ASCE); } static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk) { - cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); - update_mm(next, tsk); - atomic_dec(&prev->context.attach_count); - WARN_ON(atomic_read(&prev->context.attach_count) < 0); + int cpu = smp_processor_id(); + + if (prev == next) + return; + if (MACHINE_HAS_TLB_LC) + cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); + /* Clear old ASCE by loading the kernel ASCE. */ + __ctl_load(S390_lowcore.kernel_asce, 1, 1); + __ctl_load(S390_lowcore.kernel_asce, 7, 7); atomic_inc(&next->context.attach_count); - /* Check for TLBs not flushed yet */ - __tlb_flush_mm_lazy(next); + atomic_dec(&prev->context.attach_count); + if (MACHINE_HAS_TLB_LC) + cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); + S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd); +} + +#define finish_arch_post_lock_switch finish_arch_post_lock_switch +static inline void finish_arch_post_lock_switch(void) +{ + struct task_struct *tsk = current; + struct mm_struct *mm = tsk->mm; + + load_kernel_asce(); + if (mm) { + preempt_disable(); + while (atomic_read(&mm->context.attach_count) >> 16) + cpu_relax(); + + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); + if (mm->context.flush_mm) + __tlb_flush_mm(mm); + preempt_enable(); + } + set_fs(current->thread.mm_segment); } #define enter_lazy_tlb(mm,tsk) do { } while (0) @@ -69,7 +102,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { - switch_mm(prev, next, current); + switch_mm(prev, next, current); + cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); + set_user_asce(next); } static inline void arch_dup_mmap(struct mm_struct *oldmm, diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h index 688271f5f2e..458c1f7fbc1 100644 --- a/arch/s390/include/asm/mutex.h +++ b/arch/s390/include/asm/mutex.h @@ -7,5 +7,3 @@ */ #include <asm-generic/mutex-dec.h> - -#define arch_mutex_cpu_relax() barrier() diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 1e51f2915b2..114258eeaac 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -30,7 +30,12 @@ #include <asm/setup.h> #ifndef __ASSEMBLY__ -void storage_key_init_range(unsigned long start, unsigned long end); +static inline void storage_key_init_range(unsigned long start, unsigned long end) +{ +#if PAGE_DEFAULT_KEY + __storage_key_init_range(start, end); +#endif +} static inline void clear_page(void *page) { @@ -43,33 +48,21 @@ static inline void clear_page(void *page) : "memory", "cc"); } +/* + * copy_page uses the mvcl instruction with 0xb0 padding byte in order to + * bypass caches when copying a page. Especially when copying huge pages + * this keeps L1 and L2 data caches alive. 
+ */ static inline void copy_page(void *to, void *from) { - if (MACHINE_HAS_MVPG) { - register unsigned long reg0 asm ("0") = 0; - asm volatile( - " mvpg %0,%1" - : : "a" (to), "a" (from), "d" (reg0) - : "memory", "cc"); - } else - asm volatile( - " mvc 0(256,%0),0(%1)\n" - " mvc 256(256,%0),256(%1)\n" - " mvc 512(256,%0),512(%1)\n" - " mvc 768(256,%0),768(%1)\n" - " mvc 1024(256,%0),1024(%1)\n" - " mvc 1280(256,%0),1280(%1)\n" - " mvc 1536(256,%0),1536(%1)\n" - " mvc 1792(256,%0),1792(%1)\n" - " mvc 2048(256,%0),2048(%1)\n" - " mvc 2304(256,%0),2304(%1)\n" - " mvc 2560(256,%0),2560(%1)\n" - " mvc 2816(256,%0),2816(%1)\n" - " mvc 3072(256,%0),3072(%1)\n" - " mvc 3328(256,%0),3328(%1)\n" - " mvc 3584(256,%0),3584(%1)\n" - " mvc 3840(256,%0),3840(%1)\n" - : : "a" (to), "a" (from) : "memory"); + register void *reg2 asm ("2") = to; + register unsigned long reg3 asm ("3") = 0x1000; + register void *reg4 asm ("4") = from; + register unsigned long reg5 asm ("5") = 0xb0001000; + asm volatile( + " mvcl 2,4" + : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5) + : : "memory", "cc"); } #define clear_user_page(page, vaddr, pg) clear_page(page) diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 1cc185da9d3..c030900320e 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -63,9 +63,10 @@ enum zpci_state { }; struct zpci_bar_struct { + struct resource *res; /* bus resource */ u32 val; /* bar start & 3 flag bits */ - u8 size; /* order 2 exponent */ u16 map_idx; /* index into bar mapping array */ + u8 size; /* order 2 exponent */ }; /* Private data per function */ @@ -77,10 +78,16 @@ struct zpci_dev { enum zpci_state state; u32 fid; /* function ID, used by sclp */ u32 fh; /* function handle, used by insn's */ + u16 vfn; /* virtual function number */ u16 pchid; /* physical channel ID */ u8 pfgid; /* function group ID */ + u8 pft; /* pci function type */ u16 domain; + u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */ + u32 uid; /* user defined id */ + u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */ + /* IRQ stuff */ u64 msi_addr; /* MSI address */ struct airq_iv *aibv; /* adapter interrupt bit vector */ @@ -97,6 +104,7 @@ struct zpci_dev { unsigned long iommu_pages; unsigned int next_bit; + char res_name[16]; struct zpci_bar_struct bars[PCI_BAR_COUNT]; u64 start_dma; /* Start of available DMA addresses */ @@ -118,16 +126,16 @@ static inline bool zdev_enabled(struct zpci_dev *zdev) return (zdev->fh & (1UL << 31)) ? 
true : false; } +extern const struct attribute_group *zpci_attr_groups[]; + /* ----------------------------------------------------------------------------- Prototypes ----------------------------------------------------------------------------- */ /* Base stuff */ -struct zpci_dev *zpci_alloc_device(void); int zpci_create_device(struct zpci_dev *); int zpci_enable_device(struct zpci_dev *); int zpci_disable_device(struct zpci_dev *); void zpci_stop_device(struct zpci_dev *); -void zpci_free_device(struct zpci_dev *); int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64); int zpci_unregister_ioat(struct zpci_dev *, u8); @@ -144,6 +152,7 @@ int clp_disable_fh(struct zpci_dev *); void zpci_event_error(void *); void zpci_event_availability(void *); void zpci_rescan(void); +bool zpci_is_enabled(void); #else /* CONFIG_PCI */ static inline void zpci_event_error(void *e) {} static inline void zpci_event_availability(void *e) {} @@ -165,10 +174,6 @@ static inline void zpci_exit_slot(struct zpci_dev *zdev) {} struct zpci_dev *get_zdev(struct pci_dev *); struct zpci_dev *get_zdev_by_fid(u32); -/* sysfs */ -int zpci_sysfs_add_device(struct device *); -void zpci_sysfs_remove_device(struct device *); - /* DMA */ int zpci_dma_init(void); void zpci_dma_exit(void); diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h index d31d739f868..dd78f92f1cc 100644 --- a/arch/s390/include/asm/pci_clp.h +++ b/arch/s390/include/asm/pci_clp.h @@ -44,6 +44,7 @@ struct clp_fh_list_entry { #define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */ #define CLP_UTIL_STR_LEN 64 +#define CLP_PFIP_NR_SEGMENTS 4 /* List PCI functions request */ struct clp_req_list_pci { @@ -85,7 +86,7 @@ struct clp_rsp_query_pci { struct clp_rsp_hdr hdr; u32 fmt : 4; /* cmd request block format */ u32 : 28; - u64 reserved1; + u64 : 64; u16 vfn; /* virtual fn number */ u16 : 7; u16 util_str_avail : 1; /* utility string available? */ @@ -94,10 +95,13 @@ struct clp_rsp_query_pci { u8 bar_size[PCI_BAR_COUNT]; u16 pchid; u32 bar[PCI_BAR_COUNT]; - u64 reserved2; + u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */ + u32 : 24; + u8 pft; /* pci function type */ u64 sdma; /* start dma as */ u64 edma; /* end dma as */ - u64 reserved3[6]; + u32 reserved[11]; + u32 uid; /* user defined id */ u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */ } __packed; diff --git a/arch/s390/include/asm/pci_debug.h b/arch/s390/include/asm/pci_debug.h index 1ca5d1047c7..ac24b26fc06 100644 --- a/arch/s390/include/asm/pci_debug.h +++ b/arch/s390/include/asm/pci_debug.h @@ -6,14 +6,9 @@ extern debug_info_t *pci_debug_msg_id; extern debug_info_t *pci_debug_err_id; -#ifdef CONFIG_PCI_DEBUG #define zpci_dbg(imp, fmt, args...) \ debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args) -#else /* !CONFIG_PCI_DEBUG */ -#define zpci_dbg(imp, fmt, args...) do { } while (0) -#endif - #define zpci_err(text...) 
\ do { \ char debug_buffer[16]; \ diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index df6eac9f0cb..649eb62c52b 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h @@ -54,11 +54,9 @@ struct zpci_fib { u32 fmt : 8; /* format */ u32 : 24; - u32 reserved1; + u32 : 32; u8 fc; /* function controls */ - u8 reserved2; - u16 reserved3; - u32 reserved4; + u64 : 56; u64 pba; /* PCI base address */ u64 pal; /* PCI address limit */ u64 iota; /* I/O Translation Anchor */ @@ -70,14 +68,13 @@ struct zpci_fib { u32 sum : 1; /* Adapter int summary bit enabled */ u32 : 1; u32 aisbo : 6; /* Adapter int summary bit offset */ - u32 reserved5; + u32 : 32; u64 aibv; /* Adapter int bit vector address */ u64 aisb; /* Adapter int summary bit address */ u64 fmb_addr; /* Function measurement block address and key */ - u64 reserved6; - u64 reserved7; -} __packed; - + u32 : 32; + u32 gd; +} __packed __aligned(8); int zpci_mod_fc(u64 req, struct zpci_fib *fib); int zpci_refresh_trans(u64 fn, u64 addr, u64 range); diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h index 86fe0ee2cee..fa91e009745 100644 --- a/arch/s390/include/asm/percpu.h +++ b/arch/s390/include/asm/percpu.h @@ -10,16 +10,22 @@ */ #define __my_cpu_offset S390_lowcore.percpu_offset +#ifdef CONFIG_64BIT + /* * For 64 bit module code, the module may be more than 4G above the * per cpu area, use weak definitions to force the compiler to * generate external references. */ -#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE) +#if defined(CONFIG_SMP) && defined(MODULE) #define ARCH_NEEDS_WEAK_PER_CPU #endif -#define arch_this_cpu_to_op(pcp, val, op) \ +/* + * We use a compare-and-swap loop since that uses less cpu cycles than + * disabling and enabling interrupts like the generic variant would do. 
+ */ +#define arch_this_cpu_to_op_simple(pcp, val, op) \ ({ \ typedef typeof(pcp) pcp_op_T__; \ pcp_op_T__ old__, new__, prev__; \ @@ -30,42 +36,101 @@ do { \ old__ = prev__; \ new__ = old__ op (val); \ - switch (sizeof(*ptr__)) { \ - case 8: \ - prev__ = cmpxchg64(ptr__, old__, new__); \ - break; \ - default: \ - prev__ = cmpxchg(ptr__, old__, new__); \ - } \ + prev__ = cmpxchg(ptr__, old__, new__); \ } while (prev__ != old__); \ preempt_enable(); \ new__; \ }) -#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +) -#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +) -#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +) -#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +) +#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &) +#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &) +#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) +#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) + +#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES + +#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +) +#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &) +#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, &) +#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) +#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, |) + +#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */ + +#define arch_this_cpu_add(pcp, val, op1, op2, szcast) \ +{ \ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ val__ = (val); \ + pcp_op_T__ old__, *ptr__; \ + preempt_disable(); \ + ptr__ = __this_cpu_ptr(&(pcp)); \ + if (__builtin_constant_p(val__) && \ + ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ + asm volatile( \ + op2 " %[ptr__],%[val__]\n" \ + : [ptr__] "+Q" (*ptr__) \ + : [val__] "i" ((szcast)val__) \ + : "cc"); \ + } else { \ + asm volatile( \ + op1 " %[old__],%[val__],%[ptr__]\n" \ + : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ + : [val__] "d" (val__) \ + : "cc"); \ + } \ + preempt_enable(); \ +} -#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +) -#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +) -#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +) -#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +) +#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int) +#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long) -#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &) -#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &) -#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &) -#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &) +#define arch_this_cpu_add_return(pcp, val, op) \ +({ \ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ val__ = (val); \ + pcp_op_T__ old__, *ptr__; \ + 
preempt_disable(); \ + ptr__ = __this_cpu_ptr(&(pcp)); \ + asm volatile( \ + op " %[old__],%[val__],%[ptr__]\n" \ + : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ + : [val__] "d" (val__) \ + : "cc"); \ + preempt_enable(); \ + old__ + val__; \ +}) -#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |) -#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |) -#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |) -#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |) +#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa") +#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag") -#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^) -#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^) -#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^) -#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^) +#define arch_this_cpu_to_op(pcp, val, op) \ +{ \ + typedef typeof(pcp) pcp_op_T__; \ + pcp_op_T__ val__ = (val); \ + pcp_op_T__ old__, *ptr__; \ + preempt_disable(); \ + ptr__ = __this_cpu_ptr(&(pcp)); \ + asm volatile( \ + op " %[old__],%[val__],%[ptr__]\n" \ + : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ + : [val__] "d" (val__) \ + : "cc"); \ + preempt_enable(); \ +} + +#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lan") +#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, "lang") +#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, "lao") +#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, "laog") + +#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ #define arch_this_cpu_cmpxchg(pcp, oval, nval) \ ({ \ @@ -74,13 +139,7 @@ pcp_op_T__ *ptr__; \ preempt_disable(); \ ptr__ = __this_cpu_ptr(&(pcp)); \ - switch (sizeof(*ptr__)) { \ - case 8: \ - ret__ = cmpxchg64(ptr__, oval, nval); \ - break; \ - default: \ - ret__ = cmpxchg(ptr__, oval, nval); \ - } \ + ret__ = cmpxchg(ptr__, oval, nval); \ preempt_enable(); \ ret__; \ }) @@ -104,9 +163,7 @@ #define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval) #define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval) #define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval) -#ifdef CONFIG_64BIT #define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval) -#endif #define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2) \ ({ \ @@ -124,9 +181,9 @@ }) #define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double -#ifdef CONFIG_64BIT #define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double -#endif + +#endif /* CONFIG_64BIT */ #include <asm-generic/percpu.h> diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h index 1141fb3e7b2..159a8ec6da9 100644 --- a/arch/s390/include/asm/perf_event.h +++ b/arch/s390/include/asm/perf_event.h @@ -1,21 +1,40 @@ /* * Performance event support - s390 specific definitions. * - * Copyright IBM Corp. 2009, 2012 + * Copyright IBM Corp. 
2009, 2013
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *	      Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  */
-#include <asm/cpu_mf.h>
+#ifndef _ASM_S390_PERF_EVENT_H
+#define _ASM_S390_PERF_EVENT_H
 
-/* CPU-measurement counter facility */
-#define PERF_CPUM_CF_MAX_CTR		256
+#ifdef CONFIG_64BIT
+
+#include <linux/perf_event.h>
+#include <linux/device.h>
+#include <asm/cpu_mf.h>
 
 /* Per-CPU flags for PMU states */
 #define PMU_F_RESERVED			0x1000
 #define PMU_F_ENABLED			0x2000
+#define PMU_F_IN_USE			0x4000
+#define PMU_F_ERR_IBE			0x0100
+#define PMU_F_ERR_LSDA			0x0200
+#define PMU_F_ERR_MASK			(PMU_F_ERR_IBE|PMU_F_ERR_LSDA)
+
+/* Perf definitions for PMU event attributes in sysfs */
+extern __init const struct attribute_group **cpumf_cf_event_group(void);
+extern ssize_t cpumf_events_sysfs_show(struct device *dev,
+				       struct device_attribute *attr,
+				       char *page);
+#define EVENT_VAR(_cat, _name)		event_attr_##_cat##_##_name
+#define EVENT_PTR(_cat, _name)		(&EVENT_VAR(_cat, _name).attr.attr)
+
+#define CPUMF_EVENT_ATTR(cat, name, id)			\
+	PMU_EVENT_ATTR(name, EVENT_VAR(cat, name), id, cpumf_events_sysfs_show)
+#define CPUMF_EVENT_PTR(cat, name)	EVENT_PTR(cat, name)
 
-#ifdef CONFIG_64BIT
 /* Perf callbacks */
 struct pt_regs;
@@ -23,4 +42,55 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs) perf_misc_flags(regs)
 
+/* Perf pt_regs extension for sample-data-entry indicators */
+struct perf_sf_sde_regs {
+	unsigned char in_guest:1;	  /* guest sample */
+	unsigned long reserved:63;	  /* reserved */
+};
+
+/* Perf PMU definitions for the counter facility */
+#define PERF_CPUM_CF_MAX_CTR		256
+
+/* Perf PMU definitions for the sampling facility */
+#define PERF_CPUM_SF_MAX_CTR		2
+#define PERF_EVENT_CPUM_SF		0xB0000UL /* Event: Basic-sampling */
+#define PERF_EVENT_CPUM_SF_DIAG		0xBD000UL /* Event: Combined-sampling */
+#define PERF_CPUM_SF_BASIC_MODE		0x0001	  /* Basic-sampling flag */
+#define PERF_CPUM_SF_DIAG_MODE		0x0002	  /* Diagnostic-sampling flag */
+#define PERF_CPUM_SF_MODE_MASK		(PERF_CPUM_SF_BASIC_MODE| \
+					 PERF_CPUM_SF_DIAG_MODE)
+#define PERF_CPUM_SF_FULL_BLOCKS	0x0004	  /* Process full SDBs only */
+
+#define REG_NONE		0
+#define REG_OVERFLOW		1
+#define OVERFLOW_REG(hwc)	((hwc)->extra_reg.config)
+#define SFB_ALLOC_REG(hwc)	((hwc)->extra_reg.alloc)
+#define RAWSAMPLE_REG(hwc)	((hwc)->config)
+#define TEAR_REG(hwc)		((hwc)->last_tag)
+#define SAMPL_RATE(hwc)		((hwc)->event_base)
+#define SAMPL_FLAGS(hwc)	((hwc)->config_base)
+#define SAMPL_DIAG_MODE(hwc)	(SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
+#define SDB_FULL_BLOCKS(hwc)	(SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
+
+/* Structure for sampling data entries to be passed as perf raw sample data
+ * to user space. Note that raw sample data must be aligned and, thus, might
+ * be padded with zeros. 
+ */ +struct sf_raw_sample { +#define SF_RAW_SAMPLE_BASIC PERF_CPUM_SF_BASIC_MODE +#define SF_RAW_SAMPLE_DIAG PERF_CPUM_SF_DIAG_MODE + u64 format; + u32 size; /* Size of sf_raw_sample */ + u16 bsdes; /* Basic-sampling data entry size */ + u16 dsdes; /* Diagnostic-sampling data entry size */ + struct hws_basic_entry basic; /* Basic-sampling data entry */ + struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ + u8 padding[]; /* Padding to next multiple of 8 */ +} __packed; + +/* Perf hardware reserve and release functions */ +int perf_reserve_sampling(void); +void perf_release_sampling(void); + #endif /* CONFIG_64BIT */ +#endif /* _ASM_S390_PERF_EVENT_H */ diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index e1408ddb94f..9e18a61d3df 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -22,6 +22,8 @@ unsigned long *page_table_alloc(struct mm_struct *, unsigned long); void page_table_free(struct mm_struct *, unsigned long *); void page_table_free_rcu(struct mmu_gather *, unsigned long *); +void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long, + bool init_skey); int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, unsigned long key, bool nq); @@ -91,11 +93,22 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) { unsigned long *table = crst_table_alloc(mm); - if (table) - crst_table_init(table, _SEGMENT_ENTRY_EMPTY); + + if (!table) + return NULL; + crst_table_init(table, _SEGMENT_ENTRY_EMPTY); + if (!pgtable_pmd_page_ctor(virt_to_page(table))) { + crst_table_free(mm, table); + return NULL; + } return (pmd_t *) table; } -#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd) + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + pgtable_pmd_page_dtor(virt_to_page(pmd)); + crst_table_free(mm, (unsigned long *) pmd); +} static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) { diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 9b60a36c348..fcba5e03839 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -229,6 +229,7 @@ extern unsigned long MODULES_END; #define _PAGE_READ 0x010 /* SW pte read bit */ #define _PAGE_WRITE 0x020 /* SW pte write bit */ #define _PAGE_SPECIAL 0x040 /* SW associated with special page */ +#define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */ #define __HAVE_ARCH_PTE_SPECIAL /* Set of bits not changed in pte_modify */ @@ -308,7 +309,8 @@ extern unsigned long MODULES_END; #define PGSTE_HC_BIT 0x00200000UL #define PGSTE_GR_BIT 0x00040000UL #define PGSTE_GC_BIT 0x00020000UL -#define PGSTE_IN_BIT 0x00008000UL /* IPTE notify bit */ +#define PGSTE_UC_BIT 0x00008000UL /* user dirty (migration) */ +#define PGSTE_IN_BIT 0x00004000UL /* IPTE notify bit */ #else /* CONFIG_64BIT */ @@ -390,10 +392,17 @@ extern unsigned long MODULES_END; #define PGSTE_HC_BIT 0x0020000000000000UL #define PGSTE_GR_BIT 0x0004000000000000UL #define PGSTE_GC_BIT 0x0002000000000000UL -#define PGSTE_IN_BIT 0x0000800000000000UL /* IPTE notify bit */ +#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */ +#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */ #endif /* CONFIG_64BIT */ +/* Guest Page State used for virtualization */ +#define _PGSTE_GPS_ZERO 0x0000000080000000UL +#define _PGSTE_GPS_USAGE_MASK 0x0000000003000000UL +#define 
_PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL +#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL + /* * A user page table pointer has the space-switch-event bit, the * private-space-control bit and the storage-alteration-event-control @@ -459,6 +468,16 @@ static inline int mm_has_pgste(struct mm_struct *mm) #endif return 0; } + +static inline int mm_use_skey(struct mm_struct *mm) +{ +#ifdef CONFIG_PGSTE + if (mm->context.use_skey) + return 1; +#endif + return 0; +} + /* * pgd/pmd/pte query functions */ @@ -617,6 +636,14 @@ static inline int pte_none(pte_t pte) return pte_val(pte) == _PAGE_INVALID; } +static inline int pte_swap(pte_t pte) +{ + /* Bit pattern: (pte & 0x603) == 0x402 */ + return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | + _PAGE_TYPE | _PAGE_PRESENT)) + == (_PAGE_INVALID | _PAGE_TYPE); +} + static inline int pte_file(pte_t pte) { /* Bit pattern: (pte & 0x601) == 0x600 */ @@ -684,26 +711,17 @@ static inline void pgste_set(pte_t *ptep, pgste_t pgste) #endif } -static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) +static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste, + struct mm_struct *mm) { #ifdef CONFIG_PGSTE unsigned long address, bits, skey; - if (pte_val(*ptep) & _PAGE_INVALID) + if (!mm_use_skey(mm) || pte_val(*ptep) & _PAGE_INVALID) return pgste; address = pte_val(*ptep) & PAGE_MASK; skey = (unsigned long) page_get_storage_key(address); bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); - if (!(pgste_val(pgste) & PGSTE_HC_BIT) && (bits & _PAGE_CHANGED)) { - /* Transfer dirty + referenced bit to host bits in pgste */ - pgste_val(pgste) |= bits << 52; - page_set_storage_key(address, skey ^ bits, 0); - } else if (!(pgste_val(pgste) & PGSTE_HR_BIT) && - (bits & _PAGE_REFERENCED)) { - /* Transfer referenced bit to host bit in pgste */ - pgste_val(pgste) |= PGSTE_HR_BIT; - page_reset_referenced(address); - } /* Transfer page changed & referenced bit to guest bits in pgste */ pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */ /* Copy page access key and fetch protection bit to pgste */ @@ -714,25 +732,14 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) } -static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste) -{ -#ifdef CONFIG_PGSTE - if (pte_val(*ptep) & _PAGE_INVALID) - return pgste; - /* Get referenced bit from storage key */ - if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK)) - pgste_val(pgste) |= PGSTE_HR_BIT | PGSTE_GR_BIT; -#endif - return pgste; -} - -static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) +static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry, + struct mm_struct *mm) { #ifdef CONFIG_PGSTE unsigned long address; unsigned long nkey; - if (pte_val(entry) & _PAGE_INVALID) + if (!mm_use_skey(mm) || pte_val(entry) & _PAGE_INVALID) return; VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID)); address = pte_val(entry) & PAGE_MASK; @@ -742,21 +749,30 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) * key C/R to 0. */ nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56; + nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48; page_set_storage_key(address, nkey, 0); #endif } -static inline void pgste_set_pte(pte_t *ptep, pte_t entry) +static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry) { - if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) { - /* - * Without enhanced suppression-on-protection force - * the dirty bit on for all writable ptes. 
- */ - pte_val(entry) |= _PAGE_DIRTY; - pte_val(entry) &= ~_PAGE_PROTECT; + if ((pte_val(entry) & _PAGE_PRESENT) && + (pte_val(entry) & _PAGE_WRITE) && + !(pte_val(entry) & _PAGE_INVALID)) { + if (!MACHINE_HAS_ESOP) { + /* + * Without enhanced suppression-on-protection force + * the dirty bit on for all writable ptes. + */ + pte_val(entry) |= _PAGE_DIRTY; + pte_val(entry) &= ~_PAGE_PROTECT; + } + if (!(pte_val(entry) & _PAGE_PROTECT)) + /* This pte allows write access, set user-dirty */ + pgste_val(pgste) |= PGSTE_UC_BIT; } *ptep = entry; + return pgste; } /** @@ -765,6 +781,7 @@ static inline void pgste_set_pte(pte_t *ptep, pte_t entry) * @table: pointer to the page directory * @asce: address space control element for gmap page table * @crst_list: list of all crst tables used in the guest address space + * @pfault_enabled: defines if pfaults are applicable for the guest */ struct gmap { struct list_head list; @@ -773,6 +790,7 @@ struct gmap { unsigned long asce; void *private; struct list_head crst_list; + bool pfault_enabled; }; /** @@ -819,20 +837,22 @@ unsigned long gmap_translate(unsigned long address, struct gmap *); unsigned long __gmap_fault(unsigned long address, struct gmap *); unsigned long gmap_fault(unsigned long address, struct gmap *); void gmap_discard(unsigned long from, unsigned long to, struct gmap *); +void __gmap_zap(unsigned long address, struct gmap *); +bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *); + void gmap_register_ipte_notifier(struct gmap_notifier *); void gmap_unregister_ipte_notifier(struct gmap_notifier *); int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len); -void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *); +void gmap_do_ipte_notify(struct mm_struct *, pte_t *); static inline pgste_t pgste_ipte_notify(struct mm_struct *mm, - unsigned long addr, pte_t *ptep, pgste_t pgste) { #ifdef CONFIG_PGSTE if (pgste_val(pgste) & PGSTE_IN_BIT) { pgste_val(pgste) &= ~PGSTE_IN_BIT; - gmap_do_ipte_notify(mm, addr, ptep); + gmap_do_ipte_notify(mm, ptep); } #endif return pgste; @@ -850,8 +870,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); - pgste_set_key(ptep, pgste, entry); - pgste_set_pte(ptep, entry); + pgste_val(pgste) &= ~_PGSTE_GPS_ZERO; + pgste_set_key(ptep, pgste, entry, mm); + pgste = pgste_set_pte(ptep, pgste, entry); pgste_set_unlock(ptep, pgste); } else { if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1) @@ -879,6 +900,12 @@ static inline int pte_young(pte_t pte) return (pte_val(pte) & _PAGE_YOUNG) != 0; } +#define __HAVE_ARCH_PTE_UNUSED +static inline int pte_unused(pte_t pte) +{ + return pte_val(pte) & _PAGE_UNUSED; +} + /* * pgd/pmd/pte modification functions */ @@ -991,71 +1018,96 @@ static inline pte_t pte_mkhuge(pte_t pte) } #endif -/* - * Get (and clear) the user dirty bit for a pte. 
- */ -static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm, - pte_t *ptep) +static inline void __ptep_ipte(unsigned long address, pte_t *ptep) { - pgste_t pgste; - int dirty = 0; + unsigned long pto = (unsigned long) ptep; - if (mm_has_pgste(mm)) { - pgste = pgste_get_lock(ptep); - pgste = pgste_update_all(ptep, pgste); - dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT); - pgste_val(pgste) &= ~PGSTE_HC_BIT; - pgste_set_unlock(ptep, pgste); - return dirty; - } - return dirty; +#ifndef CONFIG_64BIT + /* pto in ESA mode must point to the start of the segment table */ + pto &= 0x7ffffc00; +#endif + /* Invalidation + global TLB flush for the pte */ + asm volatile( + " ipte %2,%3" + : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address)); } -/* - * Get (and clear) the user referenced bit for a pte. - */ -static inline int ptep_test_and_clear_user_young(struct mm_struct *mm, - pte_t *ptep) +static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep) { - pgste_t pgste; - int young = 0; + unsigned long pto = (unsigned long) ptep; - if (mm_has_pgste(mm)) { - pgste = pgste_get_lock(ptep); - pgste = pgste_update_young(ptep, pgste); - young = !!(pgste_val(pgste) & PGSTE_HR_BIT); - pgste_val(pgste) &= ~PGSTE_HR_BIT; - pgste_set_unlock(ptep, pgste); - } - return young; +#ifndef CONFIG_64BIT + /* pto in ESA mode must point to the start of the segment table */ + pto &= 0x7ffffc00; +#endif + /* Invalidation + local TLB flush for the pte */ + asm volatile( + " .insn rrf,0xb2210000,%2,%3,0,1" + : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address)); } -static inline void __ptep_ipte(unsigned long address, pte_t *ptep) +static inline void ptep_flush_direct(struct mm_struct *mm, + unsigned long address, pte_t *ptep) { - if (!(pte_val(*ptep) & _PAGE_INVALID)) { -#ifndef CONFIG_64BIT - /* pto must point to the start of the segment table */ - pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); -#else - /* ipte in zarch mode can do the math */ - pte_t *pto = ptep; -#endif - asm volatile( - " ipte %2,%3" - : "=m" (*ptep) : "m" (*ptep), - "a" (pto), "a" (address)); - } + int active, count; + + if (pte_val(*ptep) & _PAGE_INVALID) + return; + active = (mm == current->active_mm) ? 1 : 0; + count = atomic_add_return(0x10000, &mm->context.attach_count); + if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active && + cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) + __ptep_ipte_local(address, ptep); + else + __ptep_ipte(address, ptep); + atomic_sub(0x10000, &mm->context.attach_count); } static inline void ptep_flush_lazy(struct mm_struct *mm, unsigned long address, pte_t *ptep) { - int active = (mm == current->active_mm) ? 1 : 0; + int active, count; - if (atomic_read(&mm->context.attach_count) > active) - __ptep_ipte(address, ptep); - else + if (pte_val(*ptep) & _PAGE_INVALID) + return; + active = (mm == current->active_mm) ? 1 : 0; + count = atomic_add_return(0x10000, &mm->context.attach_count); + if ((count & 0xffff) <= active) { + pte_val(*ptep) |= _PAGE_INVALID; mm->context.flush_mm = 1; + } else + __ptep_ipte(address, ptep); + atomic_sub(0x10000, &mm->context.attach_count); +} + +/* + * Get (and clear) the user dirty bit for a pte. 
+ */ +static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep) +{ + pgste_t pgste; + pte_t pte; + int dirty; + + if (!mm_has_pgste(mm)) + return 0; + pgste = pgste_get_lock(ptep); + dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT); + pgste_val(pgste) &= ~PGSTE_UC_BIT; + pte = *ptep; + if (dirty && (pte_val(pte) & _PAGE_PRESENT)) { + pgste = pgste_ipte_notify(mm, ptep, pgste); + __ptep_ipte(addr, ptep); + if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE)) + pte_val(pte) |= _PAGE_PROTECT; + else + pte_val(pte) |= _PAGE_INVALID; + *ptep = pte; + } + pgste_set_unlock(ptep, pgste); + return dirty; } #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG @@ -1068,16 +1120,16 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, if (mm_has_pgste(vma->vm_mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste); + pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste); } pte = *ptep; - __ptep_ipte(addr, ptep); + ptep_flush_direct(vma->vm_mm, addr, ptep); young = pte_young(pte); pte = pte_mkold(pte); if (mm_has_pgste(vma->vm_mm)) { - pgste_set_pte(ptep, pte); + pgste = pgste_set_pte(ptep, pgste, pte); pgste_set_unlock(ptep, pgste); } else *ptep = pte; @@ -1114,7 +1166,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_ipte_notify(mm, address, ptep, pgste); + pgste = pgste_ipte_notify(mm, ptep, pgste); } pte = *ptep; @@ -1122,7 +1174,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, pte_val(*ptep) = _PAGE_INVALID; if (mm_has_pgste(mm)) { - pgste = pgste_update_all(&pte, pgste); + pgste = pgste_update_all(&pte, pgste, mm); pgste_set_unlock(ptep, pgste); } return pte; @@ -1138,15 +1190,14 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); - pgste_ipte_notify(mm, address, ptep, pgste); + pgste_ipte_notify(mm, ptep, pgste); } pte = *ptep; ptep_flush_lazy(mm, address, ptep); - pte_val(*ptep) |= _PAGE_INVALID; if (mm_has_pgste(mm)) { - pgste = pgste_update_all(&pte, pgste); + pgste = pgste_update_all(&pte, pgste, mm); pgste_set(ptep, pgste); } return pte; @@ -1160,8 +1211,8 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, if (mm_has_pgste(mm)) { pgste = pgste_get(ptep); - pgste_set_key(ptep, pgste, pte); - pgste_set_pte(ptep, pte); + pgste_set_key(ptep, pgste, pte, mm); + pgste = pgste_set_pte(ptep, pgste, pte); pgste_set_unlock(ptep, pgste); } else *ptep = pte; @@ -1176,15 +1227,18 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, if (mm_has_pgste(vma->vm_mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); + pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste); } pte = *ptep; - __ptep_ipte(address, ptep); + ptep_flush_direct(vma->vm_mm, address, ptep); pte_val(*ptep) = _PAGE_INVALID; if (mm_has_pgste(vma->vm_mm)) { - pgste = pgste_update_all(&pte, pgste); + if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) == + _PGSTE_GPS_USAGE_UNUSED) + pte_val(pte) |= _PAGE_UNUSED; + pgste = pgste_update_all(&pte, pgste, vma->vm_mm); pgste_set_unlock(ptep, pgste); } return pte; @@ -1207,7 +1261,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, if (!full && mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_ipte_notify(mm, address, ptep, pgste); + pgste = pgste_ipte_notify(mm, ptep, pgste); } pte = *ptep; @@ -1216,7 +1270,7 @@ static inline 
pte_t ptep_get_and_clear_full(struct mm_struct *mm, pte_val(*ptep) = _PAGE_INVALID; if (!full && mm_has_pgste(mm)) { - pgste = pgste_update_all(&pte, pgste); + pgste = pgste_update_all(&pte, pgste, mm); pgste_set_unlock(ptep, pgste); } return pte; @@ -1232,14 +1286,14 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm, if (pte_write(pte)) { if (mm_has_pgste(mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_ipte_notify(mm, address, ptep, pgste); + pgste = pgste_ipte_notify(mm, ptep, pgste); } ptep_flush_lazy(mm, address, ptep); pte = pte_wrprotect(pte); if (mm_has_pgste(mm)) { - pgste_set_pte(ptep, pte); + pgste = pgste_set_pte(ptep, pgste, pte); pgste_set_unlock(ptep, pgste); } else *ptep = pte; @@ -1258,13 +1312,13 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, return 0; if (mm_has_pgste(vma->vm_mm)) { pgste = pgste_get_lock(ptep); - pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste); + pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste); } - __ptep_ipte(address, ptep); + ptep_flush_direct(vma->vm_mm, address, ptep); if (mm_has_pgste(vma->vm_mm)) { - pgste_set_pte(ptep, entry); + pgste = pgste_set_pte(ptep, pgste, entry); pgste_set_unlock(ptep, pgste); } else *ptep = entry; @@ -1345,35 +1399,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) #define pte_unmap(pte) do { } while (0) -static inline void __pmd_idte(unsigned long address, pmd_t *pmdp) -{ - unsigned long sto = (unsigned long) pmdp - - pmd_index(address) * sizeof(pmd_t); - - if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) { - asm volatile( - " .insn rrf,0xb98e0000,%2,%3,0,0" - : "=m" (*pmdp) - : "m" (*pmdp), "a" (sto), - "a" ((address & HPAGE_MASK)) - : "cc" - ); - } -} - -static inline void __pmd_csp(pmd_t *pmdp) -{ - register unsigned long reg2 asm("2") = pmd_val(*pmdp); - register unsigned long reg3 asm("3") = pmd_val(*pmdp) | - _SEGMENT_ENTRY_INVALID; - register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5; - - asm volatile( - " csp %1,%3" - : "=m" (*pmdp) - : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc"); -} - #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) { @@ -1442,15 +1467,81 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd) } #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ +static inline void __pmdp_csp(pmd_t *pmdp) +{ + register unsigned long reg2 asm("2") = pmd_val(*pmdp); + register unsigned long reg3 asm("3") = pmd_val(*pmdp) | + _SEGMENT_ENTRY_INVALID; + register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5; + + asm volatile( + " csp %1,%3" + : "=m" (*pmdp) + : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc"); +} + +static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp) +{ + unsigned long sto; + + sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t); + asm volatile( + " .insn rrf,0xb98e0000,%2,%3,0,0" + : "=m" (*pmdp) + : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK)) + : "cc" ); +} + +static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp) +{ + unsigned long sto; + + sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t); + asm volatile( + " .insn rrf,0xb98e0000,%2,%3,0,1" + : "=m" (*pmdp) + : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK)) + : "cc" ); +} + +static inline void pmdp_flush_direct(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + int active, count; + + 
if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) + return; + if (!MACHINE_HAS_IDTE) { + __pmdp_csp(pmdp); + return; + } + active = (mm == current->active_mm) ? 1 : 0; + count = atomic_add_return(0x10000, &mm->context.attach_count); + if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active && + cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) + __pmdp_idte_local(address, pmdp); + else + __pmdp_idte(address, pmdp); + atomic_sub(0x10000, &mm->context.attach_count); +} + static inline void pmdp_flush_lazy(struct mm_struct *mm, unsigned long address, pmd_t *pmdp) { - int active = (mm == current->active_mm) ? 1 : 0; + int active, count; - if ((atomic_read(&mm->context.attach_count) & 0xffff) > active) - __pmd_idte(address, pmdp); - else + if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID) + return; + active = (mm == current->active_mm) ? 1 : 0; + count = atomic_add_return(0x10000, &mm->context.attach_count); + if ((count & 0xffff) <= active) { + pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID; mm->context.flush_mm = 1; + } else if (MACHINE_HAS_IDTE) + __pmdp_idte(address, pmdp); + else + __pmdp_csp(pmdp); + atomic_sub(0x10000, &mm->context.attach_count); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -1502,7 +1593,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, pmd_t pmd; pmd = *pmdp; - __pmd_idte(address, pmdp); + pmdp_flush_direct(vma->vm_mm, address, pmdp); *pmdp = pmd_mkold(pmd); return pmd_young(pmd); } @@ -1513,7 +1604,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, { pmd_t pmd = *pmdp; - __pmd_idte(address, pmdp); + pmdp_flush_direct(mm, address, pmdp); pmd_clear(pmdp); return pmd; } @@ -1529,7 +1620,7 @@ static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma, static inline void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { - __pmd_idte(address, pmdp); + pmdp_flush_direct(vma->vm_mm, address, pmdp); } #define __HAVE_ARCH_PMDP_SET_WRPROTECT @@ -1539,7 +1630,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, pmd_t pmd = *pmdp; if (pmd_write(pmd)) { - __pmd_idte(address, pmdp); + pmdp_flush_direct(mm, address, pmdp); set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd)); } } @@ -1635,6 +1726,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) extern int vmem_add_mapping(unsigned long start, unsigned long size); extern int vmem_remove_mapping(unsigned long start, unsigned long size); extern int s390_enable_sie(void); +extern void s390_enable_skey(void); /* * No page table caches to initialise diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 0eb37505cab..6f02d452bbe 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -11,6 +11,13 @@ #ifndef __ASM_S390_PROCESSOR_H #define __ASM_S390_PROCESSOR_H +#define CIF_MCCK_PENDING 0 /* machine check handling is pending */ +#define CIF_ASCE 1 /* user asce needs fixup / uaccess */ + +#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING) +#define _CIF_ASCE (1<<CIF_ASCE) + + #ifndef __ASSEMBLY__ #include <linux/linkage.h> @@ -21,6 +28,21 @@ #include <asm/setup.h> #include <asm/runtime_instr.h> +static inline void set_cpu_flag(int flag) +{ + S390_lowcore.cpu_flags |= (1U << flag); +} + +static inline void clear_cpu_flag(int flag) +{ + S390_lowcore.cpu_flags &= ~(1U << flag); +} + +static inline int test_cpu_flag(int flag) +{ + return !!(S390_lowcore.cpu_flags & (1U << flag)); +} + /* * Default implementation of macro that returns current * instruction pointer ("program 
counter"). @@ -79,6 +101,7 @@ struct thread_struct { unsigned long ksp; /* kernel stack pointer */ mm_segment_t mm_segment; unsigned long gmap_addr; /* address of last gmap fault. */ + unsigned int gmap_pfault; /* signal of a pending guest pfault */ struct per_regs per_user; /* User specified PER registers */ struct per_event per_event; /* Cause of the last PER trap */ unsigned long per_flags; /* Flags to control debug behavior */ @@ -134,19 +157,17 @@ struct stack_frame { * Do necessary setup to start up a new thread. */ #define start_thread(regs, new_psw, new_stackp) do { \ - regs->psw.mask = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA; \ + regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \ regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ regs->gprs[15] = new_stackp; \ execve_tail(); \ } while (0) #define start_thread31(regs, new_psw, new_stackp) do { \ - regs->psw.mask = psw_user_bits | PSW_MASK_BA; \ + regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \ regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ regs->gprs[15] = new_stackp; \ - __tlb_flush_mm(current->mm); \ crst_table_downgrade(current->mm, 1UL << 31); \ - update_mm(current->mm, current); \ execve_tail(); \ } while (0) @@ -169,17 +190,15 @@ extern void release_thread(struct task_struct *); */ extern unsigned long thread_saved_pc(struct task_struct *t); -extern void show_code(struct pt_regs *regs); -extern void print_fn_code(unsigned char *code, unsigned long len); -extern int insn_to_mnemonic(unsigned char *instruction, char *buf, - unsigned int len); - unsigned long get_wchan(struct task_struct *p); #define task_pt_regs(tsk) ((struct pt_regs *) \ (task_stack_page(tsk) + THREAD_SIZE) - 1) #define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr) #define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15]) +/* Has task runtime instrumentation enabled ? */ +#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb) + static inline unsigned short stap(void) { unsigned short cpu_address; @@ -198,6 +217,8 @@ static inline void cpu_relax(void) barrier(); } +#define arch_mutex_cpu_relax() barrier() + static inline void psw_set_key(unsigned int key) { asm volatile("spka 0(%0)" : : "d" (key)); @@ -346,9 +367,9 @@ __set_psw_mask(unsigned long mask) } #define local_mcck_enable() \ - __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK) + __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK) #define local_mcck_disable() \ - __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT) + __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT) /* * Basic Machine Check/Program Check Handler. 
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 52b56533c57..55d69dd7473 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -8,10 +8,63 @@ #include <uapi/asm/ptrace.h> +#define PIF_SYSCALL 0 /* inside a system call */ +#define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */ + +#define _PIF_SYSCALL (1<<PIF_SYSCALL) +#define _PIF_PER_TRAP (1<<PIF_PER_TRAP) + #ifndef __ASSEMBLY__ -extern long psw_kernel_bits; -extern long psw_user_bits; +#define PSW_KERNEL_BITS (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \ + PSW_MASK_EA | PSW_MASK_BA) +#define PSW_USER_BITS (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \ + PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \ + PSW_MASK_PSTATE | PSW_ASC_PRIMARY) + +struct psw_bits { + unsigned long long : 1; + unsigned long long r : 1; /* PER-Mask */ + unsigned long long : 3; + unsigned long long t : 1; /* DAT Mode */ + unsigned long long i : 1; /* Input/Output Mask */ + unsigned long long e : 1; /* External Mask */ + unsigned long long key : 4; /* PSW Key */ + unsigned long long : 1; + unsigned long long m : 1; /* Machine-Check Mask */ + unsigned long long w : 1; /* Wait State */ + unsigned long long p : 1; /* Problem State */ + unsigned long long as : 2; /* Address Space Control */ + unsigned long long cc : 2; /* Condition Code */ + unsigned long long pm : 4; /* Program Mask */ + unsigned long long ri : 1; /* Runtime Instrumentation */ + unsigned long long : 6; + unsigned long long eaba : 2; /* Addressing Mode */ +#ifdef CONFIG_64BIT + unsigned long long : 31; + unsigned long long ia : 64;/* Instruction Address */ +#else + unsigned long long ia : 31;/* Instruction Address */ +#endif +}; + +enum { + PSW_AMODE_24BIT = 0, + PSW_AMODE_31BIT = 1, + PSW_AMODE_64BIT = 3 +}; + +enum { + PSW_AS_PRIMARY = 0, + PSW_AS_ACCREG = 1, + PSW_AS_SECONDARY = 2, + PSW_AS_HOME = 3 +}; + +#define psw_bits(__psw) (*({ \ + typecheck(psw_t, __psw); \ + &(*(struct psw_bits *)(&(__psw))); \ +})) /* * The pt_regs struct defines the way the registers are stored on @@ -26,6 +79,7 @@ struct pt_regs unsigned int int_code; unsigned int int_parm; unsigned long int_parm_long; + unsigned long flags; }; /* @@ -76,10 +130,26 @@ struct per_struct_kernel { #define PER_CONTROL_SUSPENSION 0x00400000UL #define PER_CONTROL_ALTERATION 0x00200000UL +static inline void set_pt_regs_flag(struct pt_regs *regs, int flag) +{ + regs->flags |= (1U << flag); +} + +static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag) +{ + regs->flags &= ~(1U << flag); +} + +static inline int test_pt_regs_flag(struct pt_regs *regs, int flag) +{ + return !!(regs->flags & (1U << flag)); +} + /* * These are defined as per linux/ptrace.h, which see. 
*/ #define arch_has_single_step() (1) +#define arch_has_block_step() (1) #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0) #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN) diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h index 57d0d7e794b..d786c634e05 100644 --- a/arch/s390/include/asm/qdio.h +++ b/arch/s390/include/asm/qdio.h @@ -336,7 +336,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int, #define QDIO_FLAG_CLEANUP_USING_HALT 0x02 /** - * struct qdio_initialize - qdio initalization data + * struct qdio_initialize - qdio initialization data * @cdev: associated ccw device * @q_format: queue format * @adapter_name: name for the adapter @@ -378,6 +378,34 @@ struct qdio_initialize { struct qdio_outbuf_state *output_sbal_state_array; }; +/** + * enum qdio_brinfo_entry_type - type of address entry for qdio_brinfo_desc() + * @l3_ipv6_addr: entry contains IPv6 address + * @l3_ipv4_addr: entry contains IPv4 address + * @l2_addr_lnid: entry contains MAC address and VLAN ID + */ +enum qdio_brinfo_entry_type {l3_ipv6_addr, l3_ipv4_addr, l2_addr_lnid}; + +/** + * struct qdio_brinfo_entry_XXX - Address entry for qdio_brinfo_desc() + * @nit: Network interface token + * @addr: Address of one of the three types + * + * The struct is passed to the callback function by qdio_brinfo_desc() + */ +struct qdio_brinfo_entry_l3_ipv6 { + u64 nit; + struct { unsigned char _s6_addr[16]; } addr; +} __packed; +struct qdio_brinfo_entry_l3_ipv4 { + u64 nit; + struct { uint32_t _s_addr; } addr; +} __packed; +struct qdio_brinfo_entry_l2 { + u64 nit; + struct { u8 mac[6]; u16 lnid; } addr_lnid; +} __packed; + #define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */ #define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */ #define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */ @@ -399,5 +427,10 @@ extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *); extern int qdio_shutdown(struct ccw_device *, int); extern int qdio_free(struct ccw_device *); extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *); +extern int qdio_pnso_brinfo(struct subchannel_id schid, + int cnc, u16 *response, + void (*cb)(void *priv, enum qdio_brinfo_entry_type type, + void *entry), + void *priv); #endif /* __QDIO_H__ */ diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h index 7dc7f9c63b6..1aba89b53cb 100644 --- a/arch/s390/include/asm/sclp.h +++ b/arch/s390/include/asm/sclp.h @@ -8,6 +8,7 @@ #include <linux/types.h> #include <asm/chpid.h> +#include <asm/cpu.h> #define SCLP_CHP_INFO_MASK_SIZE 32 @@ -27,7 +28,11 @@ struct sclp_ipl_info { struct sclp_cpu_entry { u8 address; - u8 reserved0[13]; + u8 reserved0[2]; + u8 : 3; + u8 siif : 1; + u8 : 4; + u8 reserved2[10]; u8 type; u8 reserved1; } __attribute__((packed)); @@ -37,25 +42,30 @@ struct sclp_cpu_info { unsigned int standby; unsigned int combined; int has_cpu_type; - struct sclp_cpu_entry cpu[255]; + struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1]; }; int sclp_get_cpu_info(struct sclp_cpu_info *info); int sclp_cpu_configure(u8 cpu); int sclp_cpu_deconfigure(u8 cpu); -void sclp_facilities_detect(void); unsigned long long sclp_get_rnmax(void); unsigned long long sclp_get_rzm(void); +unsigned int sclp_get_max_cpu(void); int sclp_sdias_blk_count(void); int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); int sclp_chp_configure(struct chp_id chpid); int sclp_chp_deconfigure(struct chp_id chpid); int sclp_chp_read_info(struct 
sclp_chp_info *info); void sclp_get_ipl_info(struct sclp_ipl_info *info); -bool sclp_has_linemode(void); -bool sclp_has_vt220(void); +bool __init sclp_has_linemode(void); +bool __init sclp_has_vt220(void); +bool sclp_has_sprp(void); int sclp_pci_configure(u32 fid); int sclp_pci_deconfigure(u32 fid); int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode); +unsigned long sclp_get_hsa_size(void); +void sclp_early_detect(void); +int sclp_has_siif(void); +unsigned int sclp_get_ibc(void); #endif /* _ASM_S390_SCLP_H */ diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 59880dbaf36..089a49814c5 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -9,7 +9,6 @@ #define PARMAREA 0x10400 -#define MEMORY_CHUNKS 256 #ifndef __ASSEMBLY__ @@ -31,29 +30,11 @@ #endif /* CONFIG_64BIT */ #define COMMAND_LINE ((char *) (0x10480)) -#define CHUNK_READ_WRITE 0 -#define CHUNK_READ_ONLY 1 - -struct mem_chunk { - unsigned long addr; - unsigned long size; - int type; -}; - -extern struct mem_chunk memory_chunk[]; extern int memory_end_set; extern unsigned long memory_end; +extern unsigned long max_physmem_end; -void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize); -void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr, - unsigned long size); - -#define PRIMARY_SPACE_MODE 0 -#define ACCESS_REGISTER_MODE 1 -#define SECONDARY_SPACE_MODE 2 -#define HOME_SPACE_MODE 3 - -extern unsigned int s390_user_mode; +extern void detect_memory_memblock(void); /* * Machine features detected in head.S @@ -66,7 +47,6 @@ extern unsigned int s390_user_mode; #define MACHINE_FLAG_DIAG44 (1UL << 4) #define MACHINE_FLAG_IDTE (1UL << 5) #define MACHINE_FLAG_DIAG9C (1UL << 6) -#define MACHINE_FLAG_MVCOS (1UL << 7) #define MACHINE_FLAG_KVM (1UL << 8) #define MACHINE_FLAG_ESOP (1UL << 9) #define MACHINE_FLAG_EDAT1 (1UL << 10) @@ -76,6 +56,7 @@ extern unsigned int s390_user_mode; #define MACHINE_FLAG_TOPOLOGY (1UL << 14) #define MACHINE_FLAG_TE (1UL << 15) #define MACHINE_FLAG_RRBM (1UL << 16) +#define MACHINE_FLAG_TLB_LC (1UL << 17) #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) @@ -92,31 +73,28 @@ extern unsigned int s390_user_mode; #define MACHINE_HAS_IDTE (0) #define MACHINE_HAS_DIAG44 (1) #define MACHINE_HAS_MVPG (S390_lowcore.machine_flags & MACHINE_FLAG_MVPG) -#define MACHINE_HAS_MVCOS (0) #define MACHINE_HAS_EDAT1 (0) #define MACHINE_HAS_EDAT2 (0) #define MACHINE_HAS_LPP (0) #define MACHINE_HAS_TOPOLOGY (0) #define MACHINE_HAS_TE (0) #define MACHINE_HAS_RRBM (0) +#define MACHINE_HAS_TLB_LC (0) #else /* CONFIG_64BIT */ #define MACHINE_HAS_IEEE (1) #define MACHINE_HAS_CSP (1) #define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) #define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44) #define MACHINE_HAS_MVPG (1) -#define MACHINE_HAS_MVCOS (S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS) #define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1) #define MACHINE_HAS_EDAT2 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2) #define MACHINE_HAS_LPP (S390_lowcore.machine_flags & MACHINE_FLAG_LPP) #define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) #define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE) #define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM) +#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & 
MACHINE_FLAG_TLB_LC) #endif /* CONFIG_64BIT */ -#define ZFCPDUMP_HSA_SIZE (32UL<<20) -#define ZFCPDUMP_HSA_SIZE_MAX (64UL<<20) - /* * Console mode. Override with conmode= */ diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h index 5a87d16d3e7..bf9c823d402 100644 --- a/arch/s390/include/asm/sigp.h +++ b/arch/s390/include/asm/sigp.h @@ -5,6 +5,7 @@ #define SIGP_SENSE 1 #define SIGP_EXTERNAL_CALL 2 #define SIGP_EMERGENCY_SIGNAL 3 +#define SIGP_START 4 #define SIGP_STOP 5 #define SIGP_RESTART 6 #define SIGP_STOP_AND_STORE_STATUS 9 @@ -12,6 +13,7 @@ #define SIGP_SET_PREFIX 13 #define SIGP_STORE_STATUS_AT_ADDRESS 14 #define SIGP_SET_ARCHITECTURE 18 +#define SIGP_COND_EMERGENCY_SIGNAL 19 #define SIGP_SENSE_RUNNING 21 /* SIGP condition codes */ @@ -29,4 +31,23 @@ #define SIGP_STATUS_INCORRECT_STATE 0x00000200UL #define SIGP_STATUS_NOT_RUNNING 0x00000400UL +#ifndef __ASSEMBLY__ + +static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status) +{ + register unsigned int reg1 asm ("1") = parm; + int cc; + + asm volatile( + " sigp %1,%2,0(%3)\n" + " ipm %0\n" + " srl %0,28\n" + : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc"); + if (status && cc == 1) + *status = reg1; + return cc; +} + +#endif /* __ASSEMBLY__ */ + #endif /* __S390_ASM_SIGP_H */ diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index b64f15c3b4c..4f1307962a9 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -7,6 +7,8 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H +#include <asm/sigp.h> + #ifdef CONFIG_SMP #include <asm/lowcore.h> @@ -14,7 +16,6 @@ #define raw_smp_processor_id() (S390_lowcore.cpu_nr) extern struct mutex smp_cpu_state_mutex; -extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1]; extern int __cpu_up(unsigned int cpu, struct task_struct *tidle); @@ -29,9 +30,9 @@ extern int smp_store_status(int cpu); extern int smp_vcpu_scheduled(int cpu); extern void smp_yield_cpu(int cpu); extern void smp_yield(void); -extern void smp_stop_cpu(void); extern void smp_cpu_set_polarization(int cpu, int val); extern int smp_cpu_get_polarization(int cpu); +extern void smp_fill_possible_mask(void); #else /* CONFIG_SMP */ @@ -50,10 +51,20 @@ static inline int smp_store_status(int cpu) { return 0; } static inline int smp_vcpu_scheduled(int cpu) { return 1; } static inline void smp_yield_cpu(int cpu) { } static inline void smp_yield(void) { } -static inline void smp_stop_cpu(void) { } +static inline void smp_fill_possible_mask(void) { } #endif /* CONFIG_SMP */ +static inline void smp_stop_cpu(void) +{ + u16 pcpu = stap(); + + for (;;) { + __pcpu_sigp(pcpu, SIGP_STOP, 0, NULL); + cpu_relax(); + } +} + #ifdef CONFIG_HOTPLUG_CPU extern int smp_rescan_cpus(void); extern void __noreturn cpu_die(void); diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 701fe8c59e1..96879f7ad6d 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -11,18 +11,21 @@ #include <linux/smp.h> +#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval) + extern int spin_retry; static inline int -_raw_compare_and_swap(volatile unsigned int *lock, - unsigned int old, unsigned int new) +_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new) { + unsigned int old_expected = old; + asm volatile( " cs %0,%3,%1" : "=d" (old), "=Q" (*lock) : "0" (old), "d" (new), "Q" (*lock) : "cc", "memory" ); - return old; + return old == old_expected; } /* @@ -34,52 +37,69 @@ _raw_compare_and_swap(volatile unsigned int 
*lock, * (the type definitions are in asm/spinlock_types.h) */ -#define arch_spin_is_locked(x) ((x)->owner_cpu != 0) -#define arch_spin_unlock_wait(lock) \ - do { while (arch_spin_is_locked(lock)) \ - arch_spin_relax(lock); } while (0) +void arch_spin_lock_wait(arch_spinlock_t *); +int arch_spin_trylock_retry(arch_spinlock_t *); +void arch_spin_relax(arch_spinlock_t *); +void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); -extern void arch_spin_lock_wait(arch_spinlock_t *); -extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); -extern int arch_spin_trylock_retry(arch_spinlock_t *); -extern void arch_spin_relax(arch_spinlock_t *lock); +static inline u32 arch_spin_lockval(int cpu) +{ + return ~cpu; +} -static inline void arch_spin_lock(arch_spinlock_t *lp) +static inline int arch_spin_value_unlocked(arch_spinlock_t lock) { - int old; + return lock.lock == 0; +} - old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); - if (likely(old == 0)) - return; - arch_spin_lock_wait(lp); +static inline int arch_spin_is_locked(arch_spinlock_t *lp) +{ + return ACCESS_ONCE(lp->lock) != 0; } -static inline void arch_spin_lock_flags(arch_spinlock_t *lp, - unsigned long flags) +static inline int arch_spin_trylock_once(arch_spinlock_t *lp) { - int old; + barrier(); + return likely(arch_spin_value_unlocked(*lp) && + _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL)); +} - old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); - if (likely(old == 0)) - return; - arch_spin_lock_wait_flags(lp, flags); +static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp) +{ + return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0); } -static inline int arch_spin_trylock(arch_spinlock_t *lp) +static inline void arch_spin_lock(arch_spinlock_t *lp) { - int old; + if (!arch_spin_trylock_once(lp)) + arch_spin_lock_wait(lp); +} - old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); - if (likely(old == 0)) - return 1; - return arch_spin_trylock_retry(lp); +static inline void arch_spin_lock_flags(arch_spinlock_t *lp, + unsigned long flags) +{ + if (!arch_spin_trylock_once(lp)) + arch_spin_lock_wait_flags(lp, flags); +} + +static inline int arch_spin_trylock(arch_spinlock_t *lp) +{ + if (!arch_spin_trylock_once(lp)) + return arch_spin_trylock_retry(lp); + return 1; } static inline void arch_spin_unlock(arch_spinlock_t *lp) { - _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); + arch_spin_tryrelease_once(lp); +} + +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) +{ + while (arch_spin_is_locked(lock)) + arch_spin_relax(lock); } - + /* * Read-write spinlocks, allowing multiple readers * but only one writer. 
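The spinlock rework in the hunk above changes _raw_compare_and_swap() from returning the old lock value to returning a success/failure boolean, and replaces the ad-hoc ~smp_processor_id() owner word with a precomputed SPINLOCK_LOCKVAL read from the lowcore; a lock word of 0 means unlocked. A user-space sketch of the resulting trylock/unlock protocol, using GCC's __atomic_compare_exchange_n() builtin as a portable stand-in for the s390 cs instruction (demo_cas and the main() driver are illustrative names, not from the patch):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the s390 "cs" (compare-and-swap) instruction with the new
 * boolean calling convention: true when *lock still contained 'old' and
 * was atomically replaced by 'new'. */
static inline bool demo_cas(unsigned int *lock, unsigned int old,
			    unsigned int new)
{
	return __atomic_compare_exchange_n(lock, &old, new, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	unsigned int lock = 0;		/* 0 == unlocked */
	unsigned int lockval = ~0U;	/* arch_spin_lockval(0), i.e. ~cpu */

	printf("trylock:       %d\n", demo_cas(&lock, 0, lockval));	/* 1, acquired */
	printf("trylock again: %d\n", demo_cas(&lock, 0, lockval));	/* 0, contended */
	printf("unlock:        %d\n", demo_cas(&lock, lockval, 0));	/* 1, released */
	return 0;
}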
@@ -110,42 +130,50 @@ extern void _raw_write_lock_wait(arch_rwlock_t *lp); extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); extern int _raw_write_trylock_retry(arch_rwlock_t *lp); +static inline int arch_read_trylock_once(arch_rwlock_t *rw) +{ + unsigned int old = ACCESS_ONCE(rw->lock); + return likely((int) old >= 0 && + _raw_compare_and_swap(&rw->lock, old, old + 1)); +} + +static inline int arch_write_trylock_once(arch_rwlock_t *rw) +{ + unsigned int old = ACCESS_ONCE(rw->lock); + return likely(old == 0 && + _raw_compare_and_swap(&rw->lock, 0, 0x80000000)); +} + static inline void arch_read_lock(arch_rwlock_t *rw) { - unsigned int old; - old = rw->lock & 0x7fffffffU; - if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old) + if (!arch_read_trylock_once(rw)) _raw_read_lock_wait(rw); } static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) { - unsigned int old; - old = rw->lock & 0x7fffffffU; - if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old) + if (!arch_read_trylock_once(rw)) _raw_read_lock_wait_flags(rw, flags); } static inline void arch_read_unlock(arch_rwlock_t *rw) { - unsigned int old, cmp; + unsigned int old; - old = rw->lock; do { - cmp = old; - old = _raw_compare_and_swap(&rw->lock, old, old - 1); - } while (cmp != old); + old = ACCESS_ONCE(rw->lock); + } while (!_raw_compare_and_swap(&rw->lock, old, old - 1)); } static inline void arch_write_lock(arch_rwlock_t *rw) { - if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) + if (!arch_write_trylock_once(rw)) _raw_write_lock_wait(rw); } static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) { - if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) + if (!arch_write_trylock_once(rw)) _raw_write_lock_wait_flags(rw, flags); } @@ -156,18 +184,16 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) static inline int arch_read_trylock(arch_rwlock_t *rw) { - unsigned int old; - old = rw->lock & 0x7fffffffU; - if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old)) - return 1; - return _raw_read_trylock_retry(rw); + if (!arch_read_trylock_once(rw)) + return _raw_read_trylock_retry(rw); + return 1; } static inline int arch_write_trylock(arch_rwlock_t *rw) { - if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) - return 1; - return _raw_write_trylock_retry(rw); + if (!arch_write_trylock_once(rw)) + return _raw_write_trylock_retry(rw); + return 1; } #define arch_read_relax(lock) cpu_relax() diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index 9c76656a0af..b2cd6ff7c2c 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -6,13 +6,13 @@ #endif typedef struct { - volatile unsigned int owner_cpu; + unsigned int lock; } __attribute__ ((aligned (4))) arch_spinlock_t; -#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, } typedef struct { - volatile unsigned int lock; + unsigned int lock; } arch_rwlock_t; #define __ARCH_RW_LOCK_UNLOCKED { 0 } diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index 6dbd559763c..18ea9e3f814 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -13,58 +13,94 @@ extern struct task_struct *__switch_to(void *, void *); extern void update_cr_regs(struct task_struct *task); -static inline void save_fp_regs(s390_fp_regs *fpregs) +static inline int test_fp_ctl(u32 fpc) { + u32 
orig_fpc; + int rc; + + if (!MACHINE_HAS_IEEE) + return 0; + asm volatile( - " std 0,%O0+8(%R0)\n" - " std 2,%O0+24(%R0)\n" - " std 4,%O0+40(%R0)\n" - " std 6,%O0+56(%R0)" - : "=Q" (*fpregs) : "Q" (*fpregs)); + " efpc %1\n" + " sfpc %2\n" + "0: sfpc %1\n" + " la %0,0\n" + "1:\n" + EX_TABLE(0b,1b) + : "=d" (rc), "=d" (orig_fpc) + : "d" (fpc), "0" (-EINVAL)); + return rc; +} + +static inline void save_fp_ctl(u32 *fpc) +{ if (!MACHINE_HAS_IEEE) return; + asm volatile( - " stfpc %0\n" - " std 1,%O0+16(%R0)\n" - " std 3,%O0+32(%R0)\n" - " std 5,%O0+48(%R0)\n" - " std 7,%O0+64(%R0)\n" - " std 8,%O0+72(%R0)\n" - " std 9,%O0+80(%R0)\n" - " std 10,%O0+88(%R0)\n" - " std 11,%O0+96(%R0)\n" - " std 12,%O0+104(%R0)\n" - " std 13,%O0+112(%R0)\n" - " std 14,%O0+120(%R0)\n" - " std 15,%O0+128(%R0)\n" - : "=Q" (*fpregs) : "Q" (*fpregs)); + " stfpc %0\n" + : "+Q" (*fpc)); } -static inline void restore_fp_regs(s390_fp_regs *fpregs) +static inline int restore_fp_ctl(u32 *fpc) { + int rc; + + if (!MACHINE_HAS_IEEE) + return 0; + asm volatile( - " ld 0,%O0+8(%R0)\n" - " ld 2,%O0+24(%R0)\n" - " ld 4,%O0+40(%R0)\n" - " ld 6,%O0+56(%R0)" - : : "Q" (*fpregs)); + " lfpc %1\n" + "0: la %0,0\n" + "1:\n" + EX_TABLE(0b,1b) + : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL)); + return rc; +} + +static inline void save_fp_regs(freg_t *fprs) +{ + asm volatile("std 0,%0" : "=Q" (fprs[0])); + asm volatile("std 2,%0" : "=Q" (fprs[2])); + asm volatile("std 4,%0" : "=Q" (fprs[4])); + asm volatile("std 6,%0" : "=Q" (fprs[6])); if (!MACHINE_HAS_IEEE) return; - asm volatile( - " lfpc %0\n" - " ld 1,%O0+16(%R0)\n" - " ld 3,%O0+32(%R0)\n" - " ld 5,%O0+48(%R0)\n" - " ld 7,%O0+64(%R0)\n" - " ld 8,%O0+72(%R0)\n" - " ld 9,%O0+80(%R0)\n" - " ld 10,%O0+88(%R0)\n" - " ld 11,%O0+96(%R0)\n" - " ld 12,%O0+104(%R0)\n" - " ld 13,%O0+112(%R0)\n" - " ld 14,%O0+120(%R0)\n" - " ld 15,%O0+128(%R0)\n" - : : "Q" (*fpregs)); + asm volatile("std 1,%0" : "=Q" (fprs[1])); + asm volatile("std 3,%0" : "=Q" (fprs[3])); + asm volatile("std 5,%0" : "=Q" (fprs[5])); + asm volatile("std 7,%0" : "=Q" (fprs[7])); + asm volatile("std 8,%0" : "=Q" (fprs[8])); + asm volatile("std 9,%0" : "=Q" (fprs[9])); + asm volatile("std 10,%0" : "=Q" (fprs[10])); + asm volatile("std 11,%0" : "=Q" (fprs[11])); + asm volatile("std 12,%0" : "=Q" (fprs[12])); + asm volatile("std 13,%0" : "=Q" (fprs[13])); + asm volatile("std 14,%0" : "=Q" (fprs[14])); + asm volatile("std 15,%0" : "=Q" (fprs[15])); +} + +static inline void restore_fp_regs(freg_t *fprs) +{ + asm volatile("ld 0,%0" : : "Q" (fprs[0])); + asm volatile("ld 2,%0" : : "Q" (fprs[2])); + asm volatile("ld 4,%0" : : "Q" (fprs[4])); + asm volatile("ld 6,%0" : : "Q" (fprs[6])); + if (!MACHINE_HAS_IEEE) + return; + asm volatile("ld 1,%0" : : "Q" (fprs[1])); + asm volatile("ld 3,%0" : : "Q" (fprs[3])); + asm volatile("ld 5,%0" : : "Q" (fprs[5])); + asm volatile("ld 7,%0" : : "Q" (fprs[7])); + asm volatile("ld 8,%0" : : "Q" (fprs[8])); + asm volatile("ld 9,%0" : : "Q" (fprs[9])); + asm volatile("ld 10,%0" : : "Q" (fprs[10])); + asm volatile("ld 11,%0" : : "Q" (fprs[11])); + asm volatile("ld 12,%0" : : "Q" (fprs[12])); + asm volatile("ld 13,%0" : : "Q" (fprs[13])); + asm volatile("ld 14,%0" : : "Q" (fprs[14])); + asm volatile("ld 15,%0" : : "Q" (fprs[15])); } static inline void save_access_regs(unsigned int *acrs) @@ -83,12 +119,14 @@ static inline void restore_access_regs(unsigned int *acrs) #define switch_to(prev,next,last) do { \ if (prev->mm) { \ - save_fp_regs(&prev->thread.fp_regs); \ + save_fp_ctl(&prev->thread.fp_regs.fpc); \ + 
save_fp_regs(prev->thread.fp_regs.fprs); \ save_access_regs(&prev->thread.acrs[0]); \ save_ri_cb(prev->thread.ri_cb); \ } \ if (next->mm) { \ - restore_fp_regs(&next->thread.fp_regs); \ + restore_fp_ctl(&next->thread.fp_regs.fpc); \ + restore_fp_regs(next->thread.fp_regs.fprs); \ restore_access_regs(&next->thread.acrs[0]); \ restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ update_cr_regs(next); \ @@ -96,8 +134,4 @@ static inline void restore_access_regs(unsigned int *acrs) prev = __switch_to(prev,next); \ } while (0) -#define finish_arch_switch(prev) do { \ - set_fs(current->thread.mm_segment); \ -} while (0) - #endif /* __ASM_SWITCH_TO_H */ diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index cd29d2f4e4f..abad78d5b10 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -12,7 +12,7 @@ #ifndef _ASM_SYSCALL_H #define _ASM_SYSCALL_H 1 -#include <linux/audit.h> +#include <uapi/linux/audit.h> #include <linux/sched.h> #include <linux/err.h> #include <asm/ptrace.h> @@ -28,7 +28,7 @@ extern const unsigned int sys_call_table_emu[]; static inline long syscall_get_nr(struct task_struct *task, struct pt_regs *regs) { - return test_tsk_thread_flag(task, TIF_SYSCALL) ? + return test_pt_regs_flag(regs, PIF_SYSCALL) ? (regs->int_code & 0xffff) : -1; } @@ -89,11 +89,10 @@ static inline void syscall_set_arguments(struct task_struct *task, regs->orig_gpr2 = args[0]; } -static inline int syscall_get_arch(struct task_struct *task, - struct pt_regs *regs) +static inline int syscall_get_arch(void) { #ifdef CONFIG_COMPAT - if (test_tsk_thread_flag(task, TIF_31BIT)) + if (test_tsk_thread_flag(current, TIF_31BIT)) return AUDIT_ARCH_S390; #endif return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390; diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index eb5f64d26d0..b833e9c0bfb 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -77,27 +77,22 @@ static inline struct thread_info *current_thread_info(void) /* * thread information flags bit numbers */ -#define TIF_SYSCALL 0 /* inside a system call */ -#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ -#define TIF_SIGPENDING 2 /* signal pending */ -#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ -#define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */ -#define TIF_MCCK_PENDING 7 /* machine check handling is pending */ -#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ -#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ -#define TIF_SECCOMP 10 /* secure computing */ -#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */ -#define TIF_31BIT 17 /* 32bit process */ -#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ -#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */ -#define TIF_SINGLE_STEP 20 /* This task is single stepped */ +#define TIF_NOTIFY_RESUME 0 /* callback before returning to user */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_SYSCALL_TRACE 3 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ +#define TIF_SECCOMP 5 /* secure computing */ +#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ +#define TIF_31BIT 16 /* 32bit process */ +#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ +#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */ +#define 
TIF_SINGLE_STEP 19 /* This task is single stepped */ +#define TIF_BLOCK_STEP 20 /* This task is block stepped */ -#define _TIF_SYSCALL (1<<TIF_SYSCALL) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) -#define _TIF_PER_TRAP (1<<TIF_PER_TRAP) -#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING) #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1<<TIF_SECCOMP) @@ -111,6 +106,4 @@ static inline struct thread_info *current_thread_info(void) #define is_32bit_task() (1) #endif -#define PREEMPT_ACTIVE 0x4000000 - #endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 8ad8af91503..8beee1cceba 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -71,30 +71,32 @@ static inline void local_tick_enable(unsigned long long comp) typedef unsigned long long cycles_t; -static inline unsigned long long get_tod_clock(void) +static inline void get_tod_clock_ext(char clk[16]) { - unsigned long long clk; - -#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES - asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc"); -#else - asm volatile("stck %0" : "=Q" (clk) : : "cc"); -#endif - return clk; -} + typedef struct { char _[sizeof(clk)]; } addrtype; -static inline void get_tod_clock_ext(char *clk) -{ - asm volatile("stcke %0" : "=Q" (*clk) : : "cc"); + asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc"); } -static inline unsigned long long get_tod_clock_xt(void) +static inline unsigned long long get_tod_clock(void) { unsigned char clk[16]; get_tod_clock_ext(clk); return *((unsigned long long *)&clk[1]); } +static inline unsigned long long get_tod_clock_fast(void) +{ +#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES + unsigned long long clk; + + asm volatile("stckf %0" : "=Q" (clk) : : "cc"); + return clk; +#else + return get_tod_clock(); +#endif +} + static inline cycles_t get_cycles(void) { return (cycles_t) get_tod_clock() >> 2; @@ -125,7 +127,7 @@ extern u64 sched_clock_base_cc; */ static inline unsigned long long get_tod_clock_monotonic(void) { - return get_tod_clock_xt() - sched_clock_base_cc; + return get_tod_clock() - sched_clock_base_cc; } /** diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 2cb846c4b37..a25f09fbaf3 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -57,16 +57,25 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb, tlb->end = end; tlb->fullmm = !(start | (end+1)); tlb->batch = NULL; - if (tlb->fullmm) - __tlb_flush_mm(mm); } -static inline void tlb_flush_mmu(struct mmu_gather *tlb) +static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) { __tlb_flush_mm_lazy(tlb->mm); +} + +static inline void tlb_flush_mmu_free(struct mmu_gather *tlb) +{ tlb_table_flush(tlb); } + +static inline void tlb_flush_mmu(struct mmu_gather *tlb) +{ + tlb_flush_mmu_tlbonly(tlb); + tlb_flush_mmu_free(tlb); +} + static inline void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) { @@ -96,9 +105,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long address) { - if (!tlb->fullmm) - return page_table_free_rcu(tlb, (unsigned long *) pte); - page_table_free(tlb->mm, (unsigned long *) pte); + page_table_free_rcu(tlb, (unsigned long *) pte); } /* @@ -114,9 +121,7 @@ static 
inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, #ifdef CONFIG_64BIT if (tlb->mm->context.asce_limit <= (1UL << 31)) return; - if (!tlb->fullmm) - return tlb_remove_table(tlb, pmd); - crst_table_free(tlb->mm, (unsigned long *) pmd); + tlb_remove_table(tlb, pmd); #endif } @@ -133,9 +138,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, #ifdef CONFIG_64BIT if (tlb->mm->context.asce_limit <= (1UL << 42)) return; - if (!tlb->fullmm) - return tlb_remove_table(tlb, pud); - crst_table_free(tlb->mm, (unsigned long *) pud); + tlb_remove_table(tlb, pud); #endif } diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index f9fef0425fe..16c9c88658c 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h @@ -7,19 +7,41 @@ #include <asm/pgalloc.h> /* - * Flush all tlb entries on the local cpu. + * Flush all TLB entries on the local CPU. */ static inline void __tlb_flush_local(void) { asm volatile("ptlb" : : : "memory"); } -#ifdef CONFIG_SMP /* - * Flush all tlb entries on all cpus. + * Flush TLB entries for a specific ASCE on all CPUs */ +static inline void __tlb_flush_idte(unsigned long asce) +{ + /* Global TLB flush for the mm */ + asm volatile( + " .insn rrf,0xb98e0000,0,%0,%1,0" + : : "a" (2048), "a" (asce) : "cc"); +} + +/* + * Flush TLB entries for a specific ASCE on the local CPU + */ +static inline void __tlb_flush_idte_local(unsigned long asce) +{ + /* Local TLB flush for the mm */ + asm volatile( + " .insn rrf,0xb98e0000,0,%0,%1,1" + : : "a" (2048), "a" (asce) : "cc"); +} + +#ifdef CONFIG_SMP void smp_ptlb_all(void); +/* + * Flush all TLB entries on all CPUs. + */ static inline void __tlb_flush_global(void) { register unsigned long reg2 asm("2"); @@ -42,36 +64,89 @@ static inline void __tlb_flush_global(void) : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" ); } +/* + * Flush TLB entries for a specific mm on all CPUs (in case gmap is used + * this implicates multiple ASCEs!). + */ static inline void __tlb_flush_full(struct mm_struct *mm) { - cpumask_t local_cpumask; - preempt_disable(); - /* - * If the process only ran on the local cpu, do a local flush. - */ - cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id())); - if (cpumask_equal(mm_cpumask(mm), &local_cpumask)) + atomic_add(0x10000, &mm->context.attach_count); + if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { + /* Local TLB flush */ __tlb_flush_local(); - else + } else { + /* Global TLB flush */ __tlb_flush_global(); + /* Reset TLB flush mask */ + if (MACHINE_HAS_TLB_LC) + cpumask_copy(mm_cpumask(mm), + &mm->context.cpu_attach_mask); + } + atomic_sub(0x10000, &mm->context.attach_count); preempt_enable(); } + +/* + * Flush TLB entries for a specific ASCE on all CPUs. + */ +static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce) +{ + int active, count; + + preempt_disable(); + active = (mm == current->active_mm) ? 
1 : 0; + count = atomic_add_return(0x10000, &mm->context.attach_count); + if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active && + cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { + __tlb_flush_idte_local(asce); + } else { + if (MACHINE_HAS_IDTE) + __tlb_flush_idte(asce); + else + __tlb_flush_global(); + /* Reset TLB flush mask */ + if (MACHINE_HAS_TLB_LC) + cpumask_copy(mm_cpumask(mm), + &mm->context.cpu_attach_mask); + } + atomic_sub(0x10000, &mm->context.attach_count); + preempt_enable(); +} + +static inline void __tlb_flush_kernel(void) +{ + if (MACHINE_HAS_IDTE) + __tlb_flush_idte((unsigned long) init_mm.pgd | + init_mm.context.asce_bits); + else + __tlb_flush_global(); +} #else -#define __tlb_flush_full(mm) __tlb_flush_local() #define __tlb_flush_global() __tlb_flush_local() -#endif +#define __tlb_flush_full(mm) __tlb_flush_local() /* - * Flush all tlb entries of a page table on all cpus. + * Flush TLB entries for a specific ASCE on all CPUs. */ -static inline void __tlb_flush_idte(unsigned long asce) +static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce) { - asm volatile( - " .insn rrf,0xb98e0000,0,%0,%1,0" - : : "a" (2048), "a" (asce) : "cc" ); + if (MACHINE_HAS_TLB_LC) + __tlb_flush_idte_local(asce); + else + __tlb_flush_local(); } +static inline void __tlb_flush_kernel(void) +{ + if (MACHINE_HAS_TLB_LC) + __tlb_flush_idte_local((unsigned long) init_mm.pgd | + init_mm.context.asce_bits); + else + __tlb_flush_local(); +} +#endif + static inline void __tlb_flush_mm(struct mm_struct * mm) { /* @@ -80,7 +155,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) * only ran on the local cpu. */ if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list)) - __tlb_flush_idte((unsigned long) mm->pgd | + __tlb_flush_asce(mm, (unsigned long) mm->pgd | mm->context.asce_bits); else __tlb_flush_full(mm); @@ -130,7 +205,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) { - __tlb_flush_mm(&init_mm); + __tlb_flush_kernel(); } #endif /* _S390_TLBFLUSH_H */ diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 05425b18c0a..56af53093d2 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -26,21 +26,12 @@ extern struct cpu_topology_s390 cpu_topology[NR_CPUS]; #define mc_capable() 1 -static inline const struct cpumask *cpu_coregroup_mask(int cpu) -{ - return &cpu_topology[cpu].core_mask; -} - -static inline const struct cpumask *cpu_book_mask(int cpu) -{ - return &cpu_topology[cpu].book_mask; -} - int topology_cpu_init(struct cpu *); int topology_set_cpu_management(int fc); void topology_schedule_update(void); void store_topology(struct sysinfo_15_1_x *info); void topology_expect_change(void); +const struct cpumask *cpu_coregroup_mask(int cpu); #else /* CONFIG_SCHED_BOOK */ @@ -64,8 +55,6 @@ static inline void s390_init_cpu_topology(void) }; #endif -#define SD_BOOK_INIT SD_CPU_INIT - #include <asm-generic/topology.h> #endif /* _ASM_S390_TOPOLOGY_H */ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 9c33ed4e666..cd4c68e0398 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -92,39 +92,88 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x) #define ARCH_HAS_SORT_EXTABLE #define ARCH_HAS_SEARCH_EXTABLE -struct uaccess_ops { - size_t (*copy_from_user)(size_t, const void __user *, void *); - 
size_t (*copy_from_user_small)(size_t, const void __user *, void *); - size_t (*copy_to_user)(size_t, void __user *, const void *); - size_t (*copy_to_user_small)(size_t, void __user *, const void *); - size_t (*copy_in_user)(size_t, void __user *, const void __user *); - size_t (*clear_user)(size_t, void __user *); - size_t (*strnlen_user)(size_t, const char __user *); - size_t (*strncpy_from_user)(size_t, const char __user *, char *); - int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old); - int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new); -}; +/** + * __copy_from_user: - Copy a block of data from user space, with less checking. + * @to: Destination address, in kernel space. + * @from: Source address, in user space. + * @n: Number of bytes to copy. + * + * Context: User context only. This function may sleep. + * + * Copy data from user space to kernel space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + * + * If some data could not be copied, this function will pad the copied + * data to the requested size using zero bytes. + */ +unsigned long __must_check __copy_from_user(void *to, const void __user *from, + unsigned long n); -extern struct uaccess_ops uaccess; -extern struct uaccess_ops uaccess_std; -extern struct uaccess_ops uaccess_mvcos; -extern struct uaccess_ops uaccess_mvcos_switch; -extern struct uaccess_ops uaccess_pt; +/** + * __copy_to_user: - Copy a block of data into user space, with less checking. + * @to: Destination address, in user space. + * @from: Source address, in kernel space. + * @n: Number of bytes to copy. + * + * Context: User context only. This function may sleep. + * + * Copy data from kernel space to user space. Caller must check + * the specified block with access_ok() before calling this function. + * + * Returns number of bytes that could not be copied. + * On success, this will be zero. + */ +unsigned long __must_check __copy_to_user(void __user *to, const void *from, + unsigned long n); + +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user + +#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES + +#define __put_get_user_asm(to, from, size, spec) \ +({ \ + register unsigned long __reg0 asm("0") = spec; \ + int __rc; \ + \ + asm volatile( \ + "0: mvcos %1,%3,%2\n" \ + "1: xr %0,%0\n" \ + "2:\n" \ + ".pushsection .fixup, \"ax\"\n" \ + "3: lhi %0,%5\n" \ + " jg 2b\n" \ + ".popsection\n" \ + EX_TABLE(0b,3b) EX_TABLE(1b,3b) \ + : "=d" (__rc), "=Q" (*(to)) \ + : "d" (size), "Q" (*(from)), \ + "d" (__reg0), "K" (-EFAULT) \ + : "cc"); \ + __rc; \ +}) + +#define __put_user_fn(x, ptr, size) __put_get_user_asm(ptr, x, size, 0x810000UL) +#define __get_user_fn(x, ptr, size) __put_get_user_asm(x, ptr, size, 0x81UL) -extern int __handle_fault(unsigned long, unsigned long, int); +#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */ -static inline int __put_user_fn(size_t size, void __user *ptr, void *x) +static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) { - size = uaccess.copy_to_user_small(size, ptr, x); - return size ? -EFAULT : size; + size = __copy_to_user(ptr, x, size); + return size ? -EFAULT : 0; } -static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) +static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) { - size = uaccess.copy_from_user_small(size, ptr, x); - return size ? 
-EFAULT : size; + size = __copy_from_user(x, ptr, size); + return size ? -EFAULT : 0; } +#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */ + /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. @@ -139,8 +188,8 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) case 2: \ case 4: \ case 8: \ - __pu_err = __put_user_fn(sizeof (*(ptr)), \ - ptr, &__x); \ + __pu_err = __put_user_fn(&__x, ptr, \ + sizeof(*(ptr))); \ break; \ default: \ __put_user_bad(); \ @@ -156,7 +205,7 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) }) -extern int __put_user_bad(void) __attribute__((noreturn)); +int __put_user_bad(void) __attribute__((noreturn)); #define __get_user(x, ptr) \ ({ \ @@ -165,29 +214,29 @@ extern int __put_user_bad(void) __attribute__((noreturn)); switch (sizeof(*(ptr))) { \ case 1: { \ unsigned char __x; \ - __gu_err = __get_user_fn(sizeof (*(ptr)), \ - ptr, &__x); \ + __gu_err = __get_user_fn(&__x, ptr, \ + sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 2: { \ unsigned short __x; \ - __gu_err = __get_user_fn(sizeof (*(ptr)), \ - ptr, &__x); \ + __gu_err = __get_user_fn(&__x, ptr, \ + sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 4: { \ unsigned int __x; \ - __gu_err = __get_user_fn(sizeof (*(ptr)), \ - ptr, &__x); \ + __gu_err = __get_user_fn(&__x, ptr, \ + sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ case 8: { \ unsigned long long __x; \ - __gu_err = __get_user_fn(sizeof (*(ptr)), \ - ptr, &__x); \ + __gu_err = __get_user_fn(&__x, ptr, \ + sizeof(*(ptr))); \ (x) = *(__force __typeof__(*(ptr)) *) &__x; \ break; \ }; \ @@ -204,38 +253,12 @@ extern int __put_user_bad(void) __attribute__((noreturn)); __get_user(x, ptr); \ }) -extern int __get_user_bad(void) __attribute__((noreturn)); +int __get_user_bad(void) __attribute__((noreturn)); #define __put_user_unaligned __put_user #define __get_user_unaligned __get_user /** - * __copy_to_user: - Copy a block of data into user space, with less checking. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep. - * - * Copy data from kernel space to user space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -static inline unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n) -{ - if (__builtin_constant_p(n) && (n <= 256)) - return uaccess.copy_to_user_small(n, to, from); - else - return uaccess.copy_to_user(n, to, from); -} - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - -/** * copy_to_user: - Copy a block of data into user space. * @to: Destination address, in user space. * @from: Source address, in kernel space. @@ -255,33 +278,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n) return __copy_to_user(to, from, n); } -/** - * __copy_from_user: - Copy a block of data from user space, with less checking. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep. - * - * Copy data from user space to kernel space. 
Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. - */ -static inline unsigned long __must_check -__copy_from_user(void *to, const void __user *from, unsigned long n) -{ - if (__builtin_constant_p(n) && (n <= 256)) - return uaccess.copy_from_user_small(n, from, to); - else - return uaccess.copy_from_user(n, from, to); -} - -extern void copy_from_user_overflow(void) +void copy_from_user_overflow(void) #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS __compiletime_warning("copy_from_user() buffer size is not provably correct") #endif @@ -316,11 +313,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n) return __copy_from_user(to, from, n); } -static inline unsigned long __must_check -__copy_in_user(void __user *to, const void __user *from, unsigned long n) -{ - return uaccess.copy_in_user(n, to, from); -} +unsigned long __must_check +__copy_in_user(void __user *to, const void __user *from, unsigned long n); static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n) @@ -332,18 +326,22 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n) /* * Copy a null terminated string from userspace. */ + +long __strncpy_from_user(char *dst, const char __user *src, long count); + static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count) { might_fault(); - return uaccess.strncpy_from_user(count, src, dst); + return __strncpy_from_user(dst, src, count); } -static inline unsigned long -strnlen_user(const char __user * src, unsigned long n) +unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count); + +static inline unsigned long strnlen_user(const char __user *src, unsigned long n) { might_fault(); - return uaccess.strnlen_user(n, src); + return __strnlen_user(src, n); } /** @@ -365,21 +363,14 @@ strnlen_user(const char __user * src, unsigned long n) /* * Zero Userspace */ +unsigned long __must_check __clear_user(void __user *to, unsigned long size); -static inline unsigned long __must_check -__clear_user(void __user *to, unsigned long n) -{ - return uaccess.clear_user(n, to); -} - -static inline unsigned long __must_check -clear_user(void __user *to, unsigned long n) +static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { might_fault(); - return uaccess.clear_user(n, to); + return __clear_user(to, n); } -extern int copy_to_user_real(void __user *dest, void *src, size_t count); -extern int copy_from_user_real(void *dest, void __user *src, size_t count); +int copy_to_user_real(void __user *dest, void *src, unsigned long count); #endif /* __S390_UACCESS_H */ diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h index a73eb2e1e91..bc9746a7d47 100644 --- a/arch/s390/include/asm/vdso.h +++ b/arch/s390/include/asm/vdso.h @@ -26,8 +26,9 @@ struct vdso_data { __u64 wtom_clock_nsec; /* 0x28 */ __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ __u32 tz_dsttime; /* Type of dst correction 0x34 */ - __u32 ectg_available; - __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */ + __u32 ectg_available; /* ECTG instruction present 0x38 */ + __u32 tk_mult; /* Mult. 
used for xtime_nsec 0x3c */ + __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */ }; struct vdso_per_cpu_data { diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild index 6a9a9eb645f..736637363d3 100644 --- a/arch/s390/include/uapi/asm/Kbuild +++ b/arch/s390/include/uapi/asm/Kbuild @@ -36,6 +36,7 @@ header-y += signal.h header-y += socket.h header-y += sockios.h header-y += sclp_ctl.h +header-y += sie.h header-y += stat.h header-y += statfs.h header-y += swab.h diff --git a/arch/s390/include/uapi/asm/hypfs.h b/arch/s390/include/uapi/asm/hypfs.h new file mode 100644 index 00000000000..37998b44953 --- /dev/null +++ b/arch/s390/include/uapi/asm/hypfs.h @@ -0,0 +1,25 @@ +/* + * IOCTL interface for hypfs + * + * Copyright IBM Corp. 2013 + * + * Author: Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#ifndef _ASM_HYPFS_CTL_H +#define _ASM_HYPFS_CTL_H + +#include <linux/types.h> + +struct hypfs_diag304 { + __u32 args[2]; + __u64 data; + __u64 rc; +} __attribute__((packed)); + +#define HYPFS_IOCTL_MAGIC 0x10 + +#define HYPFS_DIAG304 \ + _IOWR(HYPFS_IOCTL_MAGIC, 0x20, struct hypfs_diag304) + +#endif diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h index d25da598ec6..0fc26430a1e 100644 --- a/arch/s390/include/uapi/asm/kvm.h +++ b/arch/s390/include/uapi/asm/kvm.h @@ -15,6 +15,52 @@ #include <linux/types.h> #define __KVM_S390 +#define __KVM_HAVE_GUEST_DEBUG + +/* Device control API: s390-specific devices */ +#define KVM_DEV_FLIC_GET_ALL_IRQS 1 +#define KVM_DEV_FLIC_ENQUEUE 2 +#define KVM_DEV_FLIC_CLEAR_IRQS 3 +#define KVM_DEV_FLIC_APF_ENABLE 4 +#define KVM_DEV_FLIC_APF_DISABLE_WAIT 5 +#define KVM_DEV_FLIC_ADAPTER_REGISTER 6 +#define KVM_DEV_FLIC_ADAPTER_MODIFY 7 +/* + * We can have up to 4*64k pending subchannels + 8 adapter interrupts, + * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. + * There are also sclp and machine checks. This gives us + * sizeof(kvm_s390_irq)*(4*65536+8+64*64+1+1) = 72 * 266250 = 19170000 + * Let's round up to 8192 pages.
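+ * (19170000 bytes fit comfortably into 8192 pages: 8192 * 4096 = + * 0x2000000 bytes, which is the KVM_S390_FLIC_MAX_BUFFER limit + * defined below.)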
+ */ +#define KVM_S390_MAX_FLOAT_IRQS 266250 +#define KVM_S390_FLIC_MAX_BUFFER 0x2000000 + +struct kvm_s390_io_adapter { + __u32 id; + __u8 isc; + __u8 maskable; + __u8 swap; + __u8 pad; +}; + +#define KVM_S390_IO_ADAPTER_MASK 1 +#define KVM_S390_IO_ADAPTER_MAP 2 +#define KVM_S390_IO_ADAPTER_UNMAP 3 + +struct kvm_s390_io_adapter_req { + __u32 id; + __u8 type; + __u8 mask; + __u16 pad0; + __u64 addr; +}; + +/* kvm attr_group on vm fd */ +#define KVM_S390_VM_MEM_CTRL 0 + +/* kvm attributes for mem_ctrl */ +#define KVM_S390_VM_MEM_ENABLE_CMMA 0 +#define KVM_S390_VM_MEM_CLR_CMMA 1 /* for KVM_GET_REGS and KVM_SET_REGS */ struct kvm_regs { @@ -34,11 +80,31 @@ struct kvm_fpu { __u64 fprs[16]; }; +#define KVM_GUESTDBG_USE_HW_BP 0x00010000 + +#define KVM_HW_BP 1 +#define KVM_HW_WP_WRITE 2 +#define KVM_SINGLESTEP 4 + struct kvm_debug_exit_arch { + __u64 addr; + __u8 type; + __u8 pad[7]; /* Should be set to 0 */ +}; + +struct kvm_hw_breakpoint { + __u64 addr; + __u64 phys_addr; + __u64 len; + __u8 type; + __u8 pad[7]; /* Should be set to 0 */ }; /* for KVM_SET_GUEST_DEBUG */ struct kvm_guest_debug_arch { + __u32 nr_hw_bp; + __u32 pad; /* Should be set to 0 */ + struct kvm_hw_breakpoint __user *hw_bp; }; #define KVM_SYNC_PREFIX (1UL << 0) @@ -57,4 +123,9 @@ struct kvm_sync_regs { #define KVM_REG_S390_EPOCHDIFF (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x2) #define KVM_REG_S390_CPU_TIMER (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x3) #define KVM_REG_S390_CLOCK_COMP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x4) +#define KVM_REG_S390_PFTOKEN (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x5) +#define KVM_REG_S390_PFCOMPARE (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x6) +#define KVM_REG_S390_PFSELECT (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x7) +#define KVM_REG_S390_PP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x8) +#define KVM_REG_S390_GBEA (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x9) #endif diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h index 7a84619e315..a150f4fabe4 100644 --- a/arch/s390/include/uapi/asm/ptrace.h +++ b/arch/s390/include/uapi/asm/ptrace.h @@ -199,6 +199,7 @@ typedef union typedef struct { __u32 fpc; + __u32 pad; freg_t fprs[NUM_FPRS]; } s390_fp_regs; @@ -206,7 +207,6 @@ typedef struct #define FPC_FLAGS_MASK 0x00F80000 #define FPC_DXC_MASK 0x0000FF00 #define FPC_RM_MASK 0x00000003 -#define FPC_VALID_MASK 0xF8F8FF03 /* this typedef defines what a Program Status Word looks like */ typedef struct @@ -263,7 +263,7 @@ typedef struct #define PSW_MASK_EA 0x0000000100000000UL #define PSW_MASK_BA 0x0000000080000000UL -#define PSW_MASK_USER 0x0000FF8180000000UL +#define PSW_MASK_USER 0x0000FF0180000000UL #define PSW_ADDR_AMODE 0x0000000000000000UL #define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL @@ -403,6 +403,12 @@ typedef struct #define PTRACE_TE_ABORT_RAND 0x5011 /* + * The numbers chosen here are somewhat arbitrary but absolutely MUST + * not overlap with any of the numbers assigned in <linux/ptrace.h>.
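+ * + * For example, a tracer might resume a stopped tracee until it + * completes its next branch with (a hypothetical usage sketch; pid is + * the tracee): ptrace(PTRACE_SINGLEBLOCK, pid, 0L, 0L); and then + * collect the resulting stop with waitpid().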
+ */ +#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */ + +/* * PT_PROT definition is loosely based on hppa bsd definition in * gdb/hppab-nat.c */ diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h new file mode 100644 index 00000000000..5d9cc19462c --- /dev/null +++ b/arch/s390/include/uapi/asm/sie.h @@ -0,0 +1,243 @@ +#ifndef _UAPI_ASM_S390_SIE_H +#define _UAPI_ASM_S390_SIE_H + +#define diagnose_codes \ + { 0x10, "DIAG (0x10) release pages" }, \ + { 0x44, "DIAG (0x44) time slice end" }, \ + { 0x9c, "DIAG (0x9c) time slice end directed" }, \ + { 0x204, "DIAG (0x204) logical-cpu utilization" }, \ + { 0x258, "DIAG (0x258) page-reference services" }, \ + { 0x308, "DIAG (0x308) ipl functions" }, \ + { 0x500, "DIAG (0x500) KVM virtio functions" }, \ + { 0x501, "DIAG (0x501) KVM breakpoint" } + +#define sigp_order_codes \ + { 0x01, "SIGP sense" }, \ + { 0x02, "SIGP external call" }, \ + { 0x03, "SIGP emergency signal" }, \ + { 0x05, "SIGP stop" }, \ + { 0x06, "SIGP restart" }, \ + { 0x09, "SIGP stop and store status" }, \ + { 0x0b, "SIGP initial cpu reset" }, \ + { 0x0d, "SIGP set prefix" }, \ + { 0x0e, "SIGP store status at address" }, \ + { 0x12, "SIGP set architecture" }, \ + { 0x15, "SIGP sense running" } + +#define icpt_prog_codes \ + { 0x0001, "Prog Operation" }, \ + { 0x0002, "Prog Privileged Operation" }, \ + { 0x0003, "Prog Execute" }, \ + { 0x0004, "Prog Protection" }, \ + { 0x0005, "Prog Addressing" }, \ + { 0x0006, "Prog Specification" }, \ + { 0x0007, "Prog Data" }, \ + { 0x0008, "Prog Fixedpoint overflow" }, \ + { 0x0009, "Prog Fixedpoint divide" }, \ + { 0x000A, "Prog Decimal overflow" }, \ + { 0x000B, "Prog Decimal divide" }, \ + { 0x000C, "Prog HFP exponent overflow" }, \ + { 0x000D, "Prog HFP exponent underflow" }, \ + { 0x000E, "Prog HFP significance" }, \ + { 0x000F, "Prog HFP divide" }, \ + { 0x0010, "Prog Segment translation" }, \ + { 0x0011, "Prog Page translation" }, \ + { 0x0012, "Prog Translation specification" }, \ + { 0x0013, "Prog Special operation" }, \ + { 0x0015, "Prog Operand" }, \ + { 0x0016, "Prog Trace table" }, \ + { 0x0017, "Prog ASN translation specification" }, \ + { 0x001C, "Prog Space switch event" }, \ + { 0x001D, "Prog HFP square root" }, \ + { 0x001F, "Prog PC translation specification" }, \ + { 0x0020, "Prog AFX translation" }, \ + { 0x0021, "Prog ASX translation" }, \ + { 0x0022, "Prog LX translation" }, \ + { 0x0023, "Prog EX translation" }, \ + { 0x0024, "Prog Primary authority" }, \ + { 0x0025, "Prog Secondary authority" }, \ + { 0x0026, "Prog LFX translation exception" }, \ + { 0x0027, "Prog LSX translation exception" }, \ + { 0x0028, "Prog ALET specification" }, \ + { 0x0029, "Prog ALEN translation" }, \ + { 0x002A, "Prog ALE sequence" }, \ + { 0x002B, "Prog ASTE validity" }, \ + { 0x002C, "Prog ASTE sequence" }, \ + { 0x002D, "Prog Extended authority" }, \ + { 0x002E, "Prog LSTE sequence" }, \ + { 0x002F, "Prog ASTE instance" }, \ + { 0x0030, "Prog Stack full" }, \ + { 0x0031, "Prog Stack empty" }, \ + { 0x0032, "Prog Stack specification" }, \ + { 0x0033, "Prog Stack type" }, \ + { 0x0034, "Prog Stack operation" }, \ + { 0x0039, "Prog Region first translation" }, \ + { 0x003A, "Prog Region second translation" }, \ + { 0x003B, "Prog Region third translation" }, \ + { 0x0040, "Prog Monitor event" }, \ + { 0x0080, "Prog PER event" }, \ + { 0x0119, "Prog Crypto operation" } + +#define exit_code_ipa0(ipa0, opcode, mnemonic) \ + { (ipa0 << 8 | opcode), #ipa0 " " mnemonic } +#define exit_code(opcode,
mnemonic) \ + { opcode, mnemonic } + +#define icpt_insn_codes \ + exit_code_ipa0(0x01, 0x01, "PR"), \ + exit_code_ipa0(0x01, 0x04, "PTFF"), \ + exit_code_ipa0(0x01, 0x07, "SCKPF"), \ + exit_code_ipa0(0xAA, 0x00, "RINEXT"), \ + exit_code_ipa0(0xAA, 0x01, "RION"), \ + exit_code_ipa0(0xAA, 0x02, "TRIC"), \ + exit_code_ipa0(0xAA, 0x03, "RIOFF"), \ + exit_code_ipa0(0xAA, 0x04, "RIEMIT"), \ + exit_code_ipa0(0xB2, 0x02, "STIDP"), \ + exit_code_ipa0(0xB2, 0x04, "SCK"), \ + exit_code_ipa0(0xB2, 0x05, "STCK"), \ + exit_code_ipa0(0xB2, 0x06, "SCKC"), \ + exit_code_ipa0(0xB2, 0x07, "STCKC"), \ + exit_code_ipa0(0xB2, 0x08, "SPT"), \ + exit_code_ipa0(0xB2, 0x09, "STPT"), \ + exit_code_ipa0(0xB2, 0x0d, "PTLB"), \ + exit_code_ipa0(0xB2, 0x10, "SPX"), \ + exit_code_ipa0(0xB2, 0x11, "STPX"), \ + exit_code_ipa0(0xB2, 0x12, "STAP"), \ + exit_code_ipa0(0xB2, 0x14, "SIE"), \ + exit_code_ipa0(0xB2, 0x16, "SETR"), \ + exit_code_ipa0(0xB2, 0x17, "STETR"), \ + exit_code_ipa0(0xB2, 0x18, "PC"), \ + exit_code_ipa0(0xB2, 0x20, "SERVC"), \ + exit_code_ipa0(0xB2, 0x28, "PT"), \ + exit_code_ipa0(0xB2, 0x29, "ISKE"), \ + exit_code_ipa0(0xB2, 0x2a, "RRBE"), \ + exit_code_ipa0(0xB2, 0x2b, "SSKE"), \ + exit_code_ipa0(0xB2, 0x2c, "TB"), \ + exit_code_ipa0(0xB2, 0x2e, "PGIN"), \ + exit_code_ipa0(0xB2, 0x2f, "PGOUT"), \ + exit_code_ipa0(0xB2, 0x30, "CSCH"), \ + exit_code_ipa0(0xB2, 0x31, "HSCH"), \ + exit_code_ipa0(0xB2, 0x32, "MSCH"), \ + exit_code_ipa0(0xB2, 0x33, "SSCH"), \ + exit_code_ipa0(0xB2, 0x34, "STSCH"), \ + exit_code_ipa0(0xB2, 0x35, "TSCH"), \ + exit_code_ipa0(0xB2, 0x36, "TPI"), \ + exit_code_ipa0(0xB2, 0x37, "SAL"), \ + exit_code_ipa0(0xB2, 0x38, "RSCH"), \ + exit_code_ipa0(0xB2, 0x39, "STCRW"), \ + exit_code_ipa0(0xB2, 0x3a, "STCPS"), \ + exit_code_ipa0(0xB2, 0x3b, "RCHP"), \ + exit_code_ipa0(0xB2, 0x3c, "SCHM"), \ + exit_code_ipa0(0xB2, 0x40, "BAKR"), \ + exit_code_ipa0(0xB2, 0x48, "PALB"), \ + exit_code_ipa0(0xB2, 0x4c, "TAR"), \ + exit_code_ipa0(0xB2, 0x50, "CSP"), \ + exit_code_ipa0(0xB2, 0x54, "MVPG"), \ + exit_code_ipa0(0xB2, 0x58, "BSG"), \ + exit_code_ipa0(0xB2, 0x5a, "BSA"), \ + exit_code_ipa0(0xB2, 0x5f, "CHSC"), \ + exit_code_ipa0(0xB2, 0x74, "SIGA"), \ + exit_code_ipa0(0xB2, 0x76, "XSCH"), \ + exit_code_ipa0(0xB2, 0x78, "STCKE"), \ + exit_code_ipa0(0xB2, 0x7c, "STCKF"), \ + exit_code_ipa0(0xB2, 0x7d, "STSI"), \ + exit_code_ipa0(0xB2, 0xb0, "STFLE"), \ + exit_code_ipa0(0xB2, 0xb1, "STFL"), \ + exit_code_ipa0(0xB2, 0xb2, "LPSWE"), \ + exit_code_ipa0(0xB2, 0xf8, "TEND"), \ + exit_code_ipa0(0xB2, 0xfc, "TABORT"), \ + exit_code_ipa0(0xB9, 0x1e, "KMAC"), \ + exit_code_ipa0(0xB9, 0x28, "PCKMO"), \ + exit_code_ipa0(0xB9, 0x2a, "KMF"), \ + exit_code_ipa0(0xB9, 0x2b, "KMO"), \ + exit_code_ipa0(0xB9, 0x2d, "KMCTR"), \ + exit_code_ipa0(0xB9, 0x2e, "KM"), \ + exit_code_ipa0(0xB9, 0x2f, "KMC"), \ + exit_code_ipa0(0xB9, 0x3e, "KIMD"), \ + exit_code_ipa0(0xB9, 0x3f, "KLMD"), \ + exit_code_ipa0(0xB9, 0x8a, "CSPG"), \ + exit_code_ipa0(0xB9, 0x8d, "EPSW"), \ + exit_code_ipa0(0xB9, 0x8e, "IDTE"), \ + exit_code_ipa0(0xB9, 0x8f, "CRDTE"), \ + exit_code_ipa0(0xB9, 0x9c, "EQBS"), \ + exit_code_ipa0(0xB9, 0xa2, "PTF"), \ + exit_code_ipa0(0xB9, 0xab, "ESSA"), \ + exit_code_ipa0(0xB9, 0xae, "RRBM"), \ + exit_code_ipa0(0xB9, 0xaf, "PFMF"), \ + exit_code_ipa0(0xE3, 0x03, "LRAG"), \ + exit_code_ipa0(0xE3, 0x13, "LRAY"), \ + exit_code_ipa0(0xE3, 0x25, "NTSTG"), \ + exit_code_ipa0(0xE5, 0x00, "LASP"), \ + exit_code_ipa0(0xE5, 0x01, "TPROT"), \ + exit_code_ipa0(0xE5, 0x60, "TBEGIN"), \ + exit_code_ipa0(0xE5, 0x61, "TBEGINC"), \ + 
exit_code_ipa0(0xEB, 0x25, "STCTG"), \ + exit_code_ipa0(0xEB, 0x2f, "LCTLG"), \ + exit_code_ipa0(0xEB, 0x60, "LRIC"), \ + exit_code_ipa0(0xEB, 0x61, "STRIC"), \ + exit_code_ipa0(0xEB, 0x62, "MRIC"), \ + exit_code_ipa0(0xEB, 0x8a, "SQBS"), \ + exit_code_ipa0(0xC8, 0x01, "ECTG"), \ + exit_code(0x0a, "SVC"), \ + exit_code(0x80, "SSM"), \ + exit_code(0x82, "LPSW"), \ + exit_code(0x83, "DIAG"), \ + exit_code(0xae, "SIGP"), \ + exit_code(0xac, "STNSM"), \ + exit_code(0xad, "STOSM"), \ + exit_code(0xb1, "LRA"), \ + exit_code(0xb6, "STCTL"), \ + exit_code(0xb7, "LCTL"), \ + exit_code(0xee, "PLO") + +#define sie_intercept_code \ + { 0x00, "Host interruption" }, \ + { 0x04, "Instruction" }, \ + { 0x08, "Program interruption" }, \ + { 0x0c, "Instruction and program interruption" }, \ + { 0x10, "External request" }, \ + { 0x14, "External interruption" }, \ + { 0x18, "I/O request" }, \ + { 0x1c, "Wait state" }, \ + { 0x20, "Validity" }, \ + { 0x28, "Stop request" }, \ + { 0x2c, "Operation exception" }, \ + { 0x38, "Partial-execution" }, \ + { 0x3c, "I/O interruption" }, \ + { 0x40, "I/O instruction" }, \ + { 0x48, "Timing subset" } + +/* + * This is a simple decoder for intercepted instructions. + * + * It will be used as a userspace interface and can also be used in places + * that do not allow the use of general decoder functions, + * such as trace event declarations. + * + * Some userspace tools may want to parse this code + * and would be confused by switch(), if() and other statements, + * but they can understand the conditional operator. + */ +#define INSN_DECODE_IPA0(ipa0, insn, rshift, mask) \ + (insn >> 56) == (ipa0) ? \ + ((ipa0 << 8) | ((insn >> rshift) & mask)) : + +#define INSN_DECODE(insn) (insn >> 56) + +/* + * The macro icpt_insn_decoder() takes an intercepted instruction + * and returns a key, which can be used to find a mnemonic name + * of the instruction in the icpt_insn_codes table.
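+ * + * For example, an intercepted SERVC carries 0xb220 in the two most + * significant bytes of the instruction word, so icpt_insn_decoder() + * evaluates to (0xb2 << 8) | 0x20 == 0xb220, the key of the + * exit_code_ipa0(0xB2, 0x20, "SERVC") entry above.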
+ */ +#define icpt_insn_decoder(insn) \ + INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \ + INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \ + INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \ + INSN_DECODE_IPA0(0xb9, insn, 48, 0xff) \ + INSN_DECODE_IPA0(0xe3, insn, 48, 0xff) \ + INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \ + INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \ + INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \ + INSN_DECODE(insn) + +#endif /* _UAPI_ASM_S390_SIE_H */ diff --git a/arch/s390/include/uapi/asm/sigcontext.h b/arch/s390/include/uapi/asm/sigcontext.h index 584787f6ce4..b30de9c01bb 100644 --- a/arch/s390/include/uapi/asm/sigcontext.h +++ b/arch/s390/include/uapi/asm/sigcontext.h @@ -49,6 +49,7 @@ typedef struct typedef struct { unsigned int fpc; + unsigned int pad; double fprs[__NUM_FPRS]; } _s390_fp_regs; diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index 92494494692..e031332096d 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -82,4 +82,8 @@ #define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 + +#define SO_BPF_EXTENSIONS 48 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/s390/include/uapi/asm/statfs.h b/arch/s390/include/uapi/asm/statfs.h index a61d538756f..471eb09184d 100644 --- a/arch/s390/include/uapi/asm/statfs.h +++ b/arch/s390/include/uapi/asm/statfs.h @@ -35,11 +35,11 @@ struct statfs { struct statfs64 { unsigned int f_type; unsigned int f_bsize; - unsigned long f_blocks; - unsigned long f_bfree; - unsigned long f_bavail; - unsigned long f_files; - unsigned long f_ffree; + unsigned long long f_blocks; + unsigned long long f_bfree; + unsigned long long f_bavail; + unsigned long long f_files; + unsigned long long f_ffree; __kernel_fsid_t f_fsid; unsigned int f_namelen; unsigned int f_frsize; diff --git a/arch/s390/include/uapi/asm/ucontext.h b/arch/s390/include/uapi/asm/ucontext.h index 200e06325c6..3e077b2a470 100644 --- a/arch/s390/include/uapi/asm/ucontext.h +++ b/arch/s390/include/uapi/asm/ucontext.h @@ -16,7 +16,9 @@ struct ucontext_extended { struct ucontext *uc_link; stack_t uc_stack; _sigregs uc_mcontext; - unsigned long uc_sigmask[2]; + sigset_t uc_sigmask; + /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. */ + unsigned char __unused[128 - sizeof(sigset_t)]; unsigned long uc_gprs_high[16]; }; @@ -27,7 +29,9 @@ struct ucontext { struct ucontext *uc_link; stack_t uc_stack; _sigregs uc_mcontext; - sigset_t uc_sigmask; /* mask last for extensibility */ + sigset_t uc_sigmask; + /* Allow for uc_sigmask growth. Glibc uses a 1024-bit sigset_t. 
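+ * (1024 bits / 8 = 128 bytes, so uc_sigmask plus the pad below always + * total 128 bytes, whatever size the kernel sigset_t has.)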
*/ + unsigned char __unused[128 - sizeof(sigset_t)]; }; #endif /* !_ASM_S390_UCONTEXT_H */ diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 864f693c237..3802d2d3a18 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -280,7 +280,10 @@ #define __NR_s390_runtime_instr 342 #define __NR_kcmp 343 #define __NR_finit_module 344 -#define NR_syscalls 345 +#define __NR_sched_setattr 345 +#define __NR_sched_getattr 346 +#define __NR_renameat2 347 +#define NR_syscalls 348 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h index e83fc116f5b..f2b18eacaca 100644 --- a/arch/s390/include/uapi/asm/zcrypt.h +++ b/arch/s390/include/uapi/asm/zcrypt.h @@ -154,6 +154,67 @@ struct ica_xcRB { unsigned short priority_window; unsigned int status; } __attribute__((packed)); + +/** + * struct ep11_cprb - EP11 connectivity programming request block + * @cprb_len: CPRB header length [0x0020] + * @cprb_ver_id: CPRB version id. [0x04] + * @pad_000: Alignment pad bytes + * @flags: Admin cmd [0x80] or functional cmd [0x00] + * @func_id: Function id / subtype [0x5434] + * @source_id: Source id [originator id] + * @target_id: Target id [usage/ctrl domain id] + * @ret_code: Return code + * @reserved1: Reserved + * @reserved2: Reserved + * @payload_len: Payload length + */ +struct ep11_cprb { + uint16_t cprb_len; + unsigned char cprb_ver_id; + unsigned char pad_000[2]; + unsigned char flags; + unsigned char func_id[2]; + uint32_t source_id; + uint32_t target_id; + uint32_t ret_code; + uint32_t reserved1; + uint32_t reserved2; + uint32_t payload_len; +} __attribute__((packed)); + +/** + * struct ep11_target_dev - EP11 target device list + * @ap_id: AP device id + * @dom_id: Usage domain id + */ +struct ep11_target_dev { + uint16_t ap_id; + uint16_t dom_id; +}; + +/** + * struct ep11_urb - EP11 user request block + * @targets_num: Number of target adapters + * @targets: Addr to target adapter list + * @weight: Level of request priority + * @req_no: Request id/number + * @req_len: Request length + * @req: Addr to request block + * @resp_len: Response length + * @resp: Addr to response block + */ +struct ep11_urb { + uint16_t targets_num; + uint64_t targets; + uint64_t weight; + uint64_t req_no; + uint64_t req_len; + uint64_t req; + uint64_t resp_len; + uint64_t resp; +} __attribute__((packed)); + #define AUTOSELECT ((unsigned int)0xFFFFFFFF) #define ZCRYPT_IOCTL_MAGIC 'z' @@ -183,6 +244,9 @@ struct ica_xcRB { * ZSECSENDCPRB * Send an arbitrary CPRB to a crypto card. * + * ZSENDEP11CPRB + * Send an arbitrary EP11 CPRB to an EP11 coprocessor crypto card. + * * Z90STAT_STATUS_MASK * Return a 64 element array of unsigned chars for the status of * all devices. @@ -256,6 +320,7 @@ struct ica_xcRB { #define ICARSAMODEXPO _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0) #define ICARSACRT _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0) #define ZSECSENDCPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0) +#define ZSENDEP11CPRB _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0) /* New status calls */ #define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
