From 03e3b5a0f18c53560de1984fbbfca146d31da2a5 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Thu, 17 Feb 2011 13:13:56 +0100
Subject: [S390] atomic: use ACCESS_ONCE() for atomic_read()

Let's make atomic_read() and atomic_set() behave like they do on
all/most other architectures. Generated code is identical with
gcc 4.5.2.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/atomic.h | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 76daea11718..50cfb5ed601 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -36,14 +36,12 @@
 
 static inline int atomic_read(const atomic_t *v)
 {
-        barrier();
-        return v->counter;
+        return ACCESS_ONCE(v->counter);
 }
 
 static inline void atomic_set(atomic_t *v, int i)
 {
         v->counter = i;
-        barrier();
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
@@ -128,14 +126,12 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 static inline long long atomic64_read(const atomic64_t *v)
 {
-        barrier();
-        return v->counter;
+        return ACCESS_ONCE(v->counter);
 }
 
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
         v->counter = i;
-        barrier();
 }
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
--
cgit v1.2.3-18-g5258

From a8c8d7c683419d059e302373afc6998244f5f60f Mon Sep 17 00:00:00 2001
From: Martin Schwidefsky
Date: Thu, 17 Feb 2011 13:13:57 +0100
Subject: [S390] correct ipl parameter block safe guard

The 'output' variable is passed from decompress_kernel to
check_ipl_parmblock before it is initialized. That disables the
safeguard against the overwrite of the ipl parameter block. Fix this
by passing the correct value to check_ipl_parmblock.

Reported-by: David Binderman
Signed-off-by: Martin Schwidefsky
---
 arch/s390/boot/compressed/misc.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 0851eb1e919..2751b3a8a66 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -133,11 +133,12 @@ unsigned long decompress_kernel(void)
         unsigned long output_addr;
         unsigned char *output;
 
-        check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start);
+        output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
+        check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
         memset(&_bss, 0, &_ebss - &_bss);
         free_mem_ptr = (unsigned long)&_end;
         free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-        output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL);
+        output = (unsigned char *) output_addr;
 
 #ifdef CONFIG_BLK_DEV_INITRD
         /*
--
cgit v1.2.3-18-g5258
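For reference between the two atomic patches in this series: ACCESS_ONCE(),
introduced by the first patch above and replaced again by the inline-asm
patch that follows, is in kernels of this era essentially a read or write
through a volatile lvalue. The stand-alone user-space sketch below is
illustrative only and is not part of any of these patches; it shows the
usual definition and the single-load/single-store behaviour it is meant
to guarantee.

/* Illustrative sketch, not taken from the patches in this series. */
#include <stdio.h>

/* Usual kernel-style definition: access through a volatile lvalue. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

static int counter = 42;

int main(void)
{
        /* The compiler must emit exactly one load of 'counter' here;
         * it may not cache a previously read value. */
        int c = ACCESS_ONCE(counter);

        /* Likewise, exactly one store of 7 to 'counter' here. */
        ACCESS_ONCE(counter) = 7;

        printf("%d %d\n", c, counter);
        return 0;
}

Built with gcc (typeof is a GNU extension), the read and the write each
compile to a single memory access, which is consistent with the note above
that the generated code is identical to the barrier()-based variant.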
From 7657e41a0bd16c9d8b3cefe8fd5d6ac3c25ae4bf Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Thu, 17 Feb 2011 13:13:58 +0100
Subject: [S390] atomic: use inline asm

Use inline assemblies for atomic_read/set(). This way there shouldn't
be any questions or subtle volatile semantics left.

Signed-off-by: Heiko Carstens
Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/atomic.h | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 50cfb5ed601..5c5ba10384c 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -36,12 +36,19 @@
 
 static inline int atomic_read(const atomic_t *v)
 {
-        return ACCESS_ONCE(v->counter);
+        int c;
+
+        asm volatile(
+                "       l       %0,%1\n"
+                : "=d" (c) : "Q" (v->counter));
+        return c;
 }
 
 static inline void atomic_set(atomic_t *v, int i)
 {
-        v->counter = i;
+        asm volatile(
+                "       st      %1,%0\n"
+                : "=Q" (v->counter) : "d" (i));
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
@@ -126,12 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 static inline long long atomic64_read(const atomic64_t *v)
 {
-        return ACCESS_ONCE(v->counter);
+        long long c;
+
+        asm volatile(
+                "       lg      %0,%1\n"
+                : "=d" (c) : "Q" (v->counter));
+        return c;
 }
 
 static inline void atomic64_set(atomic64_t *v, long long i)
 {
-        v->counter = i;
+        asm volatile(
+                "       stg     %1,%0\n"
+                : "=Q" (v->counter) : "d" (i));
 }
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
--
cgit v1.2.3-18-g5258

From dd30ac327481f1663b6526fa9fa078569188b7cc Mon Sep 17 00:00:00 2001
From: Horst Hartmann
Date: Thu, 17 Feb 2011 13:13:59 +0100
Subject: [S390] net: provide architecture specific NET_SKB_PAD

NET_SKB_PAD has been increased from 32 to 64 and later to
max(32, L1_CACHE_BYTES). This led to a 25% throughput decrease for
streaming workloads accompanied by a 37% CPU cost increase on s390.
Define an architecture specific NET_SKB_PAD with the old value of 32.

Signed-off-by: Horst Hartmann
Signed-off-by: Martin Schwidefsky
---
 arch/s390/include/asm/cache.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 24aafa68b64..2a30d5ac066 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -13,6 +13,7 @@
 
 #define L1_CACHE_BYTES 256
 #define L1_CACHE_SHIFT 8
+#define NET_SKB_PAD 32
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
--
cgit v1.2.3-18-g5258
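A note on why the single #define above is enough to take effect: the generic
networking header of this era is assumed (background only, not part of the
patch) to provide its NET_SKB_PAD default only when no architecture has
defined it earlier, roughly as sketched here.

/*
 * Abbreviated sketch of the assumed fallback in include/linux/skbuff.h;
 * an architecture that defines NET_SKB_PAD beforehand, as
 * arch/s390/include/asm/cache.h now does, overrides this default.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
#endif

With L1_CACHE_BYTES at 256 on s390 (see asm/cache.h above), the generic
default would reserve 256 bytes of headroom in every allocated skb; pinning
NET_SKB_PAD back to 32 removes that per-packet overhead, which the commit
message identifies as the source of the throughput regression.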