author     Tejun Heo <tj@kernel.org>	2009-06-24 15:13:53 +0900
committer  Tejun Heo <tj@kernel.org>	2009-06-24 15:13:53 +0900
commit     9a0ef2923abd2cc2c6f78d3663ac7af34c0220e8
tree       ba4377fdf8d6d61c4246275e97961ece1085492c /arch/s390/include/asm/percpu.h
parent     9b7dbc7dc0365a943af2d73b1376a6f0aac5dc0d
s390: switch to dynamic percpu allocator
64-bit s390 shares the same problem as alpha regarding percpu symbol
addressing from modules. It needs assembly magic to force a GOTENT
reference when building modules, because the percpu address will be
outside the usual 4G range from the module text. This can be solved
by using weak percpu variable definitions.
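
For illustration only (not part of the patch; the variable and function names below are made up): the intended effect of a weak definition is that the compiler can no longer assume the symbol resolves within reach of a pc-relative larl, so position-independent module code loads the address from a GOT slot, which holds a full 64-bit value and therefore works regardless of how far away the percpu area ends up. A minimal sketch:

/*
 * Hypothetical sketch -- not the kernel's per-cpu machinery.  With a
 * strong (or static) definition the compiler may materialize the
 * address with "larl %rX,example_counter"; with the weak definition it
 * is expected to go through the GOT instead, e.g.
 * "larl %rX,example_counter@GOTENT" plus a load of the 64-bit slot.
 */
unsigned long example_counter __attribute__((weak));

unsigned long example_read(void)
{
	return example_counter;
}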
This patch makes s390 use weak definitions and switch to the dynamic
percpu allocator. Please note that the weak attribute is not added if
!SMP, as percpu variables behave exactly the same as normal variables
on UP.
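
As an aside, a rough, hypothetical sketch of the mechanism (the EXAMPLE_* names are invented; this is not the kernel's actual DEFINE_PER_CPU implementation): defining ARCH_NEEDS_WEAK_PER_CPU asks the generic per-cpu definition macro to attach the weak attribute, and the guard keeps UP builds on plain definitions:

#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE)
#define ARCH_NEEDS_WEAK_PER_CPU
#endif

#ifdef ARCH_NEEDS_WEAK_PER_CPU
#define EXAMPLE_PCPU_ATTRS __attribute__((weak))	/* weak: 64-bit SMP module build */
#else
#define EXAMPLE_PCPU_ATTRS				/* plain definition otherwise */
#endif

#define EXAMPLE_DEFINE_PER_CPU(type, name) \
	EXAMPLE_PCPU_ATTRS __typeof__(type) example_pcpu_##name

/* Expands to a weak definition only when the guard above is satisfied. */
EXAMPLE_DEFINE_PER_CPU(unsigned long, ticks);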
Compile tested. Generation of GOTENT reference verified.
This patch is based on Ivan Kokshaysky's alpha percpu patch.
[ Impact: use dynamic percpu allocator ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Diffstat (limited to 'arch/s390/include/asm/percpu.h')
-rw-r--r--	arch/s390/include/asm/percpu.h	32
1 file changed, 8 insertions, 24 deletions
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 408d60b4f75..f7ad8719d02 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -1,37 +1,21 @@
 #ifndef __ARCH_S390_PERCPU__
 #define __ARCH_S390_PERCPU__
 
-#include <linux/compiler.h>
-#include <asm/lowcore.h>
-
 /*
  * s390 uses its own implementation for per cpu data, the offset of
  * the cpu local data area is cached in the cpu's lowcore memory.
- * For 64 bit module code s390 forces the use of a GOT slot for the
- * address of the per cpu variable. This is needed because the module
- * may be more than 4G above the per cpu area.
  */
-#if defined(__s390x__) && defined(MODULE)
-
-#define SHIFT_PERCPU_PTR(ptr,offset) (({			\
-	extern int simple_identifier_##var(void);		\
-	unsigned long *__ptr;					\
-	asm ( "larl %0, %1@GOTENT"				\
-	    : "=a" (__ptr) : "X" (ptr) );			\
-	(typeof(ptr))((*__ptr) + (offset)); }))
-
-#else
-
-#define SHIFT_PERCPU_PTR(ptr, offset) (({			\
-	extern int simple_identifier_##var(void);		\
-	unsigned long __ptr;					\
-	asm ( "" : "=a" (__ptr) : "0" (ptr) );			\
-	(typeof(ptr)) (__ptr + (offset)); }))
+#define __my_cpu_offset S390_lowcore.percpu_offset
 
+/*
+ * For 64 bit module code, the module may be more than 4G above the
+ * per cpu area, use weak definitions to force the compiler to
+ * generate external references.
+ */
+#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE)
+#define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
-#define __my_cpu_offset S390_lowcore.percpu_offset
-
 #include <asm-generic/percpu.h>
 
 #endif /* __ARCH_S390_PERCPU__ */
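
For context, a hypothetical module-side usage sketch (not part of this patch): nothing changes for users of DEFINE_PER_CPU; on 64-bit SMP module builds the definition simply becomes weak behind the scenes and the variable's address is resolved through the GOT.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

/* Hypothetical per-cpu counter inside a module. */
static DEFINE_PER_CPU(unsigned long, example_hits);

static int __init example_init(void)
{
	/* The accessor computes &example_hits plus this cpu's percpu offset,
	 * which on s390 comes from S390_lowcore.percpu_offset. */
	get_cpu_var(example_hits)++;
	put_cpu_var(example_hits);
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");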