Diffstat (limited to 'include/asm-generic/percpu.h')
-rw-r--r--  include/asm-generic/percpu.h | 113
1 file changed, 53 insertions, 60 deletions
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 00f45ff081a..0703aa75b5e 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -1,13 +1,9 @@
 #ifndef _ASM_GENERIC_PERCPU_H_
 #define _ASM_GENERIC_PERCPU_H_
+
 #include <linux/compiler.h>
 #include <linux/threads.h>
-
-/*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
+#include <linux/percpu-defs.h>
 
 #ifdef CONFIG_SMP
 
@@ -45,7 +41,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
  * Only S390 provides its own means of moving the pointer.
  */
 #ifndef SHIFT_PERCPU_PTR
-#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
+/* Weird cast keeps both GCC and sparse happy. */
+#define SHIFT_PERCPU_PTR(__p, __offset) ({				\
+	__verify_pcpu_ptr((__p));					\
+	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
+})
 #endif
 
 /*
@@ -54,12 +54,19 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
  * offset.
  */
 #define per_cpu(var, cpu) \
-	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
-#define __get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
-#define __raw_get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
+
+#ifndef raw_cpu_ptr
+#define raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#else
+#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
+#endif
+#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
+#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
 
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
@@ -67,69 +74,55 @@ extern void setup_per_cpu_areas(void);
 
 #else /* ! SMP */
 
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var) per_cpu_var(var)
-#define __raw_get_cpu_var(var) per_cpu_var(var)
+#define VERIFY_PERCPU_PTR(__p) ({				\
+	__verify_pcpu_ptr((__p));				\
+	(typeof(*(__p)) __kernel __force *)(__p);		\
+})
+
+#define per_cpu(var, cpu) (*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
+#define __get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
+#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
+#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define raw_cpu_ptr(ptr) this_cpu_ptr(ptr)
 
 #endif /* SMP */
 
-#ifndef PER_CPU_ATTRIBUTES
-#define PER_CPU_ATTRIBUTES
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data..percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
 #endif
-
-#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
-	__typeof__(type) per_cpu_var(name)
-
-/*
- * Optional methods for optimized non-lvalue per-cpu variable access.
- *
- * @var can be a percpu variable or a field of it and its size should
- * equal char, int or long. percpu_read() evaluates to a lvalue and
- * all others to void.
- *
- * These operations are guaranteed to be atomic w.r.t. preemption.
- * The generic versions use plain get/put_cpu_var(). Archs are
- * encouraged to implement single-instruction alternatives which don't
- * require preemption protection.
- */
-#ifndef percpu_read
-# define percpu_read(var)					\
-  ({								\
-	typeof(per_cpu_var(var)) __tmp_var__;			\
-	__tmp_var__ = get_cpu_var(var);				\
-	put_cpu_var(var);					\
-	__tmp_var__;						\
-  })
 #endif
 
-#define __percpu_generic_to_op(var, val, op)			\
-do {								\
-	get_cpu_var(var) op val;				\
-	put_cpu_var(var);					\
-} while (0)
+#ifdef CONFIG_SMP
 
-#ifndef percpu_write
-# define percpu_write(var, val) __percpu_generic_to_op(var, (val), =)
+#ifdef MODULE
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION ""
+#else
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
 #endif
+#define PER_CPU_FIRST_SECTION "..first"
 
-#ifndef percpu_add
-# define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=)
-#endif
+#else
 
-#ifndef percpu_sub
-# define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=)
-#endif
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_FIRST_SECTION ""
 
-#ifndef percpu_and
-# define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=)
 #endif
 
-#ifndef percpu_or
-# define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=)
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
 #endif
 
-#ifndef percpu_xor
-# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
+#ifndef PER_CPU_DEF_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
 #endif
 
+/* Keep until we have removed all uses of __this_cpu_ptr */
+#define __this_cpu_ptr raw_cpu_ptr
+
 #endif /* _ASM_GENERIC_PERCPU_H_ */
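Note on the SHIFT_PERCPU_PTR change above: percpu pointers live in a separate sparse address space, so the macro now force-casts the pointer back to an ordinary __kernel pointer before RELOC_HIDE adds the per-CPU offset, and __verify_pcpu_ptr() turns passing a non-percpu pointer into a sparse diagnostic. Below is a minimal user-space sketch of that shift-by-offset pattern, not kernel code: mock_pcpu_area, the simplified per_cpu_offset() and RELOC_HIDE are stand-ins invented for this illustration (the real RELOC_HIDE also hides the arithmetic from GCC's optimizer).

/* Build with: gcc -o pcpu_sketch pcpu_sketch.c */
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-ins for the sparse annotations; empty for a normal compiler. */
#define __kernel
#define __force

/* Mock per-CPU area: one copy of the "percpu section" per CPU. */
static long mock_pcpu_area[NR_CPUS][16];

/* Byte distance from CPU 0's copy to CPU cpu's copy. */
#define per_cpu_offset(cpu) \
        ((uintptr_t)&mock_pcpu_area[cpu][0] - (uintptr_t)&mock_pcpu_area[0][0])

/* Simplified RELOC_HIDE: add a byte offset to a pointer. */
#define RELOC_HIDE(ptr, off) \
        ((typeof(ptr))((uintptr_t)(ptr) + (off)))

/* Same shape as the patched macro, minus __verify_pcpu_ptr(). */
#define SHIFT_PERCPU_PTR(__p, __offset) \
        RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))

#define per_cpu(var, cpu) (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))

/* "CPU 0's" copy of the variable is the first slot of the area. */
#define counter (mock_pcpu_area[0][0])

int main(void)
{
        per_cpu(counter, 0) = 10;
        per_cpu(counter, 2) = 42;
        printf("cpu0=%ld cpu2=%ld\n", per_cpu(counter, 0), per_cpu(counter, 2));
        return 0;
}

The trick is the same in both: one symbol per percpu variable, and every access is that symbol's address plus the target CPU's offset.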
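For callers, the practical effect of this diff is the consolidated accessor set: per_cpu() now takes the variable itself rather than the mangled per_cpu__##var name, this_cpu_ptr()/raw_cpu_ptr() replace open-coded SHIFT_PERCPU_PTR uses (with preemption checking under CONFIG_DEBUG_PREEMPT), and __this_cpu_ptr survives only as a compatibility alias. A hedged migration sketch, compilable only in a kernel tree; my_counter and both functions are invented examples, not part of this patch:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void update_counter(void)
{
        unsigned long *p;

        /* Old style: __get_cpu_var(my_counter)++;  New style: */
        preempt_disable();
        p = this_cpu_ptr(&my_counter);  /* offset of the local CPU */
        (*p)++;
        preempt_enable();
}

static unsigned long read_counter(int cpu)
{
        /* per_cpu() now takes the plain variable, not per_cpu__my_counter. */
        return per_cpu(my_counter, cpu);
}

Under CONFIG_DEBUG_PREEMPT, this_cpu_ptr() goes through my_cpu_offset and can flag use from preemptible context, which is why the update runs inside preempt_disable(); raw_cpu_ptr() skips that check for callers that guarantee stability some other way.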
