Diffstat (limited to 'include/asm-generic/percpu.h')
| -rw-r--r-- | include/asm-generic/percpu.h | 136 |
1 file changed, 111 insertions, 25 deletions
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 9044aeb3782..0703aa75b5e 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -1,42 +1,128 @@
 #ifndef _ASM_GENERIC_PERCPU_H_
 #define _ASM_GENERIC_PERCPU_H_
+
 #include <linux/compiler.h>
+#include <linux/threads.h>
+#include <linux/percpu-defs.h>
 
-#define __GENERIC_PER_CPU
 #ifdef CONFIG_SMP
+
+/*
+ * per_cpu_offset() is the offset that has to be added to a
+ * percpu variable to get to the instance for a certain processor.
+ *
+ * Most arches use the __per_cpu_offset array for those offsets but
+ * some arches have their own ways of determining the offset (x86_64, s390).
+ */
+#ifndef __per_cpu_offset
 extern unsigned long __per_cpu_offset[NR_CPUS];
 
-/* Separate out the type, so (int[3], foo) works. */
-#define DEFINE_PER_CPU(type, name) \
-    __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
-
-/* var is in discarded region: offset to particular copy we want */
-#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
-#define __get_cpu_var(var) per_cpu(var, smp_processor_id())
-
-/* A macro to avoid #include hell... */
-#define percpu_modcopy(pcpudst, src, size)			\
-do {								\
-	unsigned int __i;					\
-	for (__i = 0; __i < NR_CPUS; __i++)			\
-		if (cpu_possible(__i))				\
-			memcpy((pcpudst)+__per_cpu_offset[__i],	\
-			       (src), (size));			\
-} while (0)
+#define per_cpu_offset(x) (__per_cpu_offset[x])
+#endif
+
+/*
+ * Determine the offset for the currently active processor.
+ * An arch may define __my_cpu_offset to provide a more effective
+ * means of obtaining the offset to the per cpu variables of the
+ * current processor.
+ */
+#ifndef __my_cpu_offset
+#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+#define my_cpu_offset per_cpu_offset(smp_processor_id())
+#else
+#define my_cpu_offset __my_cpu_offset
+#endif
+
+/*
+ * Add a offset to a pointer but keep the pointer as is.
+ *
+ * Only S390 provides its own means of moving the pointer.
+ */
+#ifndef SHIFT_PERCPU_PTR
+/* Weird cast keeps both GCC and sparse happy. */
+#define SHIFT_PERCPU_PTR(__p, __offset) ({				\
+	__verify_pcpu_ptr((__p));					\
+	RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
+})
+#endif
+
+/*
+ * A percpu variable may point to a discarded regions. The following are
+ * established ways to produce a usable pointer from the percpu variable
+ * offset.
+ */
+#define per_cpu(var, cpu) \
+	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
+
+#ifndef raw_cpu_ptr
+#define raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#else
+#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
+#endif
+
+#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
+#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void setup_per_cpu_areas(void);
+#endif
+
 #else /* ! SMP */
 
-#define DEFINE_PER_CPU(type, name) \
-    __typeof__(type) per_cpu__##name
+#define VERIFY_PERCPU_PTR(__p) ({			\
+	__verify_pcpu_ptr((__p));			\
+	(typeof(*(__p)) __kernel __force *)(__p);	\
+})
 
-#define per_cpu(var, cpu)	(*((void)(cpu), &per_cpu__##var))
-#define __get_cpu_var(var)	per_cpu__##var
+#define per_cpu(var, cpu)	(*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
+#define __get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
+#define __raw_get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
+#define this_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
+#define raw_cpu_ptr(ptr)	this_cpu_ptr(ptr)
 
 #endif	/* SMP */
 
-#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data..percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifdef MODULE
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION ""
+#else
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#endif
+#define PER_CPU_FIRST_SECTION "..first"
+
+#else
+
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
+
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
+
+#ifndef PER_CPU_DEF_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
+#endif
 
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+/* Keep until we have removed all uses of __this_cpu_ptr */
+#define __this_cpu_ptr raw_cpu_ptr
 
 #endif /* _ASM_GENERIC_PERCPU_H_ */
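
The pointer arithmetic that SHIFT_PERCPU_PTR() wraps (and that RELOC_HIDE() hides from the optimizer) can be modeled outside the kernel. Below is a minimal userspace sketch, with every name hypothetical: a single template copy of the per-cpu data plus NR_CPUS replicas, where a CPU's instance is reached by adding that CPU's byte offset to the template address, mirroring __per_cpu_offset[].

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NR_CPUS 4

/* Stand-in for the ".data..percpu" template copy of all percpu variables. */
static struct { unsigned long counter; } pcpu_template;

/* One replica of the template per CPU, as the kernel sets up at boot. */
static struct { unsigned long counter; } pcpu_area[NR_CPUS];

/* Byte offset from the template to each CPU's replica (__per_cpu_offset). */
static uintptr_t pcpu_offset[NR_CPUS];

/* Model of SHIFT_PERCPU_PTR(): add the offset, keep the pointer's type. */
#define shift_percpu_ptr(ptr, off) \
	((__typeof__(ptr))((char *)(ptr) + (off)))

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		pcpu_offset[cpu] = (uintptr_t)&pcpu_area[cpu]
				 - (uintptr_t)&pcpu_template;
		memcpy(&pcpu_area[cpu], &pcpu_template, sizeof(pcpu_template));
	}

	/* per_cpu(counter, 2): shift the template address by CPU 2's offset. */
	unsigned long *p = shift_percpu_ptr(&pcpu_template.counter,
					    pcpu_offset[2]);
	(*p)++;

	printf("cpu2 counter = %lu\n", pcpu_area[2].counter);	/* prints 1 */
	return 0;
}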
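
For context on how the new accessors are consumed, here is a minimal, hypothetical kernel-module fragment; the pkt_count variable and the module scaffolding are illustrative, not part of this commit. this_cpu_ptr() resolves the running CPU's instance by adding __my_cpu_offset, while per_cpu() reaches an arbitrary CPU's instance through per_cpu_offset(cpu).

#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

/* DEFINE_PER_CPU places the variable in PER_CPU_BASE_SECTION
 * (".data..percpu" on SMP); each CPU gets its own replica. */
static DEFINE_PER_CPU(unsigned long, pkt_count);

static int __init pcpu_demo_init(void)
{
	int cpu;

	/* Hold off preemption so the CPU cannot change between computing
	 * this CPU's pointer and the increment through it. */
	preempt_disable();
	(*this_cpu_ptr(&pkt_count))++;
	preempt_enable();

	/* Read every CPU's private instance via per_cpu_offset(cpu). */
	for_each_possible_cpu(cpu)
		pr_info("cpu%d: pkt_count=%lu\n", cpu, per_cpu(pkt_count, cpu));
	return 0;
}

static void __exit pcpu_demo_exit(void)
{
}

module_init(pcpu_demo_init);
module_exit(pcpu_demo_exit);
MODULE_LICENSE("GPL");

The preempt_disable()/preempt_enable() pair is what distinguishes the this_cpu_ptr() path from raw_cpu_ptr(): with CONFIG_DEBUG_PREEMPT, my_cpu_offset uses smp_processor_id() precisely so that uses from preemptible context are flagged.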
