Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/percpu.h          |   5
-rw-r--r--  include/linux/percpu-defs.h           |   1
-rw-r--r--  include/linux/percpu.h                | 434
-rw-r--r--  include/linux/vmstat.h                |  10
-rw-r--r--  include/net/neighbour.h               |   7
-rw-r--r--  include/net/netfilter/nf_conntrack.h  |   4
-rw-r--r--  include/net/snmp.h                    |  50
7 files changed, 439 insertions, 72 deletions
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 90079c373f1..8087b90d467 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -56,6 +56,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define __raw_get_cpu_var(var) \
 	(*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
 
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+
 
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
@@ -66,6 +69,8 @@ extern void setup_per_cpu_areas(void);
 #define per_cpu(var, cpu)	(*((void)(cpu), &per_cpu_var(var)))
 #define __get_cpu_var(var)	per_cpu_var(var)
 #define __raw_get_cpu_var(var)	per_cpu_var(var)
+#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
 
 #endif	/* SMP */
 
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 9bd03193ecd..5a5d6ce4bd5 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -60,6 +60,7 @@
 #define DEFINE_PER_CPU_SECTION(type, name, sec)				\
 	__PCPU_DUMMY_ATTRS char __pcpu_scope_##name;			\
+	extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name;		\
 	__PCPU_DUMMY_ATTRS char __pcpu_unique_##name;			\
 	__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak			\
 	__typeof__(type) per_cpu__##name
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 878836ca999..cf5efbcf716 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,8 +34,6 @@
 
 #ifdef CONFIG_SMP
 
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)
 
@@ -130,30 +128,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 
 extern void *__alloc_reserved_percpu(size_t size, size_t align);
-
-#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-struct percpu_data {
-	void *ptrs[1];
-};
-
-/* pointer disguising messes up the kmemleak objects tracking */
-#ifndef CONFIG_DEBUG_KMEMLEAK
-#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-#else
-#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
-#endif
-
-#define per_cpu_ptr(ptr, cpu)						\
-({									\
-	struct percpu_data *__p = __percpu_disguise(ptr);		\
-	(__typeof__(ptr))__p->ptrs[(cpu)];				\
-})
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
 extern void *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void *__pdata);
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
@@ -179,6 +156,11 @@ static inline void free_percpu(void *p)
 	kfree(p);
 }
 
+static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+	return __pa(addr);
+}
+
 static inline void __init setup_per_cpu_areas(void) { }
 
 static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -188,8 +170,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
 
 #endif /* CONFIG_SMP */
 
-#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
-						       __alignof__(type))
+#define alloc_percpu(type)	\
+	(typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
 
 /*
  * Optional methods for optimized non-lvalue per-cpu variable access.
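[Editor's note: the hunks above drop the legacy pointer-array scheme, so memory from alloc_percpu() and addresses of static percpu variables now share one addressing model: a percpu address plus the executing CPU's offset, which is what this_cpu_ptr() computes. A minimal usage sketch, assuming a hypothetical counter struct and function names that are not part of the patch:

	#include <linux/percpu.h>

	struct my_stats {
		unsigned long events;
	};

	static struct my_stats *stats;	/* one copy per possible CPU */

	static int __init my_init(void)
	{
		/* size and alignment are inferred from the type */
		stats = alloc_percpu(struct my_stats);
		if (!stats)
			return -ENOMEM;
		return 0;
	}

	static void my_event(void)
	{
		/* relocate 'stats' to this CPU's copy and increment it,
		 * safely vs. preemption */
		this_cpu_inc(stats->events);
	}

	static void __exit my_exit(void)
	{
		free_percpu(stats);
	}
]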
@@ -243,4 +225,404 @@ do {									\
 # define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
 #endif
 
+/*
+ * Branching function to split up a function into a set of functions that
+ * are called for different scalar sizes of the objects handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#define __pcpu_size_call_return(stem, variable)				\
+({	typeof(variable) pscr_ret__;					\
+	switch(sizeof(variable)) {					\
+	case 1: pscr_ret__ = stem##1(variable);break;			\
+	case 2: pscr_ret__ = stem##2(variable);break;			\
+	case 4: pscr_ret__ = stem##4(variable);break;			\
+	case 8: pscr_ret__ = stem##8(variable);break;			\
+	default:							\
+		__bad_size_call_parameter();break;			\
+	}								\
+	pscr_ret__;							\
+})
+
+#define __pcpu_size_call(stem, variable, ...)				\
+do {									\
+	switch(sizeof(variable)) {					\
+		case 1: stem##1(variable, __VA_ARGS__);break;		\
+		case 2: stem##2(variable, __VA_ARGS__);break;		\
+		case 4: stem##4(variable, __VA_ARGS__);break;		\
+		case 8: stem##8(variable, __VA_ARGS__);break;		\
+		default:						\
+			__bad_size_call_parameter();break;		\
+	}								\
+} while (0)
+
+/*
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables (can be determined
+ * using per_cpu_var(xx)).
+ *
+ * These operations guarantee exclusivity of access for other operations
+ * on the *same* processor. The assumption is that per cpu data is only
+ * accessed by a single processor instance (the current one).
+ *
+ * The first group is used for accesses that must be done in a
+ * preemption safe way since we know that the context is not preempt
+ * safe. Interrupts may occur. If the interrupt modifies the variable
+ * too then RMW actions will not be reliable.
+ *
+ * The arch code can provide optimized functions in two ways:
+ *
+ * 1. Override the function completely, e.g. define this_cpu_add().
+ *    The arch must then ensure that the various scalar formats passed
+ *    are handled correctly.
+ *
+ * 2. Provide functions for certain scalar sizes, e.g. provide
+ *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
+ *    sized RMW actions. If arch code does not provide operations for
+ *    a scalar size then the fallback in the generic code will be
+ *    used.
+ */
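[Editor's note: as a sketch of the size dispatch just defined, for a 4-byte percpu variable this_cpu_add() selects the _4 helper at compile time, which falls back to the generic preempt-safe form unless the arch overrides it. The step-by-step expansion below is illustrative:

	DEFINE_PER_CPU(u32, nr_widgets);

	this_cpu_add(per_cpu_var(nr_widgets), 1);
	/* sizeof(u32) == 4, so __pcpu_size_call() picks: */
	this_cpu_add_4(per_cpu_var(nr_widgets), 1);
	/* absent an arch override, that is _this_cpu_generic_to_op(..., +=): */
	preempt_disable();
	*__this_cpu_ptr(&per_cpu_var(nr_widgets)) += 1;
	preempt_enable();

An operand that is not 1, 2, 4, or 8 bytes hits the default: branch, and the call to the deliberately undefined __bad_size_call_parameter() fails at link time.]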
+
+#define _this_cpu_generic_read(pcp)					\
+({	typeof(pcp) ret__;						\
+	preempt_disable();						\
+	ret__ = *this_cpu_ptr(&(pcp));					\
+	preempt_enable();						\
+	ret__;								\
+})
+
+#ifndef this_cpu_read
+# ifndef this_cpu_read_1
+#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_2
+#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_4
+#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
+# endif
+# ifndef this_cpu_read_8
+#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
+# endif
+# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
+#endif
+
+#define _this_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	preempt_disable();						\
+	*__this_cpu_ptr(&pcp) op val;					\
+	preempt_enable();						\
+} while (0)
+
+#ifndef this_cpu_write
+# ifndef this_cpu_write_1
+#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_2
+#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_4
+#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef this_cpu_write_8
+#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_add
+# ifndef this_cpu_add_1
+#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_2
+#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_4
+#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef this_cpu_add_8
+#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_sub
+# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef this_cpu_inc
+# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
+#endif
+
+#ifndef this_cpu_dec
+# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef this_cpu_and
+# ifndef this_cpu_and_1
+#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_2
+#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_4
+#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef this_cpu_and_8
+#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
+#endif
+
+#ifndef this_cpu_or
+# ifndef this_cpu_or_1
+#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_2
+#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_4
+#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef this_cpu_or_8
+#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
+#endif
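[Editor's note: the preempt-safe group above subsumes the common get_cpu_var()/put_cpu_var() increment sequence; a before/after sketch on a hypothetical counter:

	DEFINE_PER_CPU(unsigned long, nr_events);

	/* before: two preempt count operations plus address arithmetic */
	get_cpu_var(nr_events)++;
	put_cpu_var(nr_events);

	/* after: a single operation; an arch such as x86 can implement
	 * the size helper as one segment-prefixed instruction */
	this_cpu_inc(per_cpu_var(nr_events));
]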
+
+#ifndef this_cpu_xor
+# ifndef this_cpu_xor_1
+#  define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_2
+#  define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_4
+#  define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef this_cpu_xor_8
+#  define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * Generic percpu operations that do not require preemption handling.
+ * Either we do not care about races or the caller has the
+ * responsibility of handling preemption issues. Arch code can still
+ * override these instructions since the arch per cpu code may be more
+ * efficient and may actually get race freeness for free (that is the
+ * case for x86 for example).
+ *
+ * If there is no other protection through preempt disable and/or
+ * disabling interrupts then one of these RMW operations can show unexpected
+ * behavior because the execution thread was rescheduled on another processor
+ * or an interrupt occurred and the same percpu variable was modified from
+ * the interrupt context.
+ */
+#ifndef __this_cpu_read
+# ifndef __this_cpu_read_1
+#  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_2
+#  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_4
+#  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
+# endif
+# ifndef __this_cpu_read_8
+#  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
+# endif
+# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
+#endif
+
+#define __this_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	*__this_cpu_ptr(&(pcp)) op val;					\
+} while (0)
+
+#ifndef __this_cpu_write
+# ifndef __this_cpu_write_1
+#  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_2
+#  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_4
+#  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# ifndef __this_cpu_write_8
+#  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
+# endif
+# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_add
+# ifndef __this_cpu_add_1
+#  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_2
+#  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_4
+#  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef __this_cpu_add_8
+#  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_sub
+# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
+#endif
+
+#ifndef __this_cpu_inc
+# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
+#endif
+
+#ifndef __this_cpu_dec
+# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
+#endif
+
+#ifndef __this_cpu_and
+# ifndef __this_cpu_and_1
+#  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_2
+#  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_4
+#  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef __this_cpu_and_8
+#  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
+#endif
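[Editor's note: the __this_cpu_*() forms do no preemption handling at all, so the caller supplies whatever protection is needed. An illustrative use, batching several updates under one preempt-disabled section (variable names are hypothetical):

	preempt_disable();
	/* cannot migrate here, so the unprotected RMWs are safe */
	__this_cpu_inc(per_cpu_var(nr_events));
	__this_cpu_add(per_cpu_var(nr_bytes), len);
	preempt_enable();

Doing both updates inside one section is the point: the preempt-safe this_cpu_*() variants would pay for a disable/enable pair per operation.]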
+
+#ifndef __this_cpu_or
+# ifndef __this_cpu_or_1
+#  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_2
+#  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_4
+#  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef __this_cpu_or_8
+#  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef __this_cpu_xor
+# ifndef __this_cpu_xor_1
+#  define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_2
+#  define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_4
+#  define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef __this_cpu_xor_8
+#  define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
+#endif
+
+/*
+ * IRQ safe versions of the per cpu RMW operations. Note that these operations
+ * are *not* safe against modification of the same variable from other
+ * processors (which is what one gets when using regular atomic operations).
+ * They are guaranteed to be atomic vs. local interrupts and preemption only.
+ */
+#define irqsafe_cpu_generic_to_op(pcp, val, op)				\
+do {									\
+	unsigned long flags;						\
+	local_irq_save(flags);						\
+	*__this_cpu_ptr(&(pcp)) op val;					\
+	local_irq_restore(flags);					\
+} while (0)
+
+#ifndef irqsafe_cpu_add
+# ifndef irqsafe_cpu_add_1
+#  define irqsafe_cpu_add_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_2
+#  define irqsafe_cpu_add_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_4
+#  define irqsafe_cpu_add_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# ifndef irqsafe_cpu_add_8
+#  define irqsafe_cpu_add_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), +=)
+# endif
+# define irqsafe_cpu_add(pcp, val)	__pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_sub
+# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
+#endif
+
+#ifndef irqsafe_cpu_inc
+# define irqsafe_cpu_inc(pcp)		irqsafe_cpu_add((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_dec
+# define irqsafe_cpu_dec(pcp)		irqsafe_cpu_sub((pcp), 1)
+#endif
+
+#ifndef irqsafe_cpu_and
+# ifndef irqsafe_cpu_and_1
+#  define irqsafe_cpu_and_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_2
+#  define irqsafe_cpu_and_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_4
+#  define irqsafe_cpu_and_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# ifndef irqsafe_cpu_and_8
+#  define irqsafe_cpu_and_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), &=)
+# endif
+# define irqsafe_cpu_and(pcp, val)	__pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
+#endif
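[Editor's note: the irqsafe_cpu_*() group protects a per-CPU RMW against an interrupt handler that updates the same variable on the same CPU; a sketch with a hypothetical counter:

	DEFINE_PER_CPU(unsigned long, nr_irq_events);

	/* process context: an interrupt may fire mid-RMW, so mask it */
	irqsafe_cpu_inc(per_cpu_var(nr_irq_events));

	/* an interrupt handler on the same CPU can likewise use */
	irqsafe_cpu_inc(per_cpu_var(nr_irq_events));

These remain per-CPU only: concurrent updates from other CPUs still need atomic_t or locking.]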
+
+#ifndef irqsafe_cpu_or
+# ifndef irqsafe_cpu_or_1
+#  define irqsafe_cpu_or_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_2
+#  define irqsafe_cpu_or_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_4
+#  define irqsafe_cpu_or_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# ifndef irqsafe_cpu_or_8
+#  define irqsafe_cpu_or_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), |=)
+# endif
+# define irqsafe_cpu_or(pcp, val)	__pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
+#endif
+
+#ifndef irqsafe_cpu_xor
+# ifndef irqsafe_cpu_xor_1
+#  define irqsafe_cpu_xor_1(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_2
+#  define irqsafe_cpu_xor_2(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_4
+#  define irqsafe_cpu_xor_4(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# ifndef irqsafe_cpu_xor_8
+#  define irqsafe_cpu_xor_8(pcp, val)	irqsafe_cpu_generic_to_op((pcp), (val), ^=)
+# endif
+# define irqsafe_cpu_xor(pcp, val)	__pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
+#endif
+
 #endif /* __LINUX_PERCPU_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2d0f222388a..d85889710f9 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -76,24 +76,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
 
 static inline void __count_vm_event(enum vm_event_item item)
 {
-	__get_cpu_var(vm_event_states).event[item]++;
+	__this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
 }
 
 static inline void count_vm_event(enum vm_event_item item)
 {
-	get_cpu_var(vm_event_states).event[item]++;
-	put_cpu();
+	this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
 }
 
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
-	__get_cpu_var(vm_event_states).event[item] += delta;
+	__this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
 }
 
 static inline void count_vm_events(enum vm_event_item item, long delta)
 {
-	get_cpu_var(vm_event_states).event[item] += delta;
-	put_cpu();
+	this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
 }
 
 extern void all_vm_events(unsigned long *);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 0302f31a2fb..b0173202cad 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -88,12 +88,7 @@ struct neigh_statistics {
 	unsigned long unres_discards;	/* number of unresolved drops */
 };
 
-#define NEIGH_CACHE_STAT_INC(tbl, field)				\
-	do {								\
-		preempt_disable();					\
-		(per_cpu_ptr((tbl)->stats, smp_processor_id())->field)++; \
-		preempt_enable();					\
-	} while (0)
+#define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
 
 struct neighbour {
 	struct neighbour	*next;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 5cf7270e3ff..a0904adfb8f 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -293,11 +293,11 @@ extern unsigned int nf_conntrack_htable_size;
 extern unsigned int nf_conntrack_max;
 
 #define NF_CT_STAT_INC(net, count)	\
-	(per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++)
+	__this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count)		\
 do {							\
 	local_bh_disable();				\
-	per_cpu_ptr((net)->ct.stat, raw_smp_processor_id())->count++;	\
+	__this_cpu_inc((net)->ct.stat->count);		\
 	local_bh_enable();				\
 } while (0)
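[Editor's note: each of the three conversions above follows the same pattern: an open-coded per_cpu_ptr(..., smp_processor_id()) dereference bracketed by explicit preemption (or softirq) control collapses into a single this_cpu operation. The same transformation on a hypothetical stats structure:

	struct tbl_stats {
		unsigned long lookups;
	};

	struct tbl {
		struct tbl_stats *stats;	/* from alloc_percpu() */
	};

	static void count_lookup(struct tbl *t)
	{
		/* before:
		 *	preempt_disable();
		 *	per_cpu_ptr(t->stats, smp_processor_id())->lookups++;
		 *	preempt_enable();
		 */
		this_cpu_inc(t->stats->lookups);	/* preempt safety folded in */
	}
]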
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 8c842e06bec..f0d756f2ac9 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -136,45 +136,31 @@ struct linux_xfrm_mib {
 #define SNMP_STAT_BHPTR(name)	(name[0])
 #define SNMP_STAT_USRPTR(name)	(name[1])
 
-#define SNMP_INC_STATS_BH(mib, field)	\
-	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
-#define SNMP_INC_STATS_USER(mib, field) \
-	do { \
-		per_cpu_ptr(mib[1], get_cpu())->mibs[field]++; \
-		put_cpu(); \
-	} while (0)
-#define SNMP_INC_STATS(mib, field)	\
-	do { \
-		per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]++; \
-		put_cpu(); \
-	} while (0)
-#define SNMP_DEC_STATS(mib, field)	\
-	do { \
-		per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field]--; \
-		put_cpu(); \
-	} while (0)
-#define SNMP_ADD_STATS(mib, field, addend)	\
-	do { \
-		per_cpu_ptr(mib[!in_softirq()], get_cpu())->mibs[field] += addend; \
-		put_cpu(); \
-	} while (0)
-#define SNMP_ADD_STATS_BH(mib, field, addend)	\
-	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
-#define SNMP_ADD_STATS_USER(mib, field, addend)	\
-	do { \
-		per_cpu_ptr(mib[1], get_cpu())->mibs[field] += addend; \
-		put_cpu(); \
-	} while (0)
+#define SNMP_INC_STATS_BH(mib, field)	\
+			__this_cpu_inc(mib[0]->mibs[field])
+#define SNMP_INC_STATS_USER(mib, field)	\
+			this_cpu_inc(mib[1]->mibs[field])
+#define SNMP_INC_STATS(mib, field)	\
+			this_cpu_inc(mib[!in_softirq()]->mibs[field])
+#define SNMP_DEC_STATS(mib, field)	\
+			this_cpu_dec(mib[!in_softirq()]->mibs[field])
+#define SNMP_ADD_STATS_BH(mib, field, addend)	\
+			__this_cpu_add(mib[0]->mibs[field], addend)
+#define SNMP_ADD_STATS_USER(mib, field, addend)	\
+			this_cpu_add(mib[1]->mibs[field], addend)
 #define SNMP_UPD_PO_STATS(mib, basefield, addend)	\
 	do { \
-		__typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], get_cpu());\
+		__typeof__(mib[0]) ptr;			\
+		preempt_disable();			\
+		ptr = this_cpu_ptr((mib)[!in_softirq()]);	\
 		ptr->mibs[basefield##PKTS]++;		\
 		ptr->mibs[basefield##OCTETS] += addend;	\
-		put_cpu(); \
+		preempt_enable();			\
 	} while (0)
 #define SNMP_UPD_PO_STATS_BH(mib, basefield, addend)	\
 	do { \
-		__typeof__(mib[0]) ptr = per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id());\
+		__typeof__(mib[0]) ptr = \
+			__this_cpu_ptr((mib)[!in_softirq()]);	\
 		ptr->mibs[basefield##PKTS]++;		\
 		ptr->mibs[basefield##OCTETS] += addend;	\
 	} while (0)
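[Editor's note: SNMP_UPD_PO_STATS keeps an explicit preempt_disable() because two fields of the same percpu structure must be updated on one CPU; fetching the pointer once via this_cpu_ptr() then amortizes the offset arithmetic across both stores. A standalone sketch of the same idiom with hypothetical names:

	struct pkt_mib {
		unsigned long pkts;
		unsigned long octets;
	};

	/* 'mib' would come from alloc_percpu(struct pkt_mib) */
	static void account_packet(struct pkt_mib *mib, unsigned long len)
	{
		struct pkt_mib *ptr;

		preempt_disable();	/* both updates must hit the same CPU's copy */
		ptr = this_cpu_ptr(mib);
		ptr->pkts++;
		ptr->octets += len;
		preempt_enable();
	}
]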