From 933393f58fef9963eac61db8093689544e29a600 Mon Sep 17 00:00:00 2001
From: Christoph Lameter <cl@linux.com>
Date: Thu, 22 Dec 2011 11:58:51 -0600
Subject: percpu: Remove irqsafe_cpu_xxx variants

We simply say that regular this_cpu use must be safe regardless of
preemption and interrupt state.  That has no material change for x86
and s390 implementations of this_cpu operations.  However, arches that
do not provide their own implementation for this_cpu operations will
now get code generated that disables interrupts instead of preemption.

-tj: This is part of on-going percpu API cleanup.  For detailed
     discussion of the subject, please refer to the following thread.

     http://thread.gmane.org/gmane.linux.kernel/1222078

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
LKML-Reference:
---
 arch/s390/include/asm/percpu.h | 44 +++++++++++++++++++++---------------------
 arch/x86/include/asm/percpu.h  | 28 ---------------------------
 2 files changed, 22 insertions(+), 50 deletions(-)

(limited to 'arch')

diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 5325c89a584..0fbd1899c7b 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -19,7 +19,7 @@
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
-#define arch_irqsafe_cpu_to_op(pcp, val, op)			\
+#define arch_this_cpu_to_op(pcp, val, op)			\
 do {								\
 	typedef typeof(pcp) pcp_op_T__;				\
 	pcp_op_T__ old__, new__, prev__;			\
@@ -41,27 +41,27 @@ do {								\
 	preempt_enable();					\
 } while (0)
 
-#define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
-#define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
 
-#define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
-#define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)
 
-#define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
-#define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
 
-#define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
-#define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
 
-#define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)		\
+#define arch_this_cpu_cmpxchg(pcp, oval, nval)			\
 ({								\
 	typedef typeof(pcp) pcp_op_T__;				\
 	pcp_op_T__ ret__;					\
@@ -79,10 +79,10 @@ do {								\
 	ret__;							\
 })
 
-#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_1(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
 
 #include <asm-generic/percpu.h>

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 3470c9d0ebb..562ccb5323d 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -414,22 +414,6 @@ do {								\
 #define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
 
-#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
-#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
-#define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
-#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_or_1(pcp, val) percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_or_2(pcp, val) percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_or_4(pcp, val) percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
-
 #ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
 #define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
@@ -445,9 +429,6 @@ do {								\
 #define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
 #define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
 
-#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
 #endif /* !CONFIG_M386 */
 
 #ifdef CONFIG_X86_CMPXCHG64
@@ -467,7 +448,6 @@ do {								\
 #define __this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
 #define this_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
-#define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg8b_double(pcp1, o1, o2, n1, n2)
 
 #endif /* CONFIG_X86_CMPXCHG64 */
 
 /*
@@ -495,13 +475,6 @@ do {								\
 #define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
 #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
-#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
-#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
-#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
-
 /*
  * Pretty complex macro to generate cmpxchg16 instruction.  The instruction
  * is not supported on early AMD64 processors so we must be able to emulate
@@ -532,7 +505,6 @@ do {								\
 #define __this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
 #define this_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
-#define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, o1, o2, n1, n2) percpu_cmpxchg16b_double(pcp1, o1, o2, n1, n2)
 
 #endif
--
cgit v1.2.3-18-g5258
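
Note on the generic fallback mentioned in the commit message: this cgit view
is limited to 'arch', so the include/linux/percpu.h side of the series is not
shown above. For an arch that provides none of its own this_cpu_*_N macros,
the generic read-modify-write helper now brackets the access with interrupt
disabling rather than preempt_disable()/preempt_enable(). What follows is a
minimal sketch of that idea; the helper names _this_cpu_generic_to_op and
__this_cpu_ptr match the ones used around this kernel version but are
recalled here, not quoted from this commit.

	/* Sketch of the generic fallback: irq-safe, not merely preempt-safe. */
	#define _this_cpu_generic_to_op(pcp, val, op)			\
	do {								\
		unsigned long flags;					\
		local_irq_save(flags);	/* was preempt_disable() */	\
		*__this_cpu_ptr(&(pcp)) op val;				\
		local_irq_restore(flags);	/* was preempt_enable() */ \
	} while (0)

	/* The size-specific ops map onto it, e.g.: */
	#define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)

	/* Callers then need no context checks: a per-cpu counter updated
	 * from both process and interrupt context stays consistent.
	 */
	DEFINE_PER_CPU(unsigned long, nr_events);

	static void count_event(void)	/* safe even from an irq handler */
	{
		this_cpu_add(nr_events, 1);
	}

x86 and s390 see no material change because their own implementations are
already irq-safe: the x86 ops are single instructions on the %fs/%gs-based
per-cpu segment, and the s390 arch_this_cpu_to_op() above is a cmpxchg-style
retry loop (old__/new__/prev__) that tolerates interruption and simply
retries. For those two arches the patch only drops the now-redundant
irqsafe_cpu_* aliases.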