From e7cb55b946a2182c347047dc903c6ed0daef100c Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 28 Oct 2009 13:33:08 +0000
Subject: kmemleak: Do not use off-slab management with SLAB_NOLEAKTRACE

With the slab allocator, if off-slab management is enabled for the
kmem_caches used by kmemleak, it leads to recursive calls into
kmemleak_alloc(). Off-slab management can be triggered by other config
options increasing the slab size, e.g. DEBUG_PAGEALLOC.

Reported-by: Tetsuo Handa
Reviewed-by: Pekka Enberg
Cc: Christoph Lameter
Signed-off-by: Catalin Marinas
---
 mm/slab.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 7dfa481c96b..646db308519 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2261,9 +2261,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	/*
 	 * Determine if the slab management is 'on' or 'off' slab.
 	 * (bootstrapping cannot cope with offslab caches so don't do
-	 * it too early on.)
+	 * it too early on. Always use on-slab management when
+	 * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
 	 */
-	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
+	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
+	    !(flags & SLAB_NOLEAKTRACE))
 		/*
 		 * Size is large, assume best to place the slab management obj
 		 * off-slab (should allow better packing of objs).
--
cgit v1.2.3-18-g5258


From c017b4be3e84176cab10eca5e6c4faeb8cfc6f3e Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 28 Oct 2009 13:33:09 +0000
Subject: kmemleak: Simplify the kmemleak_scan_area() function prototype

This function was taking unnecessary arguments which can be determined
by kmemleak. The patch also modifies the calling sites.

Signed-off-by: Catalin Marinas
Cc: Pekka Enberg
Cc: Christoph Lameter
Cc: Rusty Russell
---
 mm/slab.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 646db308519..d2713a944eb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2584,8 +2584,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 		 * kmemleak does not treat the ->s_mem pointer as a reference
 		 * to the object. Otherwise we will not report the leak.
 		 */
-		kmemleak_scan_area(slabp, offsetof(struct slab, list),
-				   sizeof(struct list_head), local_flags);
+		kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
+				   local_flags);
 		if (!slabp)
 			return NULL;
 	} else {
--
cgit v1.2.3-18-g5258


From 1871e52c76dd95895caeb772f845a1718dcbcd75 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Thu, 29 Oct 2009 22:34:13 +0900
Subject: percpu: make percpu symbols under kernel/ and mm/ unique

This patch updates percpu related symbols under kernel/ and mm/ such
that percpu symbols are unique and don't clash with local symbols.
This serves two purposes of decreasing the possibility of global
percpu symbol collision and allowing dropping per_cpu__ prefix from
percpu symbols.

* kernel/lockdep.c: s/lock_stats/cpu_lock_stats/

* kernel/sched.c: s/init_rq_rt/init_rt_rq_var/ (any better idea?)
                  s/sched_group_cpus/sched_groups/

* kernel/softirq.c: s/ksoftirqd/run_ksoftirqd/a

* kernel/softlockup.c: s/(*)_timestamp/softlockup_\1_ts/
                       s/watchdog_task/softlockup_watchdog/
                       s/timestamp/ts/ for local variables

* kernel/time/timer_stats: s/lookup_lock/tstats_lookup_lock/

* mm/slab.c: s/reap_work/slab_reap_work/
             s/reap_node/slab_reap_node/

* mm/vmstat.c: local variable changed to avoid collision with vmstat_work

Partly based on Rusty Russell's "alloc_percpu: rename percpu vars which
cause name clashes" patch.

Signed-off-by: Tejun Heo
Acked-by: (slab/vmstat) Christoph Lameter
Reviewed-by: Christoph Lameter
Cc: Rusty Russell
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: Andrew Morton
Cc: Nick Piggin
---
 mm/slab.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 7dfa481c96b..211b1746c63 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -685,7 +685,7 @@ int slab_is_available(void)
 	return g_cpucache_up >= EARLY;
 }
 
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -826,7 +826,7 @@ __setup("noaliencache", noaliencache_setup);
  * objects freed on different nodes from which they were allocated) and the
  * flushing of remote pcps by calling drain_node_pages.
  */
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 
 static void init_reap_node(int cpu)
 {
@@ -836,17 +836,17 @@ static void init_reap_node(int cpu)
 	if (node == MAX_NUMNODES)
 		node = first_node(node_online_map);
 
-	per_cpu(reap_node, cpu) = node;
+	per_cpu(slab_reap_node, cpu) = node;
 }
 
 static void next_reap_node(void)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	node = next_node(node, node_online_map);
 	if (unlikely(node >= MAX_NUMNODES))
 		node = first_node(node_online_map);
-	__get_cpu_var(reap_node) = node;
+	__get_cpu_var(slab_reap_node) = node;
 }
 
 #else
@@ -863,7 +863,7 @@ static void next_reap_node(void)
  */
 static void __cpuinit start_cpu_timer(int cpu)
 {
-	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
@@ -1027,7 +1027,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
@@ -1286,9 +1286,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		 * anything expensive but will only modify reap_work
 		 * and reschedule the timer.
 		 */
-		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+		cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
 		/* Now the cache_reaper is guaranteed to be not running. */
-		per_cpu(reap_work, cpu).work.func = NULL;
+		per_cpu(slab_reap_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
--
cgit v1.2.3-18-g5258


From ce79ddc8e2376a9a93c7d42daf89bfcbb9187e62 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Mon, 23 Nov 2009 22:01:15 +0200
Subject: SLAB: Fix lockdep annotations for CPU hotplug

As reported by Paul McKenney:

I am seeing some lockdep complaints in rcutorture runs that include
frequent CPU-hotplug operations. The tests are otherwise successful.
My first thought was to send a patch that gave each array_cache
structure's ->lock field its own struct lock_class_key, but you already
have a init_lock_keys() that seems to be intended to deal with this.

------------------------------------------------------------------------

=============================================
[ INFO: possible recursive locking detected ]
2.6.32-rc4-autokern1 #1
---------------------------------------------
syslogd/2908 is trying to acquire lock:
 (&nc->lock){..-...}, at: [] .kmem_cache_free+0x118/0x2d4

but task is already holding lock:
 (&nc->lock){..-...}, at: [] .kfree+0x1f0/0x324

other info that might help us debug this:
3 locks held by syslogd/2908:
 #0: (&u->readlock){+.+.+.}, at: [] .unix_dgram_recvmsg+0x70/0x338
 #1: (&nc->lock){..-...}, at: [] .kfree+0x1f0/0x324
 #2: (&parent->list_lock){-.-...}, at: [] .__drain_alien_cache+0x50/0xb8

stack backtrace:
Call Trace:
[c0000000e8ccafc0] [c0000000000101e4] .show_stack+0x70/0x184 (unreliable)
[c0000000e8ccb070] [c0000000000afebc] .validate_chain+0x6ec/0xf58
[c0000000e8ccb180] [c0000000000b0ff0] .__lock_acquire+0x8c8/0x974
[c0000000e8ccb280] [c0000000000b2290] .lock_acquire+0x140/0x18c
[c0000000e8ccb350] [c000000000468df0] ._spin_lock+0x48/0x70
[c0000000e8ccb3e0] [c0000000001407f4] .kmem_cache_free+0x118/0x2d4
[c0000000e8ccb4a0] [c000000000140b90] .free_block+0x130/0x1a8
[c0000000e8ccb540] [c000000000140f94] .__drain_alien_cache+0x80/0xb8
[c0000000e8ccb5e0] [c0000000001411e0] .kfree+0x214/0x324
[c0000000e8ccb6a0] [c0000000003ca860] .skb_release_data+0xe8/0x104
[c0000000e8ccb730] [c0000000003ca2ec] .__kfree_skb+0x20/0xd4
[c0000000e8ccb7b0] [c0000000003cf2c8] .skb_free_datagram+0x1c/0x5c
[c0000000e8ccb830] [c00000000045597c] .unix_dgram_recvmsg+0x2f4/0x338
[c0000000e8ccb920] [c0000000003c0f14] .sock_recvmsg+0xf4/0x13c
[c0000000e8ccbb30] [c0000000003c28ec] .SyS_recvfrom+0xb4/0x130
[c0000000e8ccbcb0] [c0000000003bfb78] .sys_recv+0x18/0x2c
[c0000000e8ccbd20] [c0000000003ed388] .compat_sys_recv+0x14/0x28
[c0000000e8ccbd90] [c0000000003ee1bc] .compat_sys_socketcall+0x178/0x220
[c0000000e8ccbe30] [c0000000000085d4] syscall_exit+0x0/0x40

This patch fixes the issue by setting up lockdep annotations during CPU
hotplug.

Reported-by: Paul E. McKenney
Tested-by: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 108 +++++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 61 insertions(+), 47 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 7dfa481c96b..84de47e350d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -604,6 +604,26 @@ static struct kmem_cache cache_cache = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
+/*
+ * chicken and egg problem: delay the per-cpu array allocation
+ * until the general caches are up.
+ */
+static enum {
+	NONE,
+	PARTIAL_AC,
+	PARTIAL_L3,
+	EARLY,
+	FULL
+} g_cpucache_up;
+
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up >= EARLY;
+}
+
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -620,40 +640,52 @@ static struct kmem_cache cache_cache = {
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
-static inline void init_lock_keys(void)
-
+static void init_node_lock_keys(int q)
 {
-	int q;
 	struct cache_sizes *s = malloc_sizes;
 
-	while (s->cs_size != ULONG_MAX) {
-		for_each_node(q) {
-			struct array_cache **alc;
-			int r;
-			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
-			if (!l3 || OFF_SLAB(s->cs_cachep))
-				continue;
-			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-			alc = l3->alien;
-			/*
-			 * FIXME: This check for BAD_ALIEN_MAGIC
-			 * should go away when common slab code is taught to
-			 * work even without alien caches.
-			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-			 * for alloc_alien_cache,
-			 */
-			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-				continue;
-			for_each_node(r) {
-				if (alc[r])
-					lockdep_set_class(&alc[r]->lock,
-							  &on_slab_alc_key);
-			}
+	if (g_cpucache_up != FULL)
+		return;
+
+	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+		struct array_cache **alc;
+		struct kmem_list3 *l3;
+		int r;
+
+		l3 = s->cs_cachep->nodelists[q];
+		if (!l3 || OFF_SLAB(s->cs_cachep))
+			return;
+		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+		alc = l3->alien;
+		/*
+		 * FIXME: This check for BAD_ALIEN_MAGIC
+		 * should go away when common slab code is taught to
+		 * work even without alien caches.
+		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+		 * for alloc_alien_cache,
+		 */
+		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+			return;
+		for_each_node(r) {
+			if (alc[r])
+				lockdep_set_class(&alc[r]->lock,
+						  &on_slab_alc_key);
 		}
-		s++;
 	}
 }
+
+static inline void init_lock_keys(void)
+{
+	int node;
+
+	for_each_node(node)
+		init_node_lock_keys(node);
+}
 #else
+static void init_node_lock_keys(int q)
+{
+}
+
 static inline void init_lock_keys(void)
 {
 }
@@ -665,26 +697,6 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-	NONE,
-	PARTIAL_AC,
-	PARTIAL_L3,
-	EARLY,
-	FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-	return g_cpucache_up >= EARLY;
-}
-
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
@@ -1254,6 +1266,8 @@ static int __cpuinit cpuup_prepare(long cpu)
 		kfree(shared);
 		free_alien_cache(alien);
 	}
+	init_node_lock_keys(node);
+
 	return 0;
 bad:
 	cpuup_canceled(cpu);
--
cgit v1.2.3-18-g5258


From 8e15b79cf4bd20c6afb4663d98a39cd004eee672 Mon Sep 17 00:00:00 2001
From: Tim Blechmann
Date: Mon, 30 Nov 2009 18:59:34 +0100
Subject: SLAB: Fix unlikely() annotation in __cache_alloc_node()

Branch profiling on my nehalem machine showed 99% incorrect branch hints:

  28459  7678524  99 __cache_alloc_node             slab.c     3551

Discussion on lkml [1] led to the solution to remove this hint.

[1] http://patchwork.kernel.org/patch/63517/

Signed-off-by: Tim Blechmann
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 84de47e350d..a07540e5843 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3320,7 +3320,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
-	if (unlikely(nodeid == -1))
+	if (nodeid == -1)
 		nodeid = numa_node_id();
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
--
cgit v1.2.3-18-g5258


From f3d8b53a3abbfd0b74fa5dfaa690870d9619fad9 Mon Sep 17 00:00:00 2001
From: "J. R. Okajima"
Date: Wed, 2 Dec 2009 16:55:49 +0900
Subject: slab, kmemleak: stop calling kmemleak_erase() unconditionally

When the object obtained is NULL (probably due to ENOMEM),
kmemleak_erase() is unnecessary here; it just writes NULL to an entry
that is already NULL. Add a condition.

Acked-by: Catalin Marinas
Signed-off-by: J. R. Okajima
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 7dfa481c96b..4e61449d794 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3109,7 +3109,8 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
 	 * treat the array pointers as a reference to the object.
 	 */
-	kmemleak_erase(&ac->entry[ac->avail]);
+	if (objp)
+		kmemleak_erase(&ac->entry[ac->avail]);
 	return objp;
 }
 
--
cgit v1.2.3-18-g5258


From ddbf2e8366f2a7fa3419be418cfd83a914d2527f Mon Sep 17 00:00:00 2001
From: "J. R. Okajima"
Date: Wed, 2 Dec 2009 16:55:50 +0900
Subject: slab, kmemleak: pass the correct pointer to kmemleak_erase()

In ____cache_alloc(), the variable 'ac' may be changed after
cache_alloc_refill() and the following kmemleak_erase() may get an
incorrect pointer. Update 'ac' after cache_alloc_refill()
unconditionally.

See the following URL for the discussion of this patch:

  http://marc.info/?l=linux-kernel&m=125873373124187&w=2

Acked-by: Catalin Marinas
Signed-off-by: J. R. Okajima
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 4e61449d794..66e90477a4b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3103,6 +3103,11 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	} else {
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
+		/*
+		 * the 'ac' may be updated by cache_alloc_refill(),
+		 * and kmemleak_erase() requires its correct value.
+		 */
+		ac = cpu_cache_get(cachep);
 	}
 	/*
 	 * To avoid a false negative, if an object that is in one of the
--
cgit v1.2.3-18-g5258


From 0f24f1287a86b198c1e4bd4ce45e8565e40ff804 Mon Sep 17 00:00:00 2001
From: Li Zefan
Date: Fri, 11 Dec 2009 15:45:30 +0800
Subject: tracing, slab: Define kmem_cache_alloc_notrace ifdef CONFIG_TRACING

Define kmem_cache_alloc_{,node}_notrace() if CONFIG_TRACING is enabled;
otherwise perf-kmem will show wrong stats ifndef CONFIG_KMEMTRACE,
because a kmalloc() memory allocation may be traced by both
trace_kmalloc() and trace_kmem_cache_alloc().

Signed-off-by: Li Zefan
Reviewed-by: Pekka Enberg
Cc: Christoph Lameter
Cc: Steven Rostedt
Cc: Frederic Weisbecker
Cc: linux-mm@kvack.org
Cc: Eduard - Gabriel Munteanu
LKML-Reference: <4B21F89A.7000801@cn.fujitsu.com>
Signed-off-by: Ingo Molnar
---
 mm/slab.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 7dfa481c96b..9733bb4009d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -490,7 +490,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 size_t slab_buffer_size(struct kmem_cache *cachep)
 {
 	return cachep->buffer_size;
@@ -3558,7 +3558,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
 {
 	return __cache_alloc(cachep, flags, __builtin_return_address(0));
@@ -3621,7 +3621,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 				    gfp_t flags,
 				    int nodeid)
--
cgit v1.2.3-18-g5258


From 0bb38a5cdeb39f543657ec6fb9950343d2de6918 Mon Sep 17 00:00:00 2001
From: Li Zefan
Date: Fri, 11 Dec 2009 15:45:50 +0800
Subject: tracing, slab: Fix no callsite ifndef CONFIG_KMEMTRACE

For slab, if CONFIG_KMEMTRACE and CONFIG_DEBUG_SLAB are not set,
__do_kmalloc() will not track callers:

 # ./perf record -f -a -R -e kmem:kmalloc
 ^C
 # ./perf trace
 ...
      perf-2204  [000]  147.376774: kmalloc: call_site=c0529d2d ...
      perf-2204  [000]  147.400997: kmalloc: call_site=c0529d2d ...
      Xorg-1461  [001]  147.405413: kmalloc: call_site=0 ...
      Xorg-1461  [001]  147.405609: kmalloc: call_site=0 ...
   konsole-1776  [001]  147.405786: kmalloc: call_site=0 ...

Signed-off-by: Li Zefan
Reviewed-by: Pekka Enberg
Cc: Christoph Lameter
Cc: Steven Rostedt
Cc: Frederic Weisbecker
Cc: linux-mm@kvack.org
Cc: Eduard - Gabriel Munteanu
LKML-Reference: <4B21F8AE.6020804@cn.fujitsu.com>
Signed-off-by: Ingo Molnar
---
 mm/slab.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 9733bb4009d..c3d092dca03 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3649,7 +3649,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 	return ret;
 }
 
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3669,7 +3669,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	return __do_kmalloc_node(size, flags, node, NULL);
 }
 EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_DEBUG_SLAB */
+#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
 #endif /* CONFIG_NUMA */
 
 /**
@@ -3701,7 +3701,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 }
 
 
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
--
cgit v1.2.3-18-g5258


From 58463c1fe25f7c4183f30f06a5a86cb6cd9d8231 Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Thu, 17 Dec 2009 11:43:12 -0600
Subject: cpumask: avoid deprecated function in mm/slab.c

These days we use cpumask_empty() which takes a pointer.

Signed-off-by: Rusty Russell
Acked-by: Christoph Lameter
---
 mm/slab.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 3f4822938f4..7560eb00637 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1132,7 +1132,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 		if (nc)
 			free_block(cachep, nc->entry, nc->avail, node);
 
-		if (!cpus_empty(*mask)) {
+		if (!cpumask_empty(mask)) {
 			spin_unlock_irq(&l3->list_lock);
 			goto free_array_cache;
 		}
--
cgit v1.2.3-18-g5258


From 00afa758067ac1c947149ef766adcdfe30c44d7d Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Sun, 27 Dec 2009 14:33:14 +0200
Subject: SLAB: Fix lockdep annotation breakage

Commit ce79ddc8e2376a9a93c7d42daf89bfcbb9187e62 ("SLAB: Fix lockdep
annotations for CPU hotplug") broke the init_node_lock_keys() off-slab
logic, which causes lockdep false positives.

Fix that up by reverting the logic back to the original while keeping
the CPU hotplug fixes intact.

Reported-and-tested-by: Heiko Carstens
Reported-and-tested-by: Andi Kleen
Signed-off-by: Pekka Enberg
---
 mm/slab.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'mm/slab.c')

diff --git a/mm/slab.c b/mm/slab.c
index 7d41f15b48d..7451bdacaf1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -654,7 +654,7 @@ static void init_node_lock_keys(int q)
 
 		l3 = s->cs_cachep->nodelists[q];
 		if (!l3 || OFF_SLAB(s->cs_cachep))
-			return;
+			continue;
 		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
 		alc = l3->alien;
 		/*
@@ -665,7 +665,7 @@ static void init_node_lock_keys(int q)
 		 * for alloc_alien_cache,
 		 */
 		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-			return;
+			continue;
 		for_each_node(r) {
 			if (alc[r])
 				lockdep_set_class(&alc[r]->lock,
--
cgit v1.2.3-18-g5258
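
For illustration only (this is not kernel code and not part of any patch
above): a minimal, self-contained userspace C sketch of the
init_node_lock_keys() cache walk, using made-up cache entries, showing why
'return' versus 'continue' on an off-slab cache matters. With 'return', as
introduced by ce79ddc8e237, the first off-slab cache aborts the whole walk
and every later cache is left unannotated; with 'continue', as restored by
00afa758067a, only that one cache is skipped.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the malloc_sizes[] walk; fields are made up. */
struct fake_cache {
	const char *name;
	bool off_slab;		/* models OFF_SLAB(s->cs_cachep) */
	bool annotated;		/* models lockdep_set_class() having run */
};

static struct fake_cache caches[] = {
	{ "size-32",   false, false },
	{ "size-4096", true,  false },	/* off-slab cache in the middle */
	{ "size-8192", false, false },
};

#define NR_CACHES (sizeof(caches) / sizeof(caches[0]))

/* Broken variant (ce79ddc8e237): 'return' aborts the whole walk. */
static void annotate_return(void)
{
	for (size_t i = 0; i < NR_CACHES; i++) {
		if (caches[i].off_slab)
			return;		/* size-8192 never gets annotated */
		caches[i].annotated = true;
	}
}

/* Fixed variant (00afa758067a): 'continue' skips only the off-slab cache. */
static void annotate_continue(void)
{
	for (size_t i = 0; i < NR_CACHES; i++) {
		if (caches[i].off_slab)
			continue;
		caches[i].annotated = true;
	}
}

int main(void)
{
	annotate_return();
	printf("'return' variant:   size-8192 annotated = %d\n",
	       caches[2].annotated);

	annotate_continue();
	printf("'continue' variant: size-8192 annotated = %d\n",
	       caches[2].annotated);
	return 0;
}

A cache whose locks never get lockdep_set_class() keeps its default lock
class, which is roughly what allowed lockdep to report the nested lock
acquisitions above as possible recursive locking.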