Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 86
1 files changed, 43 insertions, 43 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 578f68f3c51..fba51d6d4cc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -107,11 +107,17 @@
  * the fast path and disables lockless freelists.
  */
 
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+		SLAB_TRACE | SLAB_DEBUG_FREE)
+
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
 #ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG 1
+	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 #else
-#define SLABDEBUG 0
+	return 0;
 #endif
+}
 
 /*
  * Issues still to be resolved:
@@ -162,8 +168,8 @@
 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
 
 /* Internal SLUB flags */
-#define __OBJECT_POISON		0x80000000 /* Poison object */
-#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __OBJECT_POISON		0x80000000UL /* Poison object */
+#define __SYSFS_ADD_DEFERRED	0x40000000UL /* Not yet visible via sysfs */
 
 static int kmem_size = sizeof(struct kmem_cache);
 
@@ -1073,7 +1079,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 	flags |= __GFP_NOTRACK;
 
-	if (node == -1)
+	if (node == NUMA_NO_NODE)
 		return alloc_pages(flags, order);
 	else
 		return alloc_pages_exact_node(node, flags, order);
@@ -1157,9 +1163,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
-	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
-		__SetPageSlubDebug(page);
 
 	start = page_address(page);
@@ -1186,14 +1189,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
+	if (kmem_cache_debug(s)) {
 		void *p;
 
 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
 			check_object(s, page, p, 0);
-		__ClearPageSlubDebug(page);
 	}
 
 	kmemcheck_free_shadow(page, compound_order(page));
@@ -1387,10 +1389,10 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	int searchnode = (node == -1) ? numa_node_id() : node;
+	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
 	page = get_partial_node(get_node(s, searchnode));
-	if (page || (flags & __GFP_THISNODE))
+	if (page || node != -1)
 		return page;
 
 	return get_any_partial(s, flags);
@@ -1415,8 +1417,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 	} else {
 		stat(s, DEACTIVATE_FULL);
-		if (SLABDEBUG && PageSlubDebug(page) &&
-			(s->flags & SLAB_STORE_USER))
+		if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 	}
 	slab_unlock(page);
@@ -1515,7 +1516,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != -1 && c->node != node)
+	if (node != NUMA_NO_NODE && c->node != node)
 		return 0;
 #endif
 	return 1;
@@ -1624,7 +1625,7 @@ load_freelist:
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 	c->freelist = get_freepointer(s, object);
@@ -1727,7 +1728,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
@@ -1738,7 +1739,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
 #endif
@@ -1783,7 +1784,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 	slab_lock(page);
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 checks_ok:
@@ -2490,7 +2491,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
-		up_write(&slub_lock);
 		if (kmem_cache_close(s)) {
 			printk(KERN_ERR "SLUB %s: %s called for cache that "
 				"still has objects.\n", s->name, __func__);
@@ -2499,8 +2499,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 		sysfs_slab_remove(s);
-	} else
-		up_write(&slub_lock);
+	}
+	up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -2728,7 +2728,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
@@ -3118,9 +3118,12 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
-		kmalloc_caches[i]. name =
-			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+
+		BUG_ON(!s);
+		kmalloc_caches[i].name = s;
+	}
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
@@ -3223,14 +3226,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		 */
 		s->objsize = max(s->objsize, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
-		up_write(&slub_lock);
 
 		if (sysfs_slab_alias(s, name)) {
-			down_write(&slub_lock);
 			s->refcount--;
-			up_write(&slub_lock);
 			goto err;
 		}
+		up_write(&slub_lock);
 		return s;
 	}
@@ -3239,14 +3240,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, GFP_KERNEL, name,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
-			up_write(&slub_lock);
 			if (sysfs_slab_add(s)) {
-				down_write(&slub_lock);
 				list_del(&s->list);
-				up_write(&slub_lock);
 				kfree(s);
 				goto err;
 			}
+			up_write(&slub_lock);
 			return s;
 		}
 		kfree(s);
@@ -3312,7 +3311,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
 
 	/* Honor the call site pointer we recieved. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -3395,16 +3394,6 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
 			s->name, page);
-
-	if (s->flags & DEBUG_DEFAULT_FLAGS) {
-		if (!PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug not set "
-				"on slab 0x%p\n", s->name, page);
-	} else {
-		if (PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug set on "
-				"slab 0x%p\n", s->name, page);
-	}
 }
 
 static int validate_slab_node(struct kmem_cache *s,
@@ -4504,6 +4493,13 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
 static void sysfs_slab_remove(struct kmem_cache *s)
 {
+	if (slab_state < SYSFS)
+		/*
+		 * Sysfs has not been setup yet so no need to remove the
+		 * cache from sysfs.
+		 */
+		return;
+
 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
 	kobject_del(&s->kobj);
 	kobject_put(&s->kobj);
@@ -4549,8 +4545,11 @@ static int __init slab_sysfs_init(void)
 	struct kmem_cache *s;
 	int err;
 
+	down_write(&slub_lock);
+
 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
 	if (!slab_kset) {
+		up_write(&slub_lock);
 		printk(KERN_ERR "Cannot register slab subsystem.\n");
 		return -ENOSYS;
 	}
@@ -4575,6 +4574,7 @@ static int __init slab_sysfs_init(void)
 		kfree(al);
 	}
 
+	up_write(&slub_lock);
 	resiliency_test();
 	return 0;
 }