author      Christoph Lameter <cl@linux.com>    2012-09-04 23:18:33 +0000
committer   Pekka Enberg <penberg@kernel.org>   2012-09-05 12:00:37 +0300
commit      8a13a4cc80bb25c9eab2e7e56bab724fcfa55fce
tree        a212edb3d0b139b0743ca5ca34c14037a6ada4dc
parent      278b1bb1313664d4999a7f7d47a8a8d964862d02
mm/sl[aou]b: Shrink __kmem_cache_create() parameter lists
Do the initial setup of the fields in common code. This will allow us
to push more processing into common code later and improve readability.
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
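
The shape of the interface change, as a standalone C sketch: the caller now
fills in the cache fields up front, and the create function receives only the
cache pointer and the flags. The struct below is a simplified, hypothetical
stand-in for struct kmem_cache, and cache_create() stands in for
__kmem_cache_create(); this is a userspace sketch, not kernel code.

#include <stddef.h>
#include <stdio.h>

/* Simplified, hypothetical stand-in for the struct kmem_cache fields
 * that callers now initialize directly; not the kernel definition. */
struct cache {
	const char *name;
	size_t size;		/* allocated size of each object */
	size_t object_size;	/* original object size */
	size_t align;
	void (*ctor)(void *);
	unsigned long flags;
};

/*
 * Old convention (removed by this patch):
 *	__kmem_cache_create(s, name, size, align, flags, ctor);
 * New convention: common code sets the fields first, then passes
 * only the cache pointer and the flags.
 */
static int cache_create(struct cache *c, unsigned long flags)
{
	c->flags = flags;
	printf("cache %s: size=%zu align=%zu\n", c->name, c->size, c->align);
	return 0;
}

int main(void)
{
	struct cache c = {0};

	c.name = "example-cache";
	c.object_size = c.size = 128;
	c.align = sizeof(void *);
	c.ctor = NULL;

	return cache_create(&c, 0);
}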
-rw-r--r--   mm/slab.c        | 73
-rw-r--r--   mm/slab.h        |  3
-rw-r--r--   mm/slab_common.c | 26
-rw-r--r--   mm/slob.c        |  8
-rw-r--r--   mm/slub.c        | 39
5 files changed, 68 insertions(+), 81 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index abc83334e5f..f1f6d54e129 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1677,20 +1677,20 @@ void __init kmem_cache_init(void)
 	 */

 	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, names[INDEX_AC].name,
-					sizes[INDEX_AC].cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
-
+	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
+	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
 	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
+
 	if (INDEX_AC != INDEX_L3) {
 		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, names[INDEX_L3].name,
-			sizes[INDEX_L3].cs_size,
-			ARCH_KMALLOC_MINALIGN,
-			ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-			NULL);
+		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
+		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
 		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
 	}
@@ -1706,22 +1706,21 @@ void __init kmem_cache_init(void)
 		 */
 		if (!sizes->cs_cachep) {
 			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-			__kmem_cache_create(sizes->cs_cachep, names->name,
-					sizes->cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+			sizes->cs_cachep->name = names->name;
+			sizes->cs_cachep->size = sizes->cs_size;
+			sizes->cs_cachep->object_size = sizes->cs_size;
+			sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+			__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
 			list_add(&sizes->cs_cachep->list, &slab_caches);
 		}
 #ifdef CONFIG_ZONE_DMA
 		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		sizes->cs_dmacachep->name = names->name_dma;
+		sizes->cs_dmacachep->size = sizes->cs_size;
+		sizes->cs_dmacachep->object_size = sizes->cs_size;
+		sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
 		__kmem_cache_create(sizes->cs_dmacachep,
-			   names->name_dma,
-			   sizes->cs_size,
-			   ARCH_KMALLOC_MINALIGN,
-			   ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
-				SLAB_PANIC,
-			   NULL);
+			   ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
 		list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
 		sizes++;
@@ -2360,12 +2359,12 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * as davem.
  */
 int
-__kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
-	unsigned long flags, void (*ctor)(void *))
+__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
 	size_t left_over, slab_size, ralign;
 	gfp_t gfp;
 	int err;
+	size_t size = cachep->size;

 #if DEBUG
 #if FORCED_DEBUG
@@ -2437,8 +2436,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
 		ralign = ARCH_SLAB_MINALIGN;
 	}
 	/* 3) caller mandated alignment */
-	if (ralign < align) {
-		ralign = align;
+	if (ralign < cachep->align) {
+		ralign = cachep->align;
 	}
 	/* disable debug if necessary */
 	if (ralign > __alignof__(unsigned long long))
@@ -2446,7 +2445,7 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
 	/*
 	 * 4) Store it.
 	 */
-	align = ralign;
+	cachep->align = ralign;

 	if (slab_is_available())
 		gfp = GFP_KERNEL;
@@ -2454,8 +2453,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
 		gfp = GFP_NOWAIT;

 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
-	cachep->object_size = size;
-	cachep->align = align;
 #if DEBUG
 	/*
@@ -2500,17 +2497,15 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
		 */
		flags |= CFLGS_OFF_SLAB;

-	size = ALIGN(size, align);
+	size = ALIGN(size, cachep->align);

-	left_over = calculate_slab_order(cachep, size, align, flags);
+	left_over = calculate_slab_order(cachep, size, cachep->align, flags);

-	if (!cachep->num) {
-		printk(KERN_ERR
-		       "kmem_cache_create: couldn't create cache %s.\n", name);
+	if (!cachep->num)
 		return -E2BIG;
-	}
+
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
-			  + sizeof(struct slab), align);
+			  + sizeof(struct slab), cachep->align);

 	/*
 	 * If the slab has been placed off-slab, and we have enough space then
@@ -2538,8 +2533,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
 	cachep->colour_off = cache_line_size();
 	/* Offset must be a multiple of the alignment. */
-	if (cachep->colour_off < align)
-		cachep->colour_off = align;
+	if (cachep->colour_off < cachep->align)
+		cachep->colour_off = cachep->align;
 	cachep->colour = left_over / cachep->colour_off;
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
@@ -2560,8 +2555,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, s
		 */
		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
-	cachep->ctor = ctor;
-	cachep->name = name;
 	cachep->refcount = 1;

 	err = setup_cpu_cache(cachep, gfp);
diff --git a/mm/slab.h b/mm/slab.h
index 077b07a24ef..67aeaa2d39c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -33,8 +33,7 @@ extern struct list_head slab_caches;
 extern struct kmem_cache *kmem_cache;

 /* Functions provided by the slab allocators */
-extern int __kmem_cache_create(struct kmem_cache *, const char *name,
-	size_t size, size_t align, unsigned long flags, void (*ctor)(void *));
+extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

 #ifdef CONFIG_SLUB
 struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f50d2ed4fbf..8a85a19d90e 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -100,7 +100,6 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 {
 	struct kmem_cache *s = NULL;
 	int err = 0;
-	char *n;

 	get_online_cpus();
 	mutex_lock(&slab_mutex);
@@ -109,32 +108,33 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
		goto out_locked;

-	n = kstrdup(name, GFP_KERNEL);
-	if (!n) {
-		err = -ENOMEM;
-		goto out_locked;
-	}
-
 	s = __kmem_cache_alias(name, size, align, flags, ctor);
 	if (s)
		goto out_locked;

 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 	if (s) {
-		err = __kmem_cache_create(s, n, size, align, flags, ctor);
+		s->object_size = s->size = size;
+		s->align = align;
+		s->ctor = ctor;
+		s->name = kstrdup(name, GFP_KERNEL);
+		if (!s->name) {
+			kmem_cache_free(kmem_cache, s);
+			err = -ENOMEM;
+			goto out_locked;
+		}
+
+		err = __kmem_cache_create(s, flags);
		if (!err)
			list_add(&s->list, &slab_caches);
		else {
-			kfree(n);
+			kfree(s->name);
			kmem_cache_free(kmem_cache, s);
		}
-
-	} else {
-		kfree(n);
+	} else
		err = -ENOMEM;
-	}

 out_locked:
 	mutex_unlock(&slab_mutex);
diff --git a/mm/slob.c b/mm/slob.c
index 9b0cee1e847..cac05d92f32 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -508,17 +508,15 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);

-int __kmem_cache_create(struct kmem_cache *c, const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-	c->name = name;
-	c->size = size;
+	size_t align = c->size;
+
 	if (flags & SLAB_DESTROY_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
 	}
 	c->flags = flags;
-	c->ctor = ctor;
 	/* ignore alignment unless it's forced */
 	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 	if (c->align < ARCH_SLAB_MINALIGN)
diff --git a/mm/slub.c b/mm/slub.c
index 0ad3fffc7d2..d8ee419d5a1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3029,16 +3029,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)

 }

-static int kmem_cache_open(struct kmem_cache *s,
-		const char *name, size_t size,
-		size_t align, unsigned long flags,
-		void (*ctor)(void *))
+static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 {
-	s->name = name;
-	s->ctor = ctor;
-	s->object_size = size;
-	s->align = align;
-	s->flags = kmem_cache_flags(size, flags, name, ctor);
+	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
 	s->reserved = 0;

 	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
@@ -3115,7 +3108,7 @@ error:
 	if (flags & SLAB_PANIC)
		panic("Cannot create slab %s size=%lu realsize=%u "
			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)size, s->size, oo_order(s->oo),
+			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
			s->offset, flags);
 	return -EINVAL;
 }
@@ -3261,12 +3254,15 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,

 	s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

+	s->name = name;
+	s->size = s->object_size = size;
+	s->align = ARCH_KMALLOC_MINALIGN;
+
 	/*
 	 * This function is called with IRQs disabled during early-boot on
 	 * single CPU so there's no need to take slab_mutex here.
 	 */
-	if (kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
-								flags, NULL))
+	if (kmem_cache_open(s, flags))
		goto panic;

 	list_add(&s->list, &slab_caches);
@@ -3719,9 +3715,10 @@ void __init kmem_cache_init(void)
 	 */
 	kmem_cache_node = (void *)kmem_cache + kmalloc_size;

-	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
-		sizeof(struct kmem_cache_node),
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache_node->name = "kmem_cache_node";
+	kmem_cache_node->size = kmem_cache_node->object_size =
+		sizeof(struct kmem_cache_node);
+	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);

 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);

@@ -3729,8 +3726,10 @@
 	slab_state = PARTIAL;

 	temp_kmem_cache = kmem_cache;
-	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache->name = "kmem_cache";
+	kmem_cache->size = kmem_cache->object_size = kmem_size;
+	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+
 	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
 	memcpy(kmem_cache, temp_kmem_cache, kmem_size);

@@ -3943,11 +3942,9 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 	return s;
 }

-int __kmem_cache_create(struct kmem_cache *s,
-		const char *name, size_t size,
-		size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 {
-	return kmem_cache_open(s, name, size, align, flags, ctor);
+	return kmem_cache_open(s, flags);
 }

 #ifdef CONFIG_SMP
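
A subtlety in the mm/slab_common.c hunk: ownership of the kstrdup()'d name
moves into the cache object itself, so the error path frees s->name rather
than the old local pointer n. A minimal userspace sketch of that ownership
pattern, with strdup() standing in for kstrdup() and illustrative names
throughout:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cache {
	char *name;
};

/*
 * Illustrative analogue of the new kmem_cache_create() flow: once
 * strdup() succeeds, the object owns its name, so every later error
 * exit must free c->name rather than a local temporary like the old
 * code's "n".
 */
static struct cache *cache_create(const char *name, int *errp)
{
	struct cache *c = calloc(1, sizeof(*c));

	if (!c) {
		*errp = -ENOMEM;
		return NULL;
	}
	c->name = strdup(name);
	if (!c->name) {
		free(c);	/* name was never attached */
		*errp = -ENOMEM;
		return NULL;
	}
	/* The real code calls __kmem_cache_create(s, flags) here; on
	 * failure it does kfree(s->name), then frees the object. */
	*errp = 0;
	return c;
}

int main(void)
{
	int err;
	struct cache *c = cache_create("example-cache", &err);

	if (!c)
		return 1;
	printf("created %s\n", c->name);
	free(c->name);
	free(c);
	return 0;
}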