author    | Pekka Enberg <penberg@cs.helsinki.fi>  | 2006-01-08 01:00:37 -0800
committer | Linus Torvalds <torvalds@g5.osdl.org>  | 2006-01-08 20:12:39 -0800
commit    | b28a02de8c70d41d6b6ba8911e83ed3ccf2e13f8 (patch)
tree      | dee0feb5d1d9c007d94a9f5d6f3cbf8a83b46b39
parent    | 4d268eba1187ef66844a6a33b9431e5d0dadd4ad (diff)
[PATCH] slab: fix code formatting
The slab allocator code is inconsistent in coding style and messy. For this
patch, I ran Lindent on mm/slab.c and fixed up the goofs by hand.
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | mm/slab.c | 964 |
1 files changed, 500 insertions, 464 deletions
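
The change is mechanical whitespace cleanup. As a reading aid for the hunks below, here is a minimal, hypothetical C sketch (the identifiers are illustrative and do not appear in mm/slab.c) of the conventions Lindent applies: pointer declarations written as `void *p` rather than `void * p`, spaces around binary operators, and no hand-aligned member columns.

```c
#include <stddef.h>

/*
 * Hypothetical example, not taken from mm/slab.c: each line shows the
 * post-Lindent form, with the pre-Lindent form in the trailing comment.
 */
struct example_entry {
	struct example_entry *next;	/* was: struct example_entry   *next; */
	unsigned int inuse;		/* was: unsigned int           inuse;  */
	void *data;			/* was: void * data;                   */
};

static size_t example_bytes_used(size_t count, size_t size, size_t extra)
{
	/* was: return count*size + count*extra; */
	return count * size + count * extra;
}
```

The patch applies the same transformations throughout mm/slab.c, and additionally re-indents continuation lines of multi-line calls under the opening parenthesis, as the hunks below show.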
diff --git a/mm/slab.c b/mm/slab.c index 2551b1eeadb..f71d8be2f4e 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -130,7 +130,6 @@ #define FORCED_DEBUG 0 #endif - /* Shouldn't this be in a header file somewhere? */ #define BYTES_PER_WORD sizeof(void *) @@ -217,12 +216,12 @@ static unsigned long offslab_limit; * Slabs are chained into three list: fully used, partial, fully free slabs. */ struct slab { - struct list_head list; - unsigned long colouroff; - void *s_mem; /* including colour offset */ - unsigned int inuse; /* num of objs active in slab */ - kmem_bufctl_t free; - unsigned short nodeid; + struct list_head list; + unsigned long colouroff; + void *s_mem; /* including colour offset */ + unsigned int inuse; /* num of objs active in slab */ + kmem_bufctl_t free; + unsigned short nodeid; }; /* @@ -242,9 +241,9 @@ struct slab { * We assume struct slab_rcu can overlay struct slab when destroying. */ struct slab_rcu { - struct rcu_head head; - kmem_cache_t *cachep; - void *addr; + struct rcu_head head; + kmem_cache_t *cachep; + void *addr; }; /* @@ -279,23 +278,23 @@ struct array_cache { #define BOOT_CPUCACHE_ENTRIES 1 struct arraycache_init { struct array_cache cache; - void * entries[BOOT_CPUCACHE_ENTRIES]; + void *entries[BOOT_CPUCACHE_ENTRIES]; }; /* * The slab lists for all objects. */ struct kmem_list3 { - struct list_head slabs_partial; /* partial list first, better asm code */ - struct list_head slabs_full; - struct list_head slabs_free; - unsigned long free_objects; - unsigned long next_reap; - int free_touched; - unsigned int free_limit; - spinlock_t list_lock; - struct array_cache *shared; /* shared per node */ - struct array_cache **alien; /* on other nodes */ + struct list_head slabs_partial; /* partial list first, better asm code */ + struct list_head slabs_full; + struct list_head slabs_free; + unsigned long free_objects; + unsigned long next_reap; + int free_touched; + unsigned int free_limit; + spinlock_t list_lock; + struct array_cache *shared; /* shared per node */ + struct array_cache **alien; /* on other nodes */ }; /* @@ -367,63 +366,63 @@ static inline void kmem_list3_init(struct kmem_list3 *parent) * * manages a cache. */ - + struct kmem_cache { /* 1) per-cpu data, touched during every alloc/free */ - struct array_cache *array[NR_CPUS]; - unsigned int batchcount; - unsigned int limit; - unsigned int shared; - unsigned int objsize; + struct array_cache *array[NR_CPUS]; + unsigned int batchcount; + unsigned int limit; + unsigned int shared; + unsigned int objsize; /* 2) touched by every alloc & free from the backend */ - struct kmem_list3 *nodelists[MAX_NUMNODES]; - unsigned int flags; /* constant flags */ - unsigned int num; /* # of objs per slab */ - spinlock_t spinlock; + struct kmem_list3 *nodelists[MAX_NUMNODES]; + unsigned int flags; /* constant flags */ + unsigned int num; /* # of objs per slab */ + spinlock_t spinlock; /* 3) cache_grow/shrink */ /* order of pgs per slab (2^n) */ - unsigned int gfporder; + unsigned int gfporder; /* force GFP flags, e.g. 
GFP_DMA */ - gfp_t gfpflags; + gfp_t gfpflags; - size_t colour; /* cache colouring range */ - unsigned int colour_off; /* colour offset */ - unsigned int colour_next; /* cache colouring */ - kmem_cache_t *slabp_cache; - unsigned int slab_size; - unsigned int dflags; /* dynamic flags */ + size_t colour; /* cache colouring range */ + unsigned int colour_off; /* colour offset */ + unsigned int colour_next; /* cache colouring */ + kmem_cache_t *slabp_cache; + unsigned int slab_size; + unsigned int dflags; /* dynamic flags */ /* constructor func */ - void (*ctor)(void *, kmem_cache_t *, unsigned long); + void (*ctor) (void *, kmem_cache_t *, unsigned long); /* de-constructor func */ - void (*dtor)(void *, kmem_cache_t *, unsigned long); + void (*dtor) (void *, kmem_cache_t *, unsigned long); /* 4) cache creation/removal */ - const char *name; - struct list_head next; + const char *name; + struct list_head next; /* 5) statistics */ #if STATS - unsigned long num_active; - unsigned long num_allocations; - unsigned long high_mark; - unsigned long grown; - unsigned long reaped; - unsigned long errors; - unsigned long max_freeable; - unsigned long node_allocs; - unsigned long node_frees; - atomic_t allochit; - atomic_t allocmiss; - atomic_t freehit; - atomic_t freemiss; + unsigned long num_active; + unsigned long num_allocations; + unsigned long high_mark; + unsigned long grown; + unsigned long reaped; + unsigned long errors; + unsigned long max_freeable; + unsigned long node_allocs; + unsigned long node_frees; + atomic_t allochit; + atomic_t allocmiss; + atomic_t freehit; + atomic_t freemiss; #endif #if DEBUG - int dbghead; - int reallen; + int dbghead; + int reallen; #endif }; @@ -523,14 +522,15 @@ static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); if (cachep->flags & SLAB_STORE_USER) - return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD); - return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD); + return (unsigned long *)(objp + cachep->objsize - + 2 * BYTES_PER_WORD); + return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD); } static void **dbg_userword(kmem_cache_t *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_STORE_USER)); - return (void**)(objp+cachep->objsize-BYTES_PER_WORD); + return (void **)(objp + cachep->objsize - BYTES_PER_WORD); } #else @@ -607,31 +607,31 @@ struct cache_names { static struct cache_names __initdata cache_names[] = { #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, #include <linux/kmalloc_sizes.h> - { NULL, } + {NULL,} #undef CACHE }; static struct arraycache_init initarray_cache __initdata = - { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; + { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; static struct arraycache_init initarray_generic = - { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; + { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; /* internal cache of cache description objs */ static kmem_cache_t cache_cache = { - .batchcount = 1, - .limit = BOOT_CPUCACHE_ENTRIES, - .shared = 1, - .objsize = sizeof(kmem_cache_t), - .flags = SLAB_NO_REAP, - .spinlock = SPIN_LOCK_UNLOCKED, - .name = "kmem_cache", + .batchcount = 1, + .limit = BOOT_CPUCACHE_ENTRIES, + .shared = 1, + .objsize = sizeof(kmem_cache_t), + .flags = SLAB_NO_REAP, + .spinlock = SPIN_LOCK_UNLOCKED, + .name = "kmem_cache", #if DEBUG - .reallen = sizeof(kmem_cache_t), + .reallen = sizeof(kmem_cache_t), #endif }; /* Guard access to the cache-chain. 
*/ -static struct semaphore cache_chain_sem; +static struct semaphore cache_chain_sem; static struct list_head cache_chain; /* @@ -655,9 +655,9 @@ static enum { static DEFINE_PER_CPU(struct work_struct, reap_work); -static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node); -static void enable_cpucache (kmem_cache_t *cachep); -static void cache_reap (void *unused); +static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node); +static void enable_cpucache(kmem_cache_t *cachep); +static void cache_reap(void *unused); static int __node_shrink(kmem_cache_t *cachep, int node); static inline struct array_cache *ac_data(kmem_cache_t *cachep) @@ -671,9 +671,9 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags) #if DEBUG /* This happens if someone tries to call - * kmem_cache_create(), or __kmalloc(), before - * the generic caches are initialized. - */ + * kmem_cache_create(), or __kmalloc(), before + * the generic caches are initialized. + */ BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); #endif while (size > csizep->cs_size) @@ -697,10 +697,10 @@ EXPORT_SYMBOL(kmem_find_general_cachep); /* Cal the num objs, wastage, and bytes left over for a given slab size. */ static void cache_estimate(unsigned long gfporder, size_t size, size_t align, - int flags, size_t *left_over, unsigned int *num) + int flags, size_t *left_over, unsigned int *num) { int i; - size_t wastage = PAGE_SIZE<<gfporder; + size_t wastage = PAGE_SIZE << gfporder; size_t extra = 0; size_t base = 0; @@ -709,7 +709,7 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align, extra = sizeof(kmem_bufctl_t); } i = 0; - while (i*size + ALIGN(base+i*extra, align) <= wastage) + while (i * size + ALIGN(base + i * extra, align) <= wastage) i++; if (i > 0) i--; @@ -718,8 +718,8 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align, i = SLAB_LIMIT; *num = i; - wastage -= i*size; - wastage -= ALIGN(base+i*extra, align); + wastage -= i * size; + wastage -= ALIGN(base + i * extra, align); *left_over = wastage; } @@ -728,7 +728,7 @@ static void cache_estimate(unsigned long gfporder, size_t size, size_t align, static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg) { printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", - function, cachep->name, msg); + function, cachep->name, msg); dump_stack(); } @@ -755,9 +755,9 @@ static void __devinit start_cpu_timer(int cpu) } static struct array_cache *alloc_arraycache(int node, int entries, - int batchcount) + int batchcount) { - int memsize = sizeof(void*)*entries+sizeof(struct array_cache); + int memsize = sizeof(void *) * entries + sizeof(struct array_cache); struct array_cache *nc = NULL; nc = kmalloc_node(memsize, GFP_KERNEL, node); @@ -775,7 +775,7 @@ static struct array_cache *alloc_arraycache(int node, int entries, static inline struct array_cache **alloc_alien_cache(int node, int limit) { struct array_cache **ac_ptr; - int memsize = sizeof(void*)*MAX_NUMNODES; + int memsize = sizeof(void *) * MAX_NUMNODES; int i; if (limit > 1) @@ -789,7 +789,7 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit) } ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d); if (!ac_ptr[i]) { - for (i--; i <=0; i--) + for (i--; i <= 0; i--) kfree(ac_ptr[i]); kfree(ac_ptr); return NULL; @@ -807,12 +807,13 @@ static inline void free_alien_cache(struct array_cache **ac_ptr) return; for_each_node(i) - kfree(ac_ptr[i]); + kfree(ac_ptr[i]); kfree(ac_ptr); } 
-static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache *ac, int node) +static inline void __drain_alien_cache(kmem_cache_t *cachep, + struct array_cache *ac, int node) { struct kmem_list3 *rl3 = cachep->nodelists[node]; @@ -826,7 +827,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3) { - int i=0; + int i = 0; struct array_cache *ac; unsigned long flags; @@ -846,10 +847,10 @@ static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3) #endif static int __devinit cpuup_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) + unsigned long action, void *hcpu) { long cpu = (long)hcpu; - kmem_cache_t* cachep; + kmem_cache_t *cachep; struct kmem_list3 *l3 = NULL; int node = cpu_to_node(cpu); int memsize = sizeof(struct kmem_list3); @@ -871,27 +872,27 @@ static int __devinit cpuup_callback(struct notifier_block *nfb, */ if (!cachep->nodelists[node]) { if (!(l3 = kmalloc_node(memsize, - GFP_KERNEL, node))) + GFP_KERNEL, node))) goto bad; kmem_list3_init(l3); l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + - ((unsigned long)cachep)%REAPTIMEOUT_LIST3; + ((unsigned long)cachep) % REAPTIMEOUT_LIST3; cachep->nodelists[node] = l3; } spin_lock_irq(&cachep->nodelists[node]->list_lock); cachep->nodelists[node]->free_limit = - (1 + nr_cpus_node(node)) * - cachep->batchcount + cachep->num; + (1 + nr_cpus_node(node)) * + cachep->batchcount + cachep->num; spin_unlock_irq(&cachep->nodelists[node]->list_lock); } /* Now we can go ahead with allocating the shared array's - & array cache's */ + & array cache's */ list_for_each_entry(cachep, &cache_chain, next) { nc = alloc_arraycache(node, cachep->limit, - cachep->batchcount); + cachep->batchcount); if (!nc) goto bad; cachep->array[cpu] = nc; @@ -900,12 +901,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb, BUG_ON(!l3); if (!l3->shared) { if (!(nc = alloc_arraycache(node, - cachep->shared*cachep->batchcount, - 0xbaadf00d))) - goto bad; + cachep->shared * + cachep->batchcount, + 0xbaadf00d))) + goto bad; /* we are serialised from CPU_DEAD or - CPU_UP_CANCELLED by the cpucontrol lock */ + CPU_UP_CANCELLED by the cpucontrol lock */ l3->shared = nc; } } @@ -942,13 +944,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb, free_block(cachep, nc->entry, nc->avail, node); if (!cpus_empty(mask)) { - spin_unlock(&l3->list_lock); - goto unlock_cache; - } + spin_unlock(&l3->list_lock); + goto unlock_cache; + } if (l3->shared) { free_block(cachep, l3->shared->entry, - l3->shared->avail, node); + l3->shared->avail, node); kfree(l3->shared); l3->shared = NULL; } @@ -966,7 +968,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb, } else { spin_unlock(&l3->list_lock); } -unlock_cache: + unlock_cache: spin_unlock_irq(&cachep->spinlock); kfree(nc); } @@ -975,7 +977,7 @@ unlock_cache: #endif } return NOTIFY_OK; -bad: + bad: up(&cache_chain_sem); return NOTIFY_BAD; } @@ -985,8 +987,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 }; /* * swap the static kmem_list3 with kmalloced memory */ -static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, - int nodeid) +static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid) { struct kmem_list3 *ptr; @@ -1055,14 +1056,14 @@ void __init kmem_cache_init(void) cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size()); cache_estimate(0, 
cache_cache.objsize, cache_line_size(), 0, - &left_over, &cache_cache.num); + &left_over, &cache_cache.num); if (!cache_cache.num) BUG(); - cache_cache.colour = left_over/cache_cache.colour_off; + cache_cache.colour = left_over / cache_cache.colour_off; cache_cache.colour_next = 0; - cache_cache.slab_size = ALIGN(cache_cache.num*sizeof(kmem_bufctl_t) + - sizeof(struct slab), cache_line_size()); + cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) + + sizeof(struct slab), cache_line_size()); /* 2+3) create the kmalloc caches */ sizes = malloc_sizes; @@ -1074,14 +1075,18 @@ void __init kmem_cache_init(void) */ sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, - sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN, - (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL); + sizes[INDEX_AC].cs_size, + ARCH_KMALLOC_MINALIGN, + (ARCH_KMALLOC_FLAGS | + SLAB_PANIC), NULL, NULL); if (INDEX_AC != INDEX_L3) sizes[INDEX_L3].cs_cachep = - kmem_cache_create(names[INDEX_L3].name, - sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN, - (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL); + kmem_cache_create(names[INDEX_L3].name, + sizes[INDEX_L3].cs_size, + ARCH_KMALLOC_MINALIGN, + (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, + NULL); while (sizes->cs_size != ULONG_MAX) { /* @@ -1091,35 +1096,41 @@ void __init kmem_cache_init(void) * Note for systems short on memory removing the alignment will * allow tighter packing of the smaller caches. */ - if(!sizes->cs_cachep) + if (!sizes->cs_cachep) sizes->cs_cachep = kmem_cache_create(names->name, - sizes->cs_size, ARCH_KMALLOC_MINALIGN, - (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL); + sizes->cs_size, + ARCH_KMALLOC_MINALIGN, + (ARCH_KMALLOC_FLAGS + | SLAB_PANIC), + NULL, NULL); /* Inc off-slab bufctl limit until the ceiling is hit. 
*/ if (!(OFF_SLAB(sizes->cs_cachep))) { - offslab_limit = sizes->cs_size-sizeof(struct slab); + offslab_limit = sizes->cs_size - sizeof(struct slab); offslab_limit /= sizeof(kmem_bufctl_t); } sizes->cs_dmacachep = kmem_cache_create(names->name_dma, - sizes->cs_size, ARCH_KMALLOC_MINALIGN, - (ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC), - NULL, NULL); + sizes->cs_size, + ARCH_KMALLOC_MINALIGN, + (ARCH_KMALLOC_FLAGS | + SLAB_CACHE_DMA | + SLAB_PANIC), NULL, + NULL); sizes++; names++; } /* 4) Replace the bootstrap head arrays */ { - void * ptr; + void *ptr; ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); local_irq_disable(); BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache); memcpy(ptr, ac_data(&cache_cache), - sizeof(struct arraycache_init)); + sizeof(struct arraycache_init)); cache_cache.array[smp_processor_id()] = ptr; local_irq_enable(); @@ -1127,11 +1138,11 @@ void __init kmem_cache_init(void) local_irq_disable(); BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep) - != &initarray_generic.cache); + != &initarray_generic.cache); memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep), - sizeof(struct arraycache_init)); + sizeof(struct arraycache_init)); malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = - ptr; + ptr; local_irq_enable(); } /* 5) Replace the bootstrap kmem_list3's */ @@ -1139,16 +1150,16 @@ void __init kmem_cache_init(void) int node; /* Replace the static kmem_list3 structures for the boot cpu */ init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], - numa_node_id()); + numa_node_id()); for_each_online_node(node) { init_list(malloc_sizes[INDEX_AC].cs_cachep, - &initkmem_list3[SIZE_AC+node], node); + &initkmem_list3[SIZE_AC + node], node); if (INDEX_AC != INDEX_L3) { init_list(malloc_sizes[INDEX_L3].cs_cachep, - &initkmem_list3[SIZE_L3+node], - node); + &initkmem_list3[SIZE_L3 + node], + node); } } } @@ -1158,7 +1169,7 @@ void __init kmem_cache_init(void) kmem_cache_t *cachep; down(&cache_chain_sem); list_for_each_entry(cachep, &cache_chain, next) - enable_cpucache(cachep); + enable_cpucache(cachep); up(&cache_chain_sem); } @@ -1184,7 +1195,7 @@ static int __init cpucache_init(void) * pages to gfp. 
*/ for_each_online_cpu(cpu) - start_cpu_timer(cpu); + start_cpu_timer(cpu); return 0; } @@ -1226,7 +1237,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid) */ static void kmem_freepages(kmem_cache_t *cachep, void *addr) { - unsigned long i = (1<<cachep->gfporder); + unsigned long i = (1 << cachep->gfporder); struct page *page = virt_to_page(addr); const unsigned long nr_freed = i; @@ -1239,13 +1250,13 @@ static void kmem_freepages(kmem_cache_t *cachep, void *addr) if (current->reclaim_state) current->reclaim_state->reclaimed_slab += nr_freed; free_pages((unsigned long)addr, cachep->gfporder); - if (cachep->flags & SLAB_RECLAIM_ACCOUNT) - atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages); + if (cachep->flags & SLAB_RECLAIM_ACCOUNT) + atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages); } static void kmem_rcu_free(struct rcu_head *head) { - struct slab_rcu *slab_rcu = (struct slab_rcu *) head; + struct slab_rcu *slab_rcu = (struct slab_rcu *)head; kmem_cache_t *cachep = slab_rcu->cachep; kmem_freepages(cachep, slab_rcu->addr); @@ -1257,19 +1268,19 @@ static void kmem_rcu_free(struct rcu_head *head) #ifdef CONFIG_DEBUG_PAGEALLOC static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, - unsigned long caller) + unsigned long caller) { int size = obj_reallen(cachep); - addr = (unsigned long *)&((char*)addr)[obj_dbghead(cachep)]; + addr = (unsigned long *)&((char *)addr)[obj_dbghead(cachep)]; - if (size < 5*sizeof(unsigned long)) + if (size < 5 * sizeof(unsigned long)) return; - *addr++=0x12345678; - *addr++=caller; - *addr++=smp_processor_id(); - size -= 3*sizeof(unsigned long); + *addr++ = 0x12345678; + *addr++ = caller; + *addr++ = smp_processor_id(); + size -= 3 * sizeof(unsigned long); { unsigned long *sptr = &caller; unsigned long svalue; @@ -1277,7 +1288,7 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, while (!kstack_end(sptr)) { svalue = *sptr++; if (kernel_text_address(svalue)) { - *addr++=svalue; + *addr++ = svalue; size -= sizeof(unsigned long); if (size <= sizeof(unsigned long)) break; @@ -1285,25 +1296,25 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, } } - *addr++=0x87654321; + *addr++ = 0x87654321; } #endif static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val) { int size = obj_reallen(cachep); - addr = &((char*)addr)[obj_dbghead(cachep)]; + addr = &((char *)addr)[obj_dbghead(cachep)]; memset(addr, val, size); - *(unsigned char *)(addr+size-1) = POISON_END; + *(unsigned char *)(addr + size - 1) = POISON_END; } static void dump_line(char *data, int offset, int limit) { int i; printk(KERN_ERR "%03x:", offset); - for (i=0;i<limit;i++) { - printk(" %02x", (unsigned char)data[offset+i]); + for (i = 0; i < limit; i++) { + printk(" %02x", (unsigned char)data[offset + i]); } printk("\n"); } @@ -1318,24 +1329,24 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines) if (cachep->flags & SLAB_RED_ZONE) { printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n", - *dbg_redzone1(cachep, objp), - *dbg_redzone2(cachep, objp)); + *dbg_redzone1(cachep, objp), + *dbg_redzone2(cachep, objp)); } if (cachep->flags & SLAB_STORE_USER) { printk(KERN_ERR "Last user: [<%p>]", - *dbg_userword(cachep, objp)); + *dbg_userword(cachep, objp)); print_symbol("(%s)", - (unsigned long)*dbg_userword(cachep, objp)); + (unsigned long)*dbg_userword(cachep, objp)); printk("\n"); } - realobj = (char*)objp+obj_dbghead(cachep); + realobj = (char *)objp + obj_dbghead(cachep); size = 
obj_reallen(cachep); - for (i=0; i<size && lines;i+=16, lines--) { + for (i = 0; i < size && lines; i += 16, lines--) { int limit; limit = 16; - if (i+limit > size) - limit = size-i; + if (i + limit > size) + limit = size - i; dump_line(realobj, i, limit); } } @@ -1346,27 +1357,28 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp) int size, i; int lines = 0; - realobj = (char*)objp+obj_dbghead(cachep); + realobj = (char *)objp + obj_dbghead(cachep); size = obj_reallen(cachep); - for (i=0;i<size;i++) { + for (i = 0; i < size; i++) { char exp = POISON_FREE; - if (i == size-1) + if (i == size - 1) exp = POISON_END; if (realobj[i] != exp) { int limit; /* Mismatch ! */ /* Print header */ if (lines == 0) { - printk(KERN_ERR "Slab corruption: start=%p, len=%d\n", - realobj, size); + printk(KERN_ERR + "Slab corruption: start=%p, len=%d\n", + realobj, size); print_objinfo(cachep, objp, 0); } /* Hexdump the affected line */ - i = (i/16)*16; + i = (i / 16) * 16; limit = 16; - if (i+limit > size) - limit = size-i; + if (i + limit > size) + limit = size - i; dump_line(realobj, i, limit); i += 16; lines++; @@ -1382,19 +1394,19 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp) struct slab *slabp = page_get_slab(virt_to_page(objp)); int objnr; - objnr = (objp-slabp->s_mem)/cachep->objsize; + objnr = (objp - slabp->s_mem) / cachep->objsize; if (objnr) { - objp = slabp->s_mem+(objnr-1)*cachep->objsize; - realobj = (char*)objp+obj_dbghead(cachep); + objp = slabp->s_mem + (objnr - 1) * cachep->objsize; + realobj = (char *)objp + obj_dbghead(cachep); printk(KERN_ERR "Prev obj: start=%p, len=%d\n", - realobj, size); + realobj, size); print_objinfo(cachep, objp, 2); } - if (objnr+1 < cachep->num) { - objp = slabp->s_mem+(objnr+1)*cachep->objsize; - realobj = (char*)objp+obj_dbghead(cachep); + if (objnr + 1 < cachep->num) { + objp = slabp->s_mem + (objnr + 1) * cachep->objsize; + realobj = (char *)objp + obj_dbghead(cachep); printk(KERN_ERR "Next obj: start=%p, len=%d\n", - realobj, size); + realobj, size); print_objinfo(cachep, objp, 2); } } @@ -1405,7 +1417,7 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp) * Before calling the slab must have been unlinked from the cache. * The cache-lock is not held/needed. 
*/ -static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp) +static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp) { void *addr = slabp->s_mem - slabp->colouroff; @@ -1416,8 +1428,11 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp) if (cachep->flags & SLAB_POISON) { #ifdef CONFIG_DEBUG_PAGEALLOC - if ((cachep->objsize%PAGE_SIZE)==0 && OFF_SLAB(cachep)) - kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE,1); + if ((cachep->objsize % PAGE_SIZE) == 0 + && OFF_SLAB(cachep)) + kernel_map_pages(virt_to_page(objp), + cachep->objsize / PAGE_SIZE, + 1); else check_poison_obj(cachep, objp); #else @@ -1427,20 +1442,20 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp) if (cachep->flags & SLAB_RED_ZONE) { if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) slab_error(cachep, "start of a freed object " - "was overwritten"); + "was overwritten"); if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) slab_error(cachep, "end of a freed object " - "was overwritten"); + "was overwritten"); } if (cachep->dtor && !(cachep->flags & SLAB_POISON)) - (cachep->dtor)(objp+obj_dbghead(cachep), cachep, 0); + (cachep->dtor) (objp + obj_dbghead(cachep), cachep, 0); } #else if (cachep->dtor) { int i; for (i = 0; i < cachep->num; i++) { - void* objp = slabp->s_mem+cachep->objsize*i; - (cachep->dtor)(objp, cachep, 0); + void *objp = slabp->s_mem + cachep->objsize * i; + (cachep->dtor) (objp, cachep, 0); } } #endif @@ -1448,7 +1463,7 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp) if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) { struct slab_rcu *slab_rcu; - slab_rcu = (struct slab_rcu *) slabp; + slab_rcu = (struct slab_rcu *)slabp; slab_rcu->cachep = cachep; slab_rcu->addr = addr; call_rcu(&slab_rcu->head, kmem_rcu_free); @@ -1466,10 +1481,10 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index) int node; for_each_online_node(node) { - cachep->nodelists[node] = &initkmem_list3[index+node]; + cachep->nodelists[node] = &initkmem_list3[index + node]; cachep->nodelists[node]->next_reap = jiffies + - REAPTIMEOUT_LIST3 + - ((unsigned long)cachep)%REAPTIMEOUT_LIST3; + REAPTIMEOUT_LIST3 + + ((unsigned long)cachep) % REAPTIMEOUT_LIST3; } } @@ -1486,7 +1501,7 @@ static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size, { size_t left_over = 0; - for ( ; ; cachep->gfporder++) { + for (;; cachep->gfporder++) { unsigned int num; size_t remainder; @@ -1566,14 +1581,13 @@ kmem_cache_create (const char *name, size_t size, size_t align, * Sanity checks... these are all serious usage bugs. 
*/ if ((!name) || - in_interrupt() || - (size < BYTES_PER_WORD) || - (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) || - (dtor && !ctor)) { - printk(KERN_ERR "%s: Early error in slab %s\n", - __FUNCTION__, name); - BUG(); - } + in_interrupt() || + (size < BYTES_PER_WORD) || + (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) { + printk(KERN_ERR "%s: Early error in slab %s\n", + __FUNCTION__, name); + BUG(); + } down(&cache_chain_sem); @@ -1593,11 +1607,11 @@ kmem_cache_create (const char *name, size_t size, size_t align, set_fs(old_fs); if (res) { printk("SLAB: cache with size %d has lost its name\n", - pc->objsize); + pc->objsize); continue; } - if (!strcmp(pc->name,name)) { + if (!strcmp(pc->name, name)) { printk("kmem_cache_create: duplicate cache %s\n", name); dump_stack(); goto oops; @@ -1609,10 +1623,9 @@ kmem_cache_create (const char *name, size_t size, size_t align, if ((flags & SLAB_DEBUG_INITIAL) && !ctor) { /* No constructor, but inital state check requested */ printk(KERN_ERR "%s: No con, but init state check " - "requested - %s\n", __FUNCTION__, name); + "requested - %s\n", __FUNCTION__, name); flags &= ~SLAB_DEBUG_INITIAL; } - #if FORCED_DEBUG /* * Enable redzoning and last user accounting, except for caches with @@ -1620,8 +1633,9 @@ kmem_cache_create (const char *name, size_t size, size_t align, * above the next power of two: caches with object sizes just above a * power of two have a significant amount of internal fragmentation. */ - if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD))) - flags |= SLAB_RED_ZONE|SLAB_STORE_USER; + if ((size < 4096 + || fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD))) + flags |= SLAB_RED_ZONE | SLAB_STORE_USER; if (!(flags & SLAB_DESTROY_BY_RCU)) flags |= SLAB_POISON; #endif @@ -1642,9 +1656,9 @@ kmem_cache_create (const char *name, size_t size, size_t align, * unaligned accesses for some archs when redzoning is used, and makes * sure any on-slab bufctl's are also correctly aligned. */ - if (size & (BYTES_PER_WORD-1)) { - size += (BYTES_PER_WORD-1); - size &= ~(BYTES_PER_WORD-1); + if (size & (BYTES_PER_WORD - 1)) { + size += (BYTES_PER_WORD - 1); + size &= ~(BYTES_PER_WORD - 1); } /* calculate out the final buffer alignment: */ @@ -1655,7 +1669,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, * objects into one cacheline. */ ralign = cache_line_size(); - while (size <= ralign/2) + while (size <= ralign / 2) ralign /= 2; } else { ralign = BYTES_PER_WORD; @@ -1664,13 +1678,13 @@ kmem_cache_create (const char *name, size_t size, size_t align, if (ralign < ARCH_SLAB_MINALIGN) { ralign = ARCH_SLAB_MINALIGN; if (ralign > BYTES_PER_WORD) - flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER); + flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); } /* 3) caller mandated alignment: disables debug if necessary */ if (ralign < align) { ralign = align; if (ralign > BYTES_PER_WORD) - flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER); + flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); } /* 4) Store it. Note that the debug code below can reduce * the alignment to BYTES_PER_WORD. 
@@ -1692,7 +1706,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, /* add space for red zone words */ cachep->dbghead += BYTES_PER_WORD; - size += 2*BYTES_PER_WORD; + size += 2 * BYTES_PER_WORD; } if (flags & SLAB_STORE_USER) { /* user store requires word alignment and @@ -1703,7 +1717,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, size += BYTES_PER_WORD; } #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) - if (size >= malloc_sizes[INDEX_L3+1].cs_size && cachep->reallen > cache_line_size() && size < PAGE_SIZE) { + if (size >= malloc_sizes[INDEX_L3 + 1].cs_size + && cachep->reallen > cache_line_size() && size < PAGE_SIZE) { cachep->dbghead += PAGE_SIZE - size; size = PAGE_SIZE; } @@ -1711,7 +1726,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, #endif /* Determine if the slab management is 'on' or 'off' slab. */ - if (size >= (PAGE_SIZE>>3)) + if (size >= (PAGE_SIZE >> 3)) /* * Size is large, assume best to place the slab management obj * off-slab (should allow better packing of objs). @@ -1728,7 +1743,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, */ cachep->gfporder = 0; cache_estimate(cachep->gfporder, size, align, flags, - &left_over, &cachep->num); + &left_over, &cachep->num); } else left_over = calculate_slab_order(cachep, size, align, flags); @@ -1738,8 +1753,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, cachep = NULL; goto oops; } - slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t) - + sizeof(struct slab), align); + slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t) + + sizeof(struct slab), align); /* * If the slab has been placed off-slab, and we have enough space then @@ -1752,14 +1767,15 @@ kmem_cache_create (const char *name, size_t size, size_t align, if (flags & CFLGS_OFF_SLAB) { /* really off slab. No need for manual alignment */ - slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab); + slab_size = + cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab); } cachep->colour_off = cache_line_size(); /* Offset must be a multiple of the alignment. */ if (cachep->colour_off < align) cachep->colour_off = align; - cachep->colour = left_over/cachep->colour_off; + cachep->colour = left_over / cachep->colour_off; cachep->slab_size = slab_size; cachep->flags = flags; cachep->gfpflags = 0; @@ -1786,7 +1802,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, * the creation of further caches will BUG(). */ cachep->array[smp_processor_id()] = - &initarray_generic.cache; + &initarray_generic.cache; /* If the cache that's used by * kmalloc(sizeof(kmem_list3)) is the first cache, @@ -1800,8 +1816,7 @@ kmem_cache_create (const char *name, size_t size, size_t align, g_cpucache_up = PARTIAL_AC; } else { cachep->array[smp_processor_id()] = - kmalloc(sizeof(struct arraycache_init), - GFP_KERNEL); + kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); if (g_cpucache_up == PARTIAL_AC) { |