author     Jack Steiner <steiner@sgi.com>         2006-03-07 21:55:46 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-03-08 14:15:04 -0800
commit     07ed76b2a085a31f427c2a912a562627947dc7de
tree       da15ed460ffa5e43eb644ca0958925275b4b526b /mm
parent     731805b49489055c1548f7ccfbd44c9b84013264
[PATCH] slab: allocate larger cache_cache if order 0 fails
kmem_cache_init() incorrectly assumes that the cache_cache object will fit
in an order 0 allocation. On very large systems, this is not true. Change
the code to try larger order allocations if order 0 fails.
Signed-off-by: Jack Steiner <steiner@sgi.com>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm'):
 mm/slab.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 6ad6bd5a0b3..61800b88e24 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1124,6 +1124,7 @@ void __init kmem_cache_init(void)
 	struct cache_sizes *sizes;
 	struct cache_names *names;
 	int i;
+	int order;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
@@ -1167,11 +1168,15 @@ void __init kmem_cache_init(void)
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
 					cache_line_size());
 
-	cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
-		       &left_over, &cache_cache.num);
+	for (order = 0; order < MAX_ORDER; order++) {
+		cache_estimate(order, cache_cache.buffer_size,
+			cache_line_size(), 0, &left_over, &cache_cache.num);
+		if (cache_cache.num)
+			break;
+	}
 	if (!cache_cache.num)
 		BUG();
-
+	cache_cache.gfporder = order;
 	cache_cache.colour = left_over / cache_cache.colour_off;
 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
 				      sizeof(struct slab), cache_line_size());
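
For illustration only, the following is a minimal userspace sketch of the sizing loop the patch introduces: keep raising the allocation order until at least one object fits, then record the winning order. The estimate() helper and the PAGE_SIZE/MAX_ORDER values here are simplified stand-ins for the kernel's cache_estimate() and its constants, not the real slab internals.

/*
 * Illustration only: a minimal userspace sketch of the sizing loop the
 * patch adds to kmem_cache_init().  estimate(), PAGE_SIZE and MAX_ORDER
 * are simplified stand-ins for the kernel's cache_estimate() and its
 * constants, not the real slab code.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define MAX_ORDER 11

/* Rough estimate: how many objects of obj_size fit in a 2^order-page slab. */
static void estimate(int order, size_t obj_size,
		     size_t *left_over, unsigned int *num)
{
	size_t slab_bytes = PAGE_SIZE << order;

	*num = slab_bytes / obj_size;
	*left_over = slab_bytes - *num * obj_size;
}

int main(void)
{
	/* Pretend the cache descriptor is larger than one page, as it can be
	 * on very large systems with many nodes/CPUs. */
	size_t buffer_size = 3 * PAGE_SIZE;
	size_t left_over = 0;
	unsigned int num = 0;
	int order;

	/* Same shape as the patched loop: raise the order until >= 1 fits. */
	for (order = 0; order < MAX_ORDER; order++) {
		estimate(order, buffer_size, &left_over, &num);
		if (num)
			break;
	}

	if (!num) {
		fprintf(stderr, "no order below %d fits the object\n", MAX_ORDER);
		return 1;
	}

	/* The kernel records the winning order in cache_cache.gfporder. */
	printf("order %d: %u object(s) per slab, %zu bytes left over\n",
	       order, num, left_over);
	return 0;
}

With buffer_size set to three pages, the sketch settles on order 2 (a 16 KB slab holding one object); before the patch, the single order-0 call would have left num at zero and tripped the BUG() check in that same situation.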