Diffstat (limited to 'mm/slob.c')
-rw-r--r--	mm/slob.c	317
1 file changed, 130 insertions, 187 deletions
diff --git a/mm/slob.c b/mm/slob.c
index 617b6d6c42c..21980e0f39a 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -28,9 +28,8 @@
  * from kmalloc are prepended with a 4-byte header with the kmalloc size.
  * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
  * alloc_pages() directly, allocating compound pages so the page order
- * does not have to be separately tracked, and also stores the exact
- * allocation size in page->private so that it can be used to accurately
- * provide ksize(). These objects are detected in kfree() because slob_page()
+ * does not have to be separately tracked.
+ * These objects are detected in kfree() because PageSlab()
  * is false for them.
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
@@ -59,19 +58,21 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
+
 #include <linux/mm.h>
 #include <linux/swap.h> /* struct reclaim_state */
 #include <linux/cache.h>
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 #include <linux/kmemleak.h>
 
 #include <trace/events/kmem.h>
 
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
+#include "slab.h"
 /*
  * slob_block has a field 'units', which indicates size of block if +ve,
  * or offset of next block if -ve (in SLOB_UNITs).
@@ -92,36 +93,6 @@ struct slob_block {
 typedef struct slob_block slob_t;
 
 /*
- * We use struct page fields to manage some slob allocation aspects,
- * however to avoid the horrible mess in include/linux/mm_types.h, we'll
- * just define our own struct page type variant here.
- */
-struct slob_page {
-	union {
-		struct {
-			unsigned long flags;	/* mandatory */
-			atomic_t _count;	/* mandatory */
-			slobidx_t units;	/* free units left in page */
-			unsigned long pad[2];
-			slob_t *free;		/* first free slob_t in page */
-			struct list_head list;	/* linked list of free pages */
-		};
-		struct page page;
-	};
-};
-static inline void struct_slob_page_wrong_size(void)
-{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }
-
-/*
- * free_slob_page: call before a slob_page is returned to the page allocator.
- */
-static inline void free_slob_page(struct slob_page *sp)
-{
-	reset_page_mapcount(&sp->page);
-	sp->page.mapping = NULL;
-}
-
-/*
  * All partially free slob pages go on these lists.
  */
 #define SLOB_BREAK1 256
@@ -131,51 +102,27 @@ static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
 /*
- * is_slob_page: True for all slob pages (false for bigblock pages)
- */
-static inline int is_slob_page(struct slob_page *sp)
-{
-	return PageSlab((struct page *)sp);
-}
-
-static inline void set_slob_page(struct slob_page *sp)
-{
-	__SetPageSlab((struct page *)sp);
-}
-
-static inline void clear_slob_page(struct slob_page *sp)
-{
-	__ClearPageSlab((struct page *)sp);
-}
-
-static inline struct slob_page *slob_page(const void *addr)
-{
-	return (struct slob_page *)virt_to_page(addr);
-}
-
-/*
  * slob_page_free: true for pages on free_slob_pages list.
  */
-static inline int slob_page_free(struct slob_page *sp)
+static inline int slob_page_free(struct page *sp)
 {
-	return PageSlobFree((struct page *)sp);
+	return PageSlobFree(sp);
 }
 
-static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
+static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
-	list_add(&sp->list, list);
-	__SetPageSlobFree((struct page *)sp);
+	list_add(&sp->lru, list);
+	__SetPageSlobFree(sp);
 }
 
-static inline void clear_slob_page_free(struct slob_page *sp)
+static inline void clear_slob_page_free(struct page *sp)
 {
-	list_del(&sp->list);
-	__ClearPageSlobFree((struct page *)sp);
+	list_del(&sp->lru);
+	__ClearPageSlobFree(sp);
 }
 
 #define SLOB_UNIT sizeof(slob_t)
-#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
-#define SLOB_ALIGN L1_CACHE_BYTES
+#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
 
 /*
  * struct slob_rcu is inserted at the tail of allocated slob blocks, which
@@ -245,7 +192,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
 	void *page;
 
 #ifdef CONFIG_NUMA
-	if (node != -1)
+	if (node != NUMA_NO_NODE)
 		page = alloc_pages_exact_node(node, gfp, order);
 	else
 #endif
@@ -267,12 +214,12 @@ static void slob_free_pages(void *b, int order)
 /*
  * Allocate a slob block within a given slob_page sp.
  */
-static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
+static void *slob_page_alloc(struct page *sp, size_t size, int align)
 {
 	slob_t *prev, *cur, *aligned = NULL;
 	int delta = 0, units = SLOB_UNITS(size);
 
-	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
+	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
 		slobidx_t avail = slob_units(cur);
 
 		if (align) {
@@ -296,12 +243,12 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 				if (prev)
 					set_slob(prev, slob_units(prev), next);
 				else
-					sp->free = next;
+					sp->freelist = next;
 			} else { /* fragment */
 				if (prev)
 					set_slob(prev, slob_units(prev), cur + units);
 				else
-					sp->free = cur + units;
+					sp->freelist = cur + units;
 
 				set_slob(cur + units, avail - units, next);
 			}
@@ -320,7 +267,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
  */
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 {
-	struct slob_page *sp;
+	struct page *sp;
 	struct list_head *prev;
 	struct list_head *slob_list;
 	slob_t *b = NULL;
@@ -335,13 +282,13 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
-	list_for_each_entry(sp, slob_list, list) {
+	list_for_each_entry(sp, slob_list, lru) {
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != -1 && page_to_nid(&sp->page) != node)
+		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
@@ -349,7 +296,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 			continue;
 
 		/* Attempt to alloc */
-		prev = sp->list.prev;
+		prev = sp->lru.prev;
 		b = slob_page_alloc(sp, size, align);
 		if (!b)
 			continue;
@@ -369,13 +316,13 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
 			return NULL;
-		sp = slob_page(b);
-		set_slob_page(sp);
+		sp = virt_to_page(b);
+		__SetPageSlab(sp);
 
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
-		sp->free = b;
-		INIT_LIST_HEAD(&sp->list);
+		sp->freelist = b;
+		INIT_LIST_HEAD(&sp->lru);
 		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 		set_slob_page_free(sp, slob_list);
 		b = slob_page_alloc(sp, size, align);
@@ -392,7 +339,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
  */
 static void slob_free(void *block, int size)
 {
-	struct slob_page *sp;
+	struct page *sp;
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
@@ -402,7 +349,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = slob_page(block);
+	sp = virt_to_page(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -412,8 +359,8 @@ static void slob_free(void *block, int size)
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
 		spin_unlock_irqrestore(&slob_lock, flags);
-		clear_slob_page(sp);
-		free_slob_page(sp);
+		__ClearPageSlab(sp);
+		page_mapcount_reset(sp);
 		slob_free_pages(b, 0);
 		return;
 	}
@@ -421,7 +368,7 @@ static void slob_free(void *block, int size)
 	if (!slob_page_free(sp)) {
 		/* This slob page is about to become partially free. Easy! */
 		sp->units = units;
-		sp->free = b;
+		sp->freelist = b;
 		set_slob(b, units,
 			(void *)((unsigned long)(b +
 					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
@@ -441,15 +388,15 @@ static void slob_free(void *block, int size)
 	 */
 	sp->units += units;
 
-	if (b < sp->free) {
-		if (b + units == sp->free) {
-			units += slob_units(sp->free);
-			sp->free = slob_next(sp->free);
+	if (b < (slob_t *)sp->freelist) {
+		if (b + units == sp->freelist) {
+			units += slob_units(sp->freelist);
+			sp->freelist = slob_next(sp->freelist);
 		}
-		set_slob(b, units, sp->free);
-		sp->free = b;
+		set_slob(b, units, sp->freelist);
+		sp->freelist = b;
 	} else {
-		prev = sp->free;
+		prev = sp->freelist;
 		next = slob_next(prev);
 		while (b > next) {
 			prev = next;
@@ -476,12 +423,15 @@ out:
  * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
  */
 
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
 	unsigned int *m;
-	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	void *ret;
 
+	gfp &= gfp_allowed_mask;
+
 	lockdep_trace_alloc(gfp);
 
 	if (size < PAGE_SIZE - align) {
@@ -495,7 +445,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 		*m = size;
 		ret = (void *)m + align;
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(caller, ret,
 				   size, size + align, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
@@ -503,24 +453,39 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 		if (likely(order))
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
-		if (ret) {
-			struct page *page;
-			page = virt_to_page(ret);
-			page->private = size;
-		}
 
-		trace_kmalloc_node(_RET_IP_, ret,
+		trace_kmalloc_node(caller, ret,
 				   size, PAGE_SIZE << order, gfp, node);
 	}
 
 	kmemleak_alloc(ret, size, 1, gfp);
 	return ret;
 }
-EXPORT_SYMBOL(__kmalloc_node);
+
+void *__kmalloc(size_t size, gfp_t gfp)
+{
+	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc);
+
+#ifdef CONFIG_TRACING
+void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
+{
+	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
+					int node, unsigned long caller)
+{
+	return __do_kmalloc_node(size, gfp, node, caller);
+}
+#endif
+#endif
 
 void kfree(const void *block)
 {
-	struct slob_page *sp;
+	struct page *sp;
 
 	trace_kfree(_RET_IP_, block);
 
@@ -528,105 +493,94 @@ void kfree(const void *block)
 		return;
 	kmemleak_free(block);
 
-	sp = slob_page(block);
-	if (is_slob_page(sp)) {
-		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	sp = virt_to_page(block);
+	if (PageSlab(sp)) {
+		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
 	} else
-		put_page(&sp->page);
+		__free_pages(sp, compound_order(sp));
 }
 EXPORT_SYMBOL(kfree);
 
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
 size_t ksize(const void *block)
 {
-	struct slob_page *sp;
+	struct page *sp;
+	int align;
+	unsigned int *m;
 
 	BUG_ON(!block);
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
-	sp = slob_page(block);
-	if (is_slob_page(sp)) {
-		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-		unsigned int *m = (unsigned int *)(block - align);
-		return SLOB_UNITS(*m) * SLOB_UNIT;
-	} else
-		return sp->page.private;
-}
-EXPORT_SYMBOL(ksize);
-
-struct kmem_cache {
-	unsigned int size, align;
-	unsigned long flags;
-	const char *name;
-	void (*ctor)(void *);
-};
-
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *))
-{
-	struct kmem_cache *c;
-
-	c = slob_alloc(sizeof(struct kmem_cache),
-		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
+	sp = virt_to_page(block);
+	if (unlikely(!PageSlab(sp)))
+		return PAGE_SIZE << compound_order(sp);
 
-	if (c) {
-		c->name = name;
-		c->size = size;
-		if (flags & SLAB_DESTROY_BY_RCU) {
-			/* leave room for rcu footer at the end of object */
-			c->size += sizeof(struct slob_rcu);
-		}
-		c->flags = flags;
-		c->ctor = ctor;
-		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
-		if (c->align < ARCH_SLAB_MINALIGN)
-			c->align = ARCH_SLAB_MINALIGN;
-		if (c->align < align)
-			c->align = align;
-	} else if (flags & SLAB_PANIC)
-		panic("Cannot create slab cache %s\n", name);
-
-	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
-	return c;
+	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	m = (unsigned int *)(block - align);
+	return SLOB_UNITS(*m) * SLOB_UNIT;
 }
-EXPORT_SYMBOL(kmem_cache_create);
+EXPORT_SYMBOL(ksize);
 
-void kmem_cache_destroy(struct kmem_cache *c)
+int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-	kmemleak_free(c);
-	if (c->flags & SLAB_DESTROY_BY_RCU)
-		rcu_barrier();
-	slob_free(c, sizeof(struct kmem_cache));
+	if (flags & SLAB_DESTROY_BY_RCU) {
+		/* leave room for rcu footer at the end of object */
+		c->size += sizeof(struct slob_rcu);
+	}
+	c->flags = flags;
+	return 0;
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
-void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
+	flags &= gfp_allowed_mask;
+
+	lockdep_trace_alloc(flags);
+
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
 					    SLOB_UNITS(c->size) * SLOB_UNIT,
 					    flags, node);
 	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
+		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
 					    PAGE_SIZE << get_order(c->size),
 					    flags, node);
 	}
 
-	if (c->ctor)
+	if (b && c->ctor)
 		c->ctor(b);
 
 	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
 	return b;
 }
+EXPORT_SYMBOL(slob_alloc_node);
+
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
+}
+EXPORT_SYMBOL(kmem_cache_alloc);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+{
+	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
+
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+{
+	return slob_alloc_node(cachep, gfp, node);
}
 EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif
 
 static void __kmem_cache_free(void *b, int size)
 {
@@ -660,42 +614,31 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
-unsigned int kmem_cache_size(struct kmem_cache *c)
-{
-	return c->size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-
-const char *kmem_cache_name(struct kmem_cache *c)
-{
-	return c->name;
-}
-EXPORT_SYMBOL(kmem_cache_name);
-
-int kmem_cache_shrink(struct kmem_cache *d)
+int __kmem_cache_shutdown(struct kmem_cache *c)
 {
+	/* No way to check for remaining objects */
 	return 0;
 }
-EXPORT_SYMBOL(kmem_cache_shrink);
 
-int kmem_ptr_validate(struct kmem_cache *a, const void *b)
+int __kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
 }
 
-static unsigned int slob_ready __read_mostly;
-
-int slab_is_available(void)
-{
-	return slob_ready;
-}
+struct kmem_cache kmem_cache_boot = {
+	.name = "kmem_cache",
+	.size = sizeof(struct kmem_cache),
+	.flags = SLAB_PANIC,
+	.align = ARCH_KMALLOC_MINALIGN,
+};
 
 void __init kmem_cache_init(void)
 {
-	slob_ready = 1;
+	kmem_cache = &kmem_cache_boot;
+	slab_state = UP;
 }
 
 void __init kmem_cache_init_late(void)
 {
-	/* Nothing to do */
+	slab_state = FULL;
 }
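Side note for readers, not part of the change above: the kmalloc() path keeps working because of the small-object header convention this diff preserves. __do_kmalloc_node() reserves an alignment-sized header in front of the returned pointer and records the requested size in it (*m = size), and kfree()/ksize() walk back to that header on the PageSlab() path. The sketch below is a minimal userspace model of that convention only; ex_kmalloc/ex_ksize/ex_kfree and EX_ALIGN are invented names, and malloc()/free() merely stand in for the slob page allocator.

#include <stdlib.h>

/* EX_ALIGN stands in for max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN). */
#define EX_ALIGN 16

static void *ex_kmalloc(size_t size)
{
	/* Reserve a header in front of the object, as __do_kmalloc_node() does. */
	unsigned int *m = malloc(size + EX_ALIGN);

	if (!m)
		return NULL;
	*m = (unsigned int)size;	/* record the requested size in the header */
	return (char *)m + EX_ALIGN;	/* hand out the payload after the header */
}

static size_t ex_ksize(const void *block)
{
	/* Walk back to the header, like ksize() on the PageSlab() path. */
	const unsigned int *m = (const unsigned int *)((const char *)block - EX_ALIGN);

	return *m;
}

static void ex_kfree(void *block)
{
	/* kfree() does the same walk-back before handing the block to slob_free(). */
	unsigned int *m = (unsigned int *)((char *)block - EX_ALIGN);

	free(m);
}

int main(void)
{
	void *p = ex_kmalloc(100);
	size_t n;

	if (!p)
		return 1;
	n = ex_ksize(p);
	ex_kfree(p);
	return n == 100 ? 0 : 1;
}

The real ksize() additionally rounds the stored size up to whole SLOB units (SLOB_UNITS(*m) * SLOB_UNIT) rather than returning it verbatim, and falls back to the compound page size for allocations that bypassed the slob lists.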
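A second illustrative aside, also not from the patch: the SLOB_UNITS() rewrite is behavior-preserving, because the kernel's DIV_ROUND_UP(n, d) helper expands to the same ceiling division the removed open-coded macro performed. A quick standalone check, where SLOB_UNIT is a placeholder since slob_t is not visible outside mm/slob.c:

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SLOB_UNIT		sizeof(long)	/* placeholder for sizeof(slob_t) */
#define SLOB_UNITS_OLD(size)	(((size) + SLOB_UNIT - 1) / SLOB_UNIT)
#define SLOB_UNITS_NEW(size)	DIV_ROUND_UP(size, SLOB_UNIT)

int main(void)
{
	size_t s;

	/* Both forms round a byte count up to whole SLOB units. */
	for (s = 1; s <= 4096; s++)
		assert(SLOB_UNITS_OLD(s) == SLOB_UNITS_NEW(s));

	printf("SLOB_UNITS(100) = %zu units of %zu bytes\n",
	       (size_t)SLOB_UNITS_NEW(100), (size_t)SLOB_UNIT);
	return 0;
}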
