Diffstat (limited to 'mm/slab.c')
| -rw-r--r-- | mm/slab.c | 5156 |
1 file changed, 2984 insertions, 2172 deletions
diff --git a/mm/slab.c b/mm/slab.c index 1db4d731385..3070b929a1b 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -26,7 +26,7 @@ * initialized objects. * * This means, that your constructor is used only for newly allocated - * slabs and you must pass objects with the same intializations to + * slabs and you must pass objects with the same initializations to * kmem_cache_free. * * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM, @@ -50,12 +50,12 @@ * The head array is strictly LIFO and should improve the cache hit rates. * On SMP, it additionally reduces the spinlock operations. * - * The c_cpuarray may not be read with enabled local interrupts - + * The c_cpuarray may not be read with enabled local interrupts - * it's changed with a smp_call_function(). * * SMP synchronization: * constructors and destructors are called without any locking. - * Several members in kmem_cache_t and struct slab never change, they + * Several members in struct kmem_cache and struct slab never change, they * are accessed without any locking. * The per-cpu arrays are never accessed from the wrong cpu, no locking, * and local interrupts are disabled so slab code is preempt-safe. @@ -68,7 +68,7 @@ * Further notes from the original documentation: * * 11 April '97. Started multi-threading - markhe - * The global cache-chain is protected by the semaphore 'cache_chain_sem'. + * The global cache-chain is protected by the mutex 'slab_mutex'. * The sem is only needed when accessing/extending the cache-chain, which * can never happen inside an interrupt (kmem_cache_create(), * kmem_cache_shrink() and kmem_cache_reap()). @@ -86,14 +86,16 @@ * All object allocations for a node occur from node specific slab lists. */ -#include <linux/config.h> #include <linux/slab.h> #include <linux/mm.h> +#include <linux/poison.h> #include <linux/swap.h> #include <linux/cache.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/compiler.h> +#include <linux/cpuset.h> +#include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/kallsyms.h> @@ -102,16 +104,33 @@ #include <linux/module.h> #include <linux/rcupdate.h> #include <linux/string.h> +#include <linux/uaccess.h> #include <linux/nodemask.h> +#include <linux/kmemleak.h> +#include <linux/mempolicy.h> +#include <linux/mutex.h> +#include <linux/fault-inject.h> +#include <linux/rtmutex.h> +#include <linux/reciprocal_div.h> +#include <linux/debugobjects.h> +#include <linux/kmemcheck.h> +#include <linux/memory.h> +#include <linux/prefetch.h> + +#include <net/sock.h> -#include <asm/uaccess.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/page.h> +#include <trace/events/kmem.h> + +#include "internal.h" + +#include "slab.h" + /* - * DEBUG - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL, - * SLAB_RED_ZONE & SLAB_POISON. + * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON. * 0 for faster, smaller code (especially in the critical paths). * * STATS - 1 to collect stats for /proc/slabinfo. @@ -130,122 +149,30 @@ #define FORCED_DEBUG 0 #endif - /* Shouldn't this be in a header file somewhere? */ #define BYTES_PER_WORD sizeof(void *) - -#ifndef cache_line_size -#define cache_line_size() L1_CACHE_BYTES -#endif - -#ifndef ARCH_KMALLOC_MINALIGN -/* - * Enforce a minimum alignment for the kmalloc caches. - * Usually, the kmalloc caches are cache_line_size() aligned, except when - * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned. 
- * Some archs want to perform DMA into kmalloc caches and need a guaranteed - * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that. - * Note that this flag disables some debug features. - */ -#define ARCH_KMALLOC_MINALIGN 0 -#endif - -#ifndef ARCH_SLAB_MINALIGN -/* - * Enforce a minimum alignment for all caches. - * Intended for archs that get misalignment faults even for BYTES_PER_WORD - * aligned buffers. Includes ARCH_KMALLOC_MINALIGN. - * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables - * some debug features. - */ -#define ARCH_SLAB_MINALIGN 0 -#endif +#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long)) #ifndef ARCH_KMALLOC_FLAGS #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN #endif -/* Legal flag mask for kmem_cache_create(). */ -#if DEBUG -# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \ - SLAB_POISON | SLAB_HWCACHE_ALIGN | \ - SLAB_NO_REAP | SLAB_CACHE_DMA | \ - SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \ - SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ - SLAB_DESTROY_BY_RCU) +#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \ + <= SLAB_OBJ_MIN_SIZE) ? 1 : 0) + +#if FREELIST_BYTE_INDEX +typedef unsigned char freelist_idx_t; #else -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \ - SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \ - SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ - SLAB_DESTROY_BY_RCU) +typedef unsigned short freelist_idx_t; #endif -/* - * kmem_bufctl_t: - * - * Bufctl's are used for linking objs within a slab - * linked offsets. - * - * This implementation relies on "struct page" for locating the cache & - * slab an object belongs to. - * This allows the bufctl structure to be small (one int), but limits - * the number of objects a slab (not a cache) can contain when off-slab - * bufctls are used. The limit is the size of the largest general cache - * that does not use off-slab slabs. - * For 32bit archs with 4 kB pages, is this 56. - * This is not serious, as it is only for large objects, when it is unwise - * to have too many per slab. - * Note: This limit can be raised by introducing a general cache whose size - * is less than 512 (PAGE_SIZE<<3), but greater than 256. - */ - -typedef unsigned int kmem_bufctl_t; -#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0) -#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1) -#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-2) - -/* Max number of objs-per-slab for caches which use off-slab slabs. - * Needed to avoid a possible looping condition in cache_grow(). - */ -static unsigned long offslab_limit; +#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1) /* - * struct slab - * - * Manages the objs in a slab. Placed either at the beginning of mem allocated - * for a slab, or allocated from an general cache. - * Slabs are chained into three list: fully used, partial, fully free slabs. + * true if a page was allocated from pfmemalloc reserves for network-based + * swap */ -struct slab { - struct list_head list; - unsigned long colouroff; - void *s_mem; /* including colour offset */ - unsigned int inuse; /* num of objs active in slab */ - kmem_bufctl_t free; - unsigned short nodeid; -}; - -/* - * struct slab_rcu - * - * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to - * arrange for kmem_freepages to be called via RCU. This is useful if - * we need to approach a kernel structure obliquely, from its address - * obtained without the usual locking. 
We can lock the structure to - * stabilize it and check it's still at the given address, only if we - * can be sure that the memory has not been meanwhile reused for some - * other kind of object (which our subsystem's lock might corrupt). - * - * rcu_read_lock before reading the address, then rcu_read_unlock after - * taking the spinlock within the structure expected at that address. - * - * We assume struct slab_rcu can overlay struct slab when destroying. - */ -struct slab_rcu { - struct rcu_head head; - kmem_cache_t *cachep; - void *addr; -}; +static bool pfmemalloc_active __read_mostly; /* * struct array_cache @@ -265,198 +192,125 @@ struct array_cache { unsigned int batchcount; unsigned int touched; spinlock_t lock; - void *entry[0]; /* - * Must have this definition in here for the proper - * alignment of array_cache. Also simplifies accessing - * the entries. - * [0] is for gcc 2.95. It should really be []. - */ + void *entry[]; /* + * Must have this definition in here for the proper + * alignment of array_cache. Also simplifies accessing + * the entries. + * + * Entries should not be directly dereferenced as + * entries belonging to slabs marked pfmemalloc will + * have the lower bits set SLAB_OBJ_PFMEMALLOC + */ }; -/* bootstrap: The caches do not work without cpuarrays anymore, - * but the cpuarrays are allocated from the generic caches... +#define SLAB_OBJ_PFMEMALLOC 1 +static inline bool is_obj_pfmemalloc(void *objp) +{ + return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC; +} + +static inline void set_obj_pfmemalloc(void **objp) +{ + *objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC); + return; +} + +static inline void clear_obj_pfmemalloc(void **objp) +{ + *objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC); +} + +/* + * bootstrap: The caches do not work without cpuarrays anymore, but the + * cpuarrays are allocated from the generic caches... */ #define BOOT_CPUCACHE_ENTRIES 1 struct arraycache_init { struct array_cache cache; - void * entries[BOOT_CPUCACHE_ENTRIES]; -}; - -/* - * The slab lists for all objects. - */ -struct kmem_list3 { - struct list_head slabs_partial; /* partial list first, better asm code */ - struct list_head slabs_full; - struct list_head slabs_free; - unsigned long free_objects; - unsigned long next_reap; - int free_touched; - unsigned int free_limit; - spinlock_t list_lock; - struct array_cache *shared; /* shared per node */ - struct array_cache **alien; /* on other nodes */ + void *entries[BOOT_CPUCACHE_ENTRIES]; }; /* * Need this for bootstrapping a per node allocator. */ -#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1) -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; +#define NUM_INIT_LISTS (3 * MAX_NUMNODES) +static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS]; #define CACHE_CACHE 0 -#define SIZE_AC 1 -#define SIZE_L3 (1 + MAX_NUMNODES) +#define SIZE_AC MAX_NUMNODES +#define SIZE_NODE (2 * MAX_NUMNODES) -/* - * This function must be completely optimized away if - * a constant is passed to it. Mostly the same as - * what is in linux/slab.h except it returns an - * index. 
- */ -static __always_inline int index_of(const size_t size) -{ - if (__builtin_constant_p(size)) { - int i = 0; +static int drain_freelist(struct kmem_cache *cache, + struct kmem_cache_node *n, int tofree); +static void free_block(struct kmem_cache *cachep, void **objpp, int len, + int node); +static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp); +static void cache_reap(struct work_struct *unused); -#define CACHE(x) \ - if (size <=x) \ - return i; \ - else \ - i++; -#include "linux/kmalloc_sizes.h" -#undef CACHE - { - extern void __bad_size(void); - __bad_size(); - } - } else - BUG(); - return 0; -} +static int slab_early_init = 1; -#define INDEX_AC index_of(sizeof(struct arraycache_init)) -#define INDEX_L3 index_of(sizeof(struct kmem_list3)) +#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init)) +#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node)) -static inline void kmem_list3_init(struct kmem_list3 *parent) +static void kmem_cache_node_init(struct kmem_cache_node *parent) { INIT_LIST_HEAD(&parent->slabs_full); INIT_LIST_HEAD(&parent->slabs_partial); INIT_LIST_HEAD(&parent->slabs_free); parent->shared = NULL; parent->alien = NULL; + parent->colour_next = 0; spin_lock_init(&parent->list_lock); parent->free_objects = 0; parent->free_touched = 0; } -#define MAKE_LIST(cachep, listp, slab, nodeid) \ - do { \ - INIT_LIST_HEAD(listp); \ - list_splice(&(cachep->nodelists[nodeid]->slab), listp); \ +#define MAKE_LIST(cachep, listp, slab, nodeid) \ + do { \ + INIT_LIST_HEAD(listp); \ + list_splice(&(cachep->node[nodeid]->slab), listp); \ } while (0) -#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ - do { \ +#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ + do { \ MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \ MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \ MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ } while (0) -/* - * kmem_cache_t - * - * manages a cache. - */ - -struct kmem_cache { -/* 1) per-cpu data, touched during every alloc/free */ - struct array_cache *array[NR_CPUS]; - unsigned int batchcount; - unsigned int limit; - unsigned int shared; - unsigned int objsize; -/* 2) touched by every alloc & free from the backend */ - struct kmem_list3 *nodelists[MAX_NUMNODES]; - unsigned int flags; /* constant flags */ - unsigned int num; /* # of objs per slab */ - spinlock_t spinlock; - -/* 3) cache_grow/shrink */ - /* order of pgs per slab (2^n) */ - unsigned int gfporder; - - /* force GFP flags, e.g. 
GFP_DMA */ - gfp_t gfpflags; - - size_t colour; /* cache colouring range */ - unsigned int colour_off; /* colour offset */ - unsigned int colour_next; /* cache colouring */ - kmem_cache_t *slabp_cache; - unsigned int slab_size; - unsigned int dflags; /* dynamic flags */ - - /* constructor func */ - void (*ctor)(void *, kmem_cache_t *, unsigned long); - - /* de-constructor func */ - void (*dtor)(void *, kmem_cache_t *, unsigned long); - -/* 4) cache creation/removal */ - const char *name; - struct list_head next; - -/* 5) statistics */ -#if STATS - unsigned long num_active; - unsigned long num_allocations; - unsigned long high_mark; - unsigned long grown; - unsigned long reaped; - unsigned long errors; - unsigned long max_freeable; - unsigned long node_allocs; - unsigned long node_frees; - atomic_t allochit; - atomic_t allocmiss; - atomic_t freehit; - atomic_t freemiss; -#endif -#if DEBUG - int dbghead; - int reallen; -#endif -}; - #define CFLGS_OFF_SLAB (0x80000000UL) #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) #define BATCHREFILL_LIMIT 16 -/* Optimization question: fewer reaps means less - * probability for unnessary cpucache drain/refill cycles. +/* + * Optimization question: fewer reaps means less probability for unnessary + * cpucache drain/refill cycles. * - * OTHO the cpuarrays can contain lots of objects, + * OTOH the cpuarrays can contain lots of objects, * which could lock up otherwise freeable slabs. */ -#define REAPTIMEOUT_CPUC (2*HZ) -#define REAPTIMEOUT_LIST3 (4*HZ) +#define REAPTIMEOUT_AC (2*HZ) +#define REAPTIMEOUT_NODE (4*HZ) #if STATS #define STATS_INC_ACTIVE(x) ((x)->num_active++) #define STATS_DEC_ACTIVE(x) ((x)->num_active--) #define STATS_INC_ALLOCED(x) ((x)->num_allocations++) #define STATS_INC_GROWN(x) ((x)->grown++) -#define STATS_INC_REAPED(x) ((x)->reaped++) -#define STATS_SET_HIGH(x) do { if ((x)->num_active > (x)->high_mark) \ - (x)->high_mark = (x)->num_active; \ - } while (0) +#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y)) +#define STATS_SET_HIGH(x) \ + do { \ + if ((x)->num_active > (x)->high_mark) \ + (x)->high_mark = (x)->num_active; \ + } while (0) #define STATS_INC_ERR(x) ((x)->errors++) #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++) #define STATS_INC_NODEFREES(x) ((x)->node_frees++) -#define STATS_SET_FREEABLE(x, i) \ - do { if ((x)->max_freeable < i) \ - (x)->max_freeable = i; \ - } while (0) - +#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++) +#define STATS_SET_FREEABLE(x, i) \ + do { \ + if ((x)->max_freeable < i) \ + (x)->max_freeable = i; \ + } while (0) #define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit) #define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss) #define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit) @@ -466,14 +320,13 @@ struct kmem_cache { #define STATS_DEC_ACTIVE(x) do { } while (0) #define STATS_INC_ALLOCED(x) do { } while (0) #define STATS_INC_GROWN(x) do { } while (0) -#define STATS_INC_REAPED(x) do { } while (0) +#define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0) #define STATS_SET_HIGH(x) do { } while (0) #define STATS_INC_ERR(x) do { } while (0) #define STATS_INC_NODEALLOCS(x) do { } while (0) #define STATS_INC_NODEFREES(x) do { } while (0) -#define STATS_SET_FREEABLE(x, i) \ - do { } while (0) - +#define STATS_INC_ACOVERFLOW(x) do { } while (0) +#define STATS_SET_FREEABLE(x, i) do { } while (0) #define STATS_INC_ALLOCHIT(x) do { } while (0) #define STATS_INC_ALLOCMISS(x) do { } while (0) #define STATS_INC_FREEHIT(x) do { } while (0) @@ -481,241 +334,439 @@ struct kmem_cache { #endif #if 
DEBUG -/* Magic nums for obj red zoning. - * Placed in the first word before and the first word after an obj. - */ -#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */ -#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */ - -/* ...and for poisoning */ -#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */ -#define POISON_FREE 0x6b /* for use-after-free poisoning */ -#define POISON_END 0xa5 /* end-byte of poisoning */ -/* memory layout of objects: +/* + * memory layout of objects: * 0 : objp - * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that + * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that * the end of an object is aligned with the end of the real * allocation. Catches writes behind the end of the allocation. - * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1: + * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1: * redzone word. - * cachep->dbghead: The real object. - * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] - * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long] + * cachep->obj_offset: The real object. + * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long] + * cachep->size - 1* BYTES_PER_WORD: last caller address + * [BYTES_PER_WORD long] */ -static int obj_dbghead(kmem_cache_t *cachep) +static int obj_offset(struct kmem_cache *cachep) { - return cachep->dbghead; + return cachep->obj_offset; } -static int obj_reallen(kmem_cache_t *cachep) -{ - return cachep->reallen; -} - -static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp) +static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); - return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD); + return (unsigned long long*) (objp + obj_offset(cachep) - + sizeof(unsigned long long)); } -static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp) +static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); if (cachep->flags & SLAB_STORE_USER) - return (unsigned long*) (objp+cachep->objsize-2*BYTES_PER_WORD); - return (unsigned long*) (objp+cachep->objsize-BYTES_PER_WORD); + return (unsigned long long *)(objp + cachep->size - + sizeof(unsigned long long) - + REDZONE_ALIGN); + return (unsigned long long *) (objp + cachep->size - + sizeof(unsigned long long)); } -static void **dbg_userword(kmem_cache_t *cachep, void *objp) +static void **dbg_userword(struct kmem_cache *cachep, void *objp) { BUG_ON(!(cachep->flags & SLAB_STORE_USER)); - return (void**)(objp+cachep->objsize-BYTES_PER_WORD); + return (void **)(objp + cachep->size - BYTES_PER_WORD); } #else -#define obj_dbghead(x) 0 -#define obj_reallen(cachep) (cachep->objsize) -#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;}) -#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;}) +#define obj_offset(x) 0 +#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) +#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) #endif -/* - * Maximum size of an obj (in 2^order pages) - * and absolute limit for the gfp order. 
- */ -#if defined(CONFIG_LARGE_ALLOCS) -#define MAX_OBJ_ORDER 13 /* up to 32Mb */ -#define MAX_GFP_ORDER 13 /* up to 32Mb */ -#elif defined(CONFIG_MMU) -#define MAX_OBJ_ORDER 5 /* 32 pages */ -#define MAX_GFP_ORDER 5 /* 32 pages */ +#define OBJECT_FREE (0) +#define OBJECT_ACTIVE (1) + +#ifdef CONFIG_DEBUG_SLAB_LEAK + +static void set_obj_status(struct page *page, int idx, int val) +{ + int freelist_size; + char *status; + struct kmem_cache *cachep = page->slab_cache; + + freelist_size = cachep->num * sizeof(freelist_idx_t); + status = (char *)page->freelist + freelist_size; + status[idx] = val; +} + +static inline unsigned int get_obj_status(struct page *page, int idx) +{ + int freelist_size; + char *status; + struct kmem_cache *cachep = page->slab_cache; + + freelist_size = cachep->num * sizeof(freelist_idx_t); + status = (char *)page->freelist + freelist_size; + + return status[idx]; +} + #else -#define MAX_OBJ_ORDER 8 /* up to 1Mb */ -#define MAX_GFP_ORDER 8 /* up to 1Mb */ +static inline void set_obj_status(struct page *page, int idx, int val) {} + #endif /* - * Do not go above this order unless 0 objects fit into the slab. + * Do not go above this order unless 0 objects fit into the slab or + * overridden on the command line. */ -#define BREAK_GFP_ORDER_HI 1 -#define BREAK_GFP_ORDER_LO 0 -static int slab_break_gfp_order = BREAK_GFP_ORDER_LO; +#define SLAB_MAX_ORDER_HI 1 +#define SLAB_MAX_ORDER_LO 0 +static int slab_max_order = SLAB_MAX_ORDER_LO; +static bool slab_max_order_set __initdata; -/* Macros for storing/retrieving the cachep and or slab from the - * global 'mem_map'. These are used to find the slab an obj belongs to. - * With kfree(), these are used to find the cache which an obj belongs to. - */ -#define SET_PAGE_CACHE(pg,x) ((pg)->lru.next = (struct list_head *)(x)) -#define GET_PAGE_CACHE(pg) ((kmem_cache_t *)(pg)->lru.next) -#define SET_PAGE_SLAB(pg,x) ((pg)->lru.prev = (struct list_head *)(x)) -#define GET_PAGE_SLAB(pg) ((struct slab *)(pg)->lru.prev) - -/* These are the default caches for kmalloc. Custom caches can have other sizes. */ -struct cache_sizes malloc_sizes[] = { -#define CACHE(x) { .cs_size = (x) }, -#include <linux/kmalloc_sizes.h> - CACHE(ULONG_MAX) -#undef CACHE -}; -EXPORT_SYMBOL(malloc_sizes); +static inline struct kmem_cache *virt_to_cache(const void *obj) +{ + struct page *page = virt_to_head_page(obj); + return page->slab_cache; +} -/* Must match cache_sizes above. Out of line to keep cache footprint low. 
*/ -struct cache_names { - char *name; - char *name_dma; -}; +static inline void *index_to_obj(struct kmem_cache *cache, struct page *page, + unsigned int idx) +{ + return page->s_mem + cache->size * idx; +} -static struct cache_names __initdata cache_names[] = { -#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, -#include <linux/kmalloc_sizes.h> - { NULL, } -#undef CACHE -}; +/* + * We want to avoid an expensive divide : (offset / cache->size) + * Using the fact that size is a constant for a particular cache, + * we can replace (offset / cache->size) by + * reciprocal_divide(offset, cache->reciprocal_buffer_size) + */ +static inline unsigned int obj_to_index(const struct kmem_cache *cache, + const struct page *page, void *obj) +{ + u32 offset = (obj - page->s_mem); + return reciprocal_divide(offset, cache->reciprocal_buffer_size); +} -static struct arraycache_init initarray_cache __initdata = - { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; static struct arraycache_init initarray_generic = - { { 0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; + { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; /* internal cache of cache description objs */ -static kmem_cache_t cache_cache = { - .batchcount = 1, - .limit = BOOT_CPUCACHE_ENTRIES, - .shared = 1, - .objsize = sizeof(kmem_cache_t), - .flags = SLAB_NO_REAP, - .spinlock = SPIN_LOCK_UNLOCKED, - .name = "kmem_cache", -#if DEBUG - .reallen = sizeof(kmem_cache_t), -#endif +static struct kmem_cache kmem_cache_boot = { + .batchcount = 1, + .limit = BOOT_CPUCACHE_ENTRIES, + .shared = 1, + .size = sizeof(struct kmem_cache), + .name = "kmem_cache", }; -/* Guard access to the cache-chain. */ -static struct semaphore cache_chain_sem; -static struct list_head cache_chain; +#define BAD_ALIEN_MAGIC 0x01020304ul + +#ifdef CONFIG_LOCKDEP /* - * vm_enough_memory() looks at this to determine how many - * slab-allocated pages are possibly freeable under pressure + * Slab sometimes uses the kmalloc slabs to store the slab headers + * for other slabs "off slab". + * The locking for this is tricky in that it nests within the locks + * of all other slabs in a few places; to deal with this special + * locking we put on-slab caches into a separate lock-class. * - * SLAB_RECLAIM_ACCOUNT turns this on per-slab + * We set lock class for alien array caches which are up during init. + * The lock annotation will be lost if all cpus of a node goes down and + * then comes back up during hotplug */ -atomic_t slab_reclaim_pages; +static struct lock_class_key on_slab_l3_key; +static struct lock_class_key on_slab_alc_key; -/* - * chicken and egg problem: delay the per-cpu array allocation - * until the general caches are up. 
- */ -static enum { - NONE, - PARTIAL_AC, - PARTIAL_L3, - FULL -} g_cpucache_up; +static struct lock_class_key debugobj_l3_key; +static struct lock_class_key debugobj_alc_key; -static DEFINE_PER_CPU(struct work_struct, reap_work); +static void slab_set_lock_classes(struct kmem_cache *cachep, + struct lock_class_key *l3_key, struct lock_class_key *alc_key, + int q) +{ + struct array_cache **alc; + struct kmem_cache_node *n; + int r; -static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node); -static void enable_cpucache (kmem_cache_t *cachep); -static void cache_reap (void *unused); -static int __node_shrink(kmem_cache_t *cachep, int node); + n = cachep->node[q]; + if (!n) + return; -static inline struct array_cache *ac_data(kmem_cache_t *cachep) + lockdep_set_class(&n->list_lock, l3_key); + alc = n->alien; + /* + * FIXME: This check for BAD_ALIEN_MAGIC + * should go away when common slab code is taught to + * work even without alien caches. + * Currently, non NUMA code returns BAD_ALIEN_MAGIC + * for alloc_alien_cache, + */ + if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC) + return; + for_each_node(r) { + if (alc[r]) + lockdep_set_class(&alc[r]->lock, alc_key); + } +} + +static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node) { - return cachep->array[smp_processor_id()]; + slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node); } -static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags) +static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep) { - struct cache_sizes *csizep = malloc_sizes; + int node; -#if DEBUG - /* This happens if someone tries to call - * kmem_cache_create(), or __kmalloc(), before - * the generic caches are initialized. - */ - BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL); + for_each_online_node(node) + slab_set_debugobj_lock_classes_node(cachep, node); +} + +static void init_node_lock_keys(int q) +{ + int i; + + if (slab_state < UP) + return; + + for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) { + struct kmem_cache_node *n; + struct kmem_cache *cache = kmalloc_caches[i]; + + if (!cache) + continue; + + n = cache->node[q]; + if (!n || OFF_SLAB(cache)) + continue; + + slab_set_lock_classes(cache, &on_slab_l3_key, + &on_slab_alc_key, q); + } +} + +static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q) +{ + if (!cachep->node[q]) + return; + + slab_set_lock_classes(cachep, &on_slab_l3_key, + &on_slab_alc_key, q); +} + +static inline void on_slab_lock_classes(struct kmem_cache *cachep) +{ + int node; + + VM_BUG_ON(OFF_SLAB(cachep)); + for_each_node(node) + on_slab_lock_classes_node(cachep, node); +} + +static inline void init_lock_keys(void) +{ + int node; + + for_each_node(node) + init_node_lock_keys(node); +} +#else +static void init_node_lock_keys(int q) +{ +} + +static inline void init_lock_keys(void) +{ +} + +static inline void on_slab_lock_classes(struct kmem_cache *cachep) +{ +} + +static inline void on_slab_lock_classes_node(struct kmem_cache *cachep, int node) +{ +} + +static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node) +{ +} + +static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep) +{ +} #endif - while (size > csizep->cs_size) - csizep++; - /* - * Really subtle: The last entry with cs->cs_size==ULONG_MAX - * has cs_{dma,}cachep==NULL. Thus no special case - * for large kmalloc calls required. 
- */ - if (unlikely(gfpflags & GFP_DMA)) - return csizep->cs_dmacachep; - return csizep->cs_cachep; +static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); + +static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) +{ + return cachep->array[smp_processor_id()]; } -kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags) +static size_t calculate_freelist_size(int nr_objs, size_t align) { - return __find_general_cachep(size, gfpflags); + size_t freelist_size; + + freelist_size = nr_objs * sizeof(freelist_idx_t); + if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK)) + freelist_size += nr_objs * sizeof(char); + + if (align) + freelist_size = ALIGN(freelist_size, align); + + return freelist_size; } -EXPORT_SYMBOL(kmem_find_general_cachep); -/* Cal the num objs, wastage, and bytes left over for a given slab size. */ -static void cache_estimate(unsigned long gfporder, size_t size, size_t align, - int flags, size_t *left_over, unsigned int *num) +static int calculate_nr_objs(size_t slab_size, size_t buffer_size, + size_t idx_size, size_t align) { - int i; - size_t wastage = PAGE_SIZE<<gfporder; - size_t extra = 0; - size_t base = 0; + int nr_objs; + size_t remained_size; + size_t freelist_size; + int extra_space = 0; - if (!(flags & CFLGS_OFF_SLAB)) { - base = sizeof(struct slab); - extra = sizeof(kmem_bufctl_t); - } - i = 0; - while (i*size + ALIGN(base+i*extra, align) <= wastage) - i++; - if (i > 0) - i--; + if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK)) + extra_space = sizeof(char); + /* + * Ignore padding for the initial guess. The padding + * is at most @align-1 bytes, and @buffer_size is at + * least @align. In the worst case, this result will + * be one greater than the number of objects that fit + * into the memory allocation when taking the padding + * into account. + */ + nr_objs = slab_size / (buffer_size + idx_size + extra_space); - if (i > SLAB_LIMIT) - i = SLAB_LIMIT; + /* + * This calculated number will be either the right + * amount, or one greater than what we want. + */ + remained_size = slab_size - nr_objs * buffer_size; + freelist_size = calculate_freelist_size(nr_objs, align); + if (remained_size < freelist_size) + nr_objs--; + + return nr_objs; +} + +/* + * Calculate the number of objects and left-over bytes for a given buffer size. + */ +static void cache_estimate(unsigned long gfporder, size_t buffer_size, + size_t align, int flags, size_t *left_over, + unsigned int *num) +{ + int nr_objs; + size_t mgmt_size; + size_t slab_size = PAGE_SIZE << gfporder; + + /* + * The slab management structure can be either off the slab or + * on it. For the latter case, the memory allocated for a + * slab is used for: + * + * - One unsigned int for each object + * - Padding to respect alignment of @align + * - @buffer_size bytes for each object + * + * If the slab management structure is off the slab, then the + * alignment will already be calculated into the size. Because + * the slabs are all pages aligned, the objects will be at the + * correct alignment when allocated. 
+ */ + if (flags & CFLGS_OFF_SLAB) { + mgmt_size = 0; + nr_objs = slab_size / buffer_size; - *num = i; - wastage -= i*size; - wastage -= ALIGN(base+i*extra, align); - *left_over = wastage; + } else { + nr_objs = calculate_nr_objs(slab_size, buffer_size, + sizeof(freelist_idx_t), align); + mgmt_size = calculate_freelist_size(nr_objs, align); + } + *num = nr_objs; + *left_over = slab_size - nr_objs*buffer_size - mgmt_size; } -#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg) +#if DEBUG +#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg) -static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg) +static void __slab_error(const char *function, struct kmem_cache *cachep, + char *msg) { printk(KERN_ERR "slab error in %s(): cache `%s': %s\n", - function, cachep->name, msg); + function, cachep->name, msg); dump_stack(); + add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); } +#endif + +/* + * By default on NUMA we use alien caches to stage the freeing of + * objects allocated from other nodes. This causes massive memory + * inefficiencies when using fake NUMA setup to split memory into a + * large number of small nodes, so it can be disabled on the command + * line + */ + +static int use_alien_caches __read_mostly = 1; +static int __init noaliencache_setup(char *s) +{ + use_alien_caches = 0; + return 1; +} +__setup("noaliencache", noaliencache_setup); + +static int __init slab_max_order_setup(char *str) +{ + get_option(&str, &slab_max_order); + slab_max_order = slab_max_order < 0 ? 0 : + min(slab_max_order, MAX_ORDER - 1); + slab_max_order_set = true; + + return 1; +} +__setup("slab_max_order=", slab_max_order_setup); + +#ifdef CONFIG_NUMA +/* + * Special reaping functions for NUMA systems called from cache_reap(). + * These take care of doing round robin flushing of alien caches (containing + * objects freed on different nodes from which they were allocated) and the + * flushing of remote pcps by calling drain_node_pages. + */ +static DEFINE_PER_CPU(unsigned long, slab_reap_node); + +static void init_reap_node(int cpu) +{ + int node; + + node = next_node(cpu_to_mem(cpu), node_online_map); + if (node == MAX_NUMNODES) + node = first_node(node_online_map); + + per_cpu(slab_reap_node, cpu) = node; +} + +static void next_reap_node(void) +{ + int node = __this_cpu_read(slab_reap_node); + + node = next_node(node, node_online_map); + if (unlikely(node >= MAX_NUMNODES)) + node = first_node(node_online_map); + __this_cpu_write(slab_reap_node, node); +} + +#else +#define init_reap_node(cpu) do { } while (0) +#define next_reap_node(void) do { } while (0) +#endif /* * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz @@ -724,28 +775,38 @@ static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg) * the CPUs getting into lockstep and contending for the global cache chain * lock. */ -static void __devinit start_cpu_timer(int cpu) +static void start_cpu_timer(int cpu) { - struct work_struct *reap_work = &per_cpu(reap_work, cpu); + struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu); /* * When this gets called from do_initcalls via cpucache_init(), * init_workqueues() has already run, so keventd will be setup * at that time. 
*/ - if (keventd_up() && reap_work->func == NULL) { - INIT_WORK(reap_work, cache_reap, NULL); - schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); + if (keventd_up() && reap_work->work.func == NULL) { + init_reap_node(cpu); + INIT_DEFERRABLE_WORK(reap_work, cache_reap); + schedule_delayed_work_on(cpu, reap_work, + __round_jiffies_relative(HZ, cpu)); } } static struct array_cache *alloc_arraycache(int node, int entries, - int batchcount) + int batchcount, gfp_t gfp) { - int memsize = sizeof(void*)*entries+sizeof(struct array_cache); + int memsize = sizeof(void *) * entries + sizeof(struct array_cache); struct array_cache *nc = NULL; - nc = kmalloc_node(memsize, GFP_KERNEL, node); + nc = kmalloc_node(memsize, gfp, node); + /* + * The array_cache structures contain pointers to free object. + * However, when such objects are allocated or transferred to another + * cache the pointers are not cleared and they could be counted as + * valid references during a kmemleak scan. Therefore, kmemleak must + * not scan such objects. + */ + kmemleak_no_scan(nc); if (nc) { nc->avail = 0; nc->limit = entries; @@ -756,25 +817,197 @@ static struct array_cache *alloc_arraycache(int node, int entries, return nc; } -#ifdef CONFIG_NUMA -static inline struct array_cache **alloc_alien_cache(int node, int limit) +static inline bool is_slab_pfmemalloc(struct page *page) +{ + return PageSlabPfmemalloc(page); +} + +/* Clears pfmemalloc_active if no slabs have pfmalloc set */ +static void recheck_pfmemalloc_active(struct kmem_cache *cachep, + struct array_cache *ac) +{ + struct kmem_cache_node *n = cachep->node[numa_mem_id()]; + struct page *page; + unsigned long flags; + + if (!pfmemalloc_active) + return; + + spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->slabs_full, lru) + if (is_slab_pfmemalloc(page)) + goto out; + + list_for_each_entry(page, &n->slabs_partial, lru) + if (is_slab_pfmemalloc(page)) + goto out; + + list_for_each_entry(page, &n->slabs_free, lru) + if (is_slab_pfmemalloc(page)) + goto out; + + pfmemalloc_active = false; +out: + spin_unlock_irqrestore(&n->list_lock, flags); +} + +static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac, + gfp_t flags, bool force_refill) +{ + int i; + void *objp = ac->entry[--ac->avail]; + + /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */ + if (unlikely(is_obj_pfmemalloc(objp))) { + struct kmem_cache_node *n; + + if (gfp_pfmemalloc_allowed(flags)) { + clear_obj_pfmemalloc(&objp); + return objp; + } + + /* The caller cannot use PFMEMALLOC objects, find another one */ + for (i = 0; i < ac->avail; i++) { + /* If a !PFMEMALLOC object is found, swap them */ + if (!is_obj_pfmemalloc(ac->entry[i])) { + objp = ac->entry[i]; + ac->entry[i] = ac->entry[ac->avail]; + ac->entry[ac->avail] = objp; + return objp; + } + } + + /* + * If there are empty slabs on the slabs_free list and we are + * being forced to refill the cache, mark this one !pfmemalloc. 
+ */ + n = cachep->node[numa_mem_id()]; + if (!list_empty(&n->slabs_free) && force_refill) { + struct page *page = virt_to_head_page(objp); + ClearPageSlabPfmemalloc(page); + clear_obj_pfmemalloc(&objp); + recheck_pfmemalloc_active(cachep, ac); + return objp; + } + + /* No !PFMEMALLOC objects available */ + ac->avail++; + objp = NULL; + } + + return objp; +} + +static inline void *ac_get_obj(struct kmem_cache *cachep, + struct array_cache *ac, gfp_t flags, bool force_refill) +{ + void *objp; + + if (unlikely(sk_memalloc_socks())) + objp = __ac_get_obj(cachep, ac, flags, force_refill); + else + objp = ac->entry[--ac->avail]; + + return objp; +} + +static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac, + void *objp) +{ + if (unlikely(pfmemalloc_active)) { + /* Some pfmemalloc slabs exist, check if this is one */ + struct page *page = virt_to_head_page(objp); + if (PageSlabPfmemalloc(page)) + set_obj_pfmemalloc(&objp); + } + + return objp; +} + +static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac, + void *objp) +{ + if (unlikely(sk_memalloc_socks())) + objp = __ac_put_obj(cachep, ac, objp); + + ac->entry[ac->avail++] = objp; +} + +/* + * Transfer objects in one arraycache to another. + * Locking must be handled by the caller. + * + * Return the number of entries transferred. + */ +static int transfer_objects(struct array_cache *to, + struct array_cache *from, unsigned int max) +{ + /* Figure out how many entries to transfer */ + int nr = min3(from->avail, max, to->limit - to->avail); + + if (!nr) + return 0; + + memcpy(to->entry + to->avail, from->entry + from->avail -nr, + sizeof(void *) *nr); + + from->avail -= nr; + to->avail += nr; + return nr; +} + +#ifndef CONFIG_NUMA + +#define drain_alien_cache(cachep, alien) do { } while (0) +#define reap_alien(cachep, n) do { } while (0) + +static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) +{ + return (struct array_cache **)BAD_ALIEN_MAGIC; +} + +static inline void free_alien_cache(struct array_cache **ac_ptr) +{ +} + +static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) +{ + return 0; +} + +static inline void *alternate_node_alloc(struct kmem_cache *cachep, + gfp_t flags) +{ + return NULL; +} + +static inline void *____cache_alloc_node(struct kmem_cache *cachep, + gfp_t flags, int nodeid) +{ + return NULL; +} + +#else /* CONFIG_NUMA */ + +static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int); +static void *alternate_node_alloc(struct kmem_cache *, gfp_t); + +static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp) { struct array_cache **ac_ptr; - int memsize = sizeof(void*)*MAX_NUMNODES; + int memsize = sizeof(void *) * nr_node_ids; int i; if (limit > 1) limit = 12; - ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node); + ac_ptr = kzalloc_node(memsize, gfp, node); if (ac_ptr) { for_each_node(i) { - if (i == node || !node_online(i)) { - ac_ptr[i] = NULL; + if (i == node || !node_online(i)) continue; - } - ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d); + ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp); if (!ac_ptr[i]) { - for (i--; i <=0; i--) + for (i--; i >= 0; i--) kfree(ac_ptr[i]); kfree(ac_ptr); return NULL; @@ -784,39 +1017,64 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit) return ac_ptr; } -static inline void free_alien_cache(struct array_cache **ac_ptr) +static void free_alien_cache(struct array_cache **ac_ptr) { int i; if (!ac_ptr) return; - for_each_node(i) - 
kfree(ac_ptr[i]); - + kfree(ac_ptr[i]); kfree(ac_ptr); } -static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache *ac, int node) +static void __drain_alien_cache(struct kmem_cache *cachep, + struct array_cache *ac, int node) { - struct kmem_list3 *rl3 = cachep->nodelists[node]; + struct kmem_cache_node *n = cachep->node[node]; if (ac->avail) { - spin_lock(&rl3->list_lock); + spin_lock(&n->list_lock); + /* + * Stuff objects into the remote nodes shared array first. + * That way we could avoid the overhead of putting the objects + * into the free lists and getting them back later. + */ + if (n->shared) + transfer_objects(n->shared, ac, ac->limit); + free_block(cachep, ac->entry, ac->avail, node); ac->avail = 0; - spin_unlock(&rl3->list_lock); + spin_unlock(&n->list_lock); + } +} + +/* + * Called from cache_reap() to regularly drain alien caches round robin. + */ +static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) +{ + int node = __this_cpu_read(slab_reap_node); + + if (n->alien) { + struct array_cache *ac = n->alien[node]; + + if (ac && ac->avail && spin_trylock_irq(&ac->lock)) { + __drain_alien_cache(cachep, ac, node); + spin_unlock_irq(&ac->lock); + } } } -static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3) +static void drain_alien_cache(struct kmem_cache *cachep, + struct array_cache **alien) { - int i=0; + int i = 0; struct array_cache *ac; unsigned long flags; for_each_online_node(i) { - ac = l3->alien[i]; + ac = alien[i]; if (ac) { spin_lock_irqsave(&ac->lock, flags); __drain_alien_cache(cachep, ac, i); @@ -824,339 +1082,575 @@ static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3) } } } -#else -#define alloc_alien_cache(node, limit) do { } while (0) -#define free_alien_cache(ac_ptr) do { } while (0) -#define drain_alien_cache(cachep, l3) do { } while (0) + +static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) +{ + int nodeid = page_to_nid(virt_to_page(objp)); + struct kmem_cache_node *n; + struct array_cache *alien = NULL; + int node; + + node = numa_mem_id(); + + /* + * Make sure we are not freeing a object from another node to the array + * cache on this cpu. + */ + if (likely(nodeid == node)) + return 0; + + n = cachep->node[node]; + STATS_INC_NODEFREES(cachep); + if (n->alien && n->alien[nodeid]) { + alien = n->alien[nodeid]; + spin_lock(&alien->lock); + if (unlikely(alien->avail == alien->limit)) { + STATS_INC_ACOVERFLOW(cachep); + __drain_alien_cache(cachep, alien, nodeid); + } + ac_put_obj(cachep, alien, objp); + spin_unlock(&alien->lock); + } else { + spin_lock(&(cachep->node[nodeid])->list_lock); + free_block(cachep, &objp, 1, nodeid); + spin_unlock(&(cachep->node[nodeid])->list_lock); + } + return 1; +} #endif -static int __devinit cpuup_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) +/* + * Allocates and initializes node for a node on each slab cache, used for + * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node + * will be allocated off-node since memory is not yet online for the new node. + * When hotplugging memory or a cpu, existing node are not replaced if + * already in use. + * + * Must hold slab_mutex. 
+ */ +static int init_cache_node_node(int node) { - long cpu = (long)hcpu; - kmem_cache_t* cachep; - struct kmem_list3 *l3 = NULL; - int node = cpu_to_node(cpu); - int memsize = sizeof(struct kmem_list3); - struct array_cache *nc = NULL; + struct kmem_cache *cachep; + struct kmem_cache_node *n; + const int memsize = sizeof(struct kmem_cache_node); - switch (action) { - case CPU_UP_PREPARE: - down(&cache_chain_sem); - /* we need to do this right in the beginning since - * alloc_arraycache's are going to use this list. - * kmalloc_node allows us to add the slab to the right - * kmem_list3 and not this cpu's kmem_list3 + list_for_each_entry(cachep, &slab_caches, list) { + /* + * Set up the kmem_cache_node for cpu before we can + * begin anything. Make sure some other cpu on this + * node has not already allocated this */ + if (!cachep->node[node]) { + n = kmalloc_node(memsize, GFP_KERNEL, node); + if (!n) + return -ENOMEM; + kmem_cache_node_init(n); + n->next_reap = jiffies + REAPTIMEOUT_NODE + + ((unsigned long)cachep) % REAPTIMEOUT_NODE; - list_for_each_entry(cachep, &cache_chain, next) { - /* setup the size64 kmemlist for cpu before we can - * begin anything. Make sure some other cpu on this - * node has not already allocated this + /* + * The kmem_cache_nodes don't come and go as CPUs + * come and go. slab_mutex is sufficient + * protection here. */ - if (!cachep->nodelists[node]) { - if (!(l3 = kmalloc_node(memsize, - GFP_KERNEL, node))) - goto bad; - kmem_list3_init(l3); - l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + - ((unsigned long)cachep)%REAPTIMEOUT_LIST3; - - cachep->nodelists[node] = l3; - } + cachep->node[node] = n; + } + + spin_lock_irq(&cachep->node[node]->list_lock); + cachep->node[node]->free_limit = + (1 + nr_cpus_node(node)) * + cachep->batchcount + cachep->num; + spin_unlock_irq(&cachep->node[node]->list_lock); + } + return 0; +} - spin_lock_irq(&cachep->nodelists[node]->list_lock); - cachep->nodelists[node]->free_limit = - (1 + nr_cpus_node(node)) * - cachep->batchcount + cachep->num; - spin_unlock_irq(&cachep->nodelists[node]->list_lock); +static inline int slabs_tofree(struct kmem_cache *cachep, + struct kmem_cache_node *n) +{ + return (n->free_objects + cachep->num - 1) / cachep->num; +} + +static void cpuup_canceled(long cpu) +{ + struct kmem_cache *cachep; + struct kmem_cache_node *n = NULL; + int node = cpu_to_mem(cpu); + const struct cpumask *mask = cpumask_of_node(node); + + list_for_each_entry(cachep, &slab_caches, list) { + struct array_cache *nc; + struct array_cache *shared; + struct array_cache **alien; + + /* cpu is dead; no one can alloc from it. 
*/ + nc = cachep->array[cpu]; + cachep->array[cpu] = NULL; + n = cachep->node[node]; + + if (!n) + goto free_array_cache; + + spin_lock_irq(&n->list_lock); + + /* Free limit for this kmem_cache_node */ + n->free_limit -= cachep->batchcount; + if (nc) + free_block(cachep, nc->entry, nc->avail, node); + + if (!cpumask_empty(mask)) { + spin_unlock_irq(&n->list_lock); + goto free_array_cache; + } + + shared = n->shared; + if (shared) { + free_block(cachep, shared->entry, + shared->avail, node); + n->shared = NULL; } - /* Now we can go ahead with allocating the shared array's - & array cache's */ - list_for_each_entry(cachep, &cache_chain, next) { - nc = alloc_arraycache(node, cachep->limit, - cachep->batchcount); - if (!nc) + alien = n->alien; + n->alien = NULL; + + spin_unlock_irq(&n->list_lock); + + kfree(shared); + if (alien) { + drain_alien_cache(cachep, alien); + free_alien_cache(alien); + } +free_array_cache: + kfree(nc); + } + /* + * In the previous loop, all the objects were freed to + * the respective cache's slabs, now we can go ahead and + * shrink each nodelist to its limit. + */ + list_for_each_entry(cachep, &slab_caches, list) { + n = cachep->node[node]; + if (!n) + continue; + drain_freelist(cachep, n, slabs_tofree(cachep, n)); + } +} + +static int cpuup_prepare(long cpu) +{ + struct kmem_cache *cachep; + struct kmem_cache_node *n = NULL; + int node = cpu_to_mem(cpu); + int err; + + /* + * We need to do this right in the beginning since + * alloc_arraycache's are going to use this list. + * kmalloc_node allows us to add the slab to the right + * kmem_cache_node and not this cpu's kmem_cache_node + */ + err = init_cache_node_node(node); + if (err < 0) + goto bad; + + /* + * Now we can go ahead with allocating the shared arrays and + * array caches + */ + list_for_each_entry(cachep, &slab_caches, list) { + struct array_cache *nc; + struct array_cache *shared = NULL; + struct array_cache **alien = NULL; + + nc = alloc_arraycache(node, cachep->limit, + cachep->batchcount, GFP_KERNEL); + if (!nc) + goto bad; + if (cachep->shared) { + shared = alloc_arraycache(node, + cachep->shared * cachep->batchcount, + 0xbaadf00d, GFP_KERNEL); + if (!shared) { + kfree(nc); goto bad; - cachep->array[cpu] = nc; - - l3 = cachep->nodelists[node]; - BUG_ON(!l3); - if (!l3->shared) { - if (!(nc = alloc_arraycache(node, - cachep->shared*cachep->batchcount, - 0xbaadf00d))) - goto bad; - - /* we are serialised from CPU_DEAD or - CPU_UP_CANCELLED by the cpucontrol lock */ - l3->shared = nc; } } - up(&cache_chain_sem); + if (use_alien_caches) { + alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL); + if (!alien) { + kfree(shared); + kfree(nc); + goto bad; + } + } + cachep->array[cpu] = nc; + n = cachep->node[node]; + BUG_ON(!n); + + spin_lock_irq(&n->list_lock); + if (!n->shared) { + /* + * We are serialised from CPU_DEAD or + * CPU_UP_CANCELLED by the cpucontrol lock + */ + n->shared = shared; + shared = NULL; + } +#ifdef CONFIG_NUMA + if (!n->alien) { + n->alien = alien; + alien = NULL; + } +#endif + spin_unlock_irq(&n->list_lock); + kfree(shared); + free_alien_cache(alien); + if (cachep->flags & SLAB_DEBUG_OBJECTS) + slab_set_debugobj_lock_classes_node(cachep, node); + else if (!OFF_SLAB(cachep) && + !(cachep->flags & SLAB_DESTROY_BY_RCU)) + on_slab_lock_classes_node(cachep, node); + } + init_node_lock_keys(node); + + return 0; +bad: + cpuup_canceled(cpu); + return -ENOMEM; +} + +static int cpuup_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + long cpu = (long)hcpu; + 
int err = 0; + + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + mutex_lock(&slab_mutex); + err = cpuup_prepare(cpu); + mutex_unlock(&slab_mutex); break; case CPU_ONLINE: + case CPU_ONLINE_FROZEN: start_cpu_timer(cpu); break; #ifdef CONFIG_HOTPLUG_CPU + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + /* + * Shutdown cache reaper. Note that the slab_mutex is + * held so that if cache_reap() is invoked it cannot do + * anything expensive but will only modify reap_work + * and reschedule the timer. + */ + cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu)); + /* Now the cache_reaper is guaranteed to be not running. */ + per_cpu(slab_reap_work, cpu).work.func = NULL; + break; + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: + start_cpu_timer(cpu); + break; case CPU_DEAD: - /* fall thru */ + case CPU_DEAD_FROZEN: + /* + * Even if all the cpus of a node are down, we don't free the + * kmem_cache_node of any cache. This to avoid a race between + * cpu_down, and a kmalloc allocation from another cpu for + * memory from the node of the cpu going down. The node + * structure is usually allocated from kmem_cache_create() and + * gets destroyed at kmem_cache_destroy(). + */ + /* fall through */ +#endif case CPU_UP_CANCELED: - down(&cache_chain_sem); - - list_for_each_entry(cachep, &cache_chain, next) { - struct array_cache *nc; - cpumask_t mask; - - mask = node_to_cpumask(node); - spin_lock_irq(&cachep->spinlock); - /* cpu is dead; no one can alloc from it. */ - nc = cachep->array[cpu]; - cachep->array[cpu] = NULL; - l3 = cachep->nodelists[node]; - - if (!l3) - goto unlock_cache; - - spin_lock(&l3->list_lock); - - /* Free limit for this kmem_list3 */ - l3->free_limit -= cachep->batchcount; - if (nc) - free_block(cachep, nc->entry, nc->avail, node); - - if (!cpus_empty(mask)) { - spin_unlock(&l3->list_lock); - goto unlock_cache; - } - - if (l3->shared) { - free_block(cachep, l3->shared->entry, - l3->shared->avail, node); - kfree(l3->shared); - l3->shared = NULL; - } - if (l3->alien) { - drain_alien_cache(cachep, l3); - free_alien_cache(l3->alien); - l3->alien = NULL; - } + case CPU_UP_CANCELED_FROZEN: + mutex_lock(&slab_mutex); + cpuup_canceled(cpu); + mutex_unlock(&slab_mutex); + break; + } + return notifier_from_errno(err); +} - /* free slabs belonging to this node */ - if (__node_shrink(cachep, node)) { - cachep->nodelists[node] = NULL; - spin_unlock(&l3->list_lock); - kfree(l3); - } else { - spin_unlock(&l3->list_lock); - } -unlock_cache: - spin_unlock_irq(&cachep->spinlock); - kfree(nc); +static struct notifier_block cpucache_notifier = { + &cpuup_callback, NULL, 0 +}; + +#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG) +/* + * Drains freelist for a node on each slab cache, used for memory hot-remove. + * Returns -EBUSY if all objects cannot be drained so that the node is not + * removed. + * + * Must hold slab_mutex. 
+ */ +static int __meminit drain_cache_node_node(int node) +{ + struct kmem_cache *cachep; + int ret = 0; + + list_for_each_entry(cachep, &slab_caches, list) { + struct kmem_cache_node *n; + + n = cachep->node[node]; + if (!n) + continue; + + drain_freelist(cachep, n, slabs_tofree(cachep, n)); + + if (!list_empty(&n->slabs_full) || + !list_empty(&n->slabs_partial)) { + ret = -EBUSY; + break; } - up(&cache_chain_sem); - break; -#endif } - return NOTIFY_OK; -bad: - up(&cache_chain_sem); - return NOTIFY_BAD; + return ret; } -static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 }; +static int __meminit slab_memory_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + struct memory_notify *mnb = arg; + int ret = 0; + int nid; + + nid = mnb->status_change_nid; + if (nid < 0) + goto out; + + switch (action) { + case MEM_GOING_ONLINE: + mutex_lock(&slab_mutex); + ret = init_cache_node_node(nid); + mutex_unlock(&slab_mutex); + break; + case MEM_GOING_OFFLINE: + mutex_lock(&slab_mutex); + ret = drain_cache_node_node(nid); + mutex_unlock(&slab_mutex); + break; + case MEM_ONLINE: + case MEM_OFFLINE: + case MEM_CANCEL_ONLINE: + case MEM_CANCEL_OFFLINE: + break; + } +out: + return notifier_from_errno(ret); +} +#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */ /* - * swap the static kmem_list3 with kmalloced memory + * swap the static kmem_cache_node with kmalloced memory */ -static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, - int nodeid) +static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, + int nodeid) { - struct kmem_list3 *ptr; + struct kmem_cache_node *ptr; - BUG_ON(cachep->nodelists[nodeid] != list); - ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid); + ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid); BUG_ON(!ptr); - local_irq_disable(); - memcpy(ptr, list, sizeof(struct kmem_list3)); + memcpy(ptr, list, sizeof(struct kmem_cache_node)); + /* + * Do not assume that spinlocks can be initialized via memcpy: + */ + spin_lock_init(&ptr->list_lock); + MAKE_ALL_LISTS(cachep, ptr, nodeid); - cachep->nodelists[nodeid] = ptr; - local_irq_enable(); + cachep->node[nodeid] = ptr; } -/* Initialisation. - * Called after the gfp() functions have been enabled, and before smp_init(). +/* + * For setting up all the kmem_cache_node for cache whose buffer_size is same as + * size of kmem_cache_node. + */ +static void __init set_up_node(struct kmem_cache *cachep, int index) +{ + int node; + + for_each_online_node(node) { + cachep->node[node] = &init_kmem_cache_node[index + node]; + cachep->node[node]->next_reap = jiffies + + REAPTIMEOUT_NODE + + ((unsigned long)cachep) % REAPTIMEOUT_NODE; + } +} + +/* + * The memory after the last cpu cache pointer is used for the + * the node pointer. + */ +static void setup_node_pointer(struct kmem_cache *cachep) +{ + cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids]; +} + +/* + * Initialisation. Called after the page allocator have been initialised and + * before smp_init(). 
*/ void __init kmem_cache_init(void) { - size_t left_over; - struct cache_sizes *sizes; - struct cache_names *names; int i; - for (i = 0; i < NUM_INIT_LISTS; i++) { - kmem_list3_init(&initkmem_list3[i]); - if (i < MAX_NUMNODES) - cache_cache.nodelists[i] = NULL; - } + BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) < + sizeof(struct rcu_head)); + kmem_cache = &kmem_cache_boot; + setup_node_pointer(kmem_cache); + + if (num_possible_nodes() == 1) + use_alien_caches = 0; + + for (i = 0; i < NUM_INIT_LISTS; i++) + kmem_cache_node_init(&init_kmem_cache_node[i]); + + set_up_node(kmem_cache, CACHE_CACHE); /* * Fragmentation resistance on low memory - only use bigger - * page orders on machines with more than 32MB of memory. + * page orders on machines with more than 32MB of memory if + * not overridden on the command line. */ - if (num_physpages > (32 << 20) >> PAGE_SHIFT) - slab_break_gfp_order = BREAK_GFP_ORDER_HI; + if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT) + slab_max_order = SLAB_MAX_ORDER_HI; /* Bootstrap is tricky, because several objects are allocated * from caches that do not exist yet: - * 1) initialize the cache_cache cache: it contains the kmem_cache_t - * structures of all caches, except cache_cache itself: cache_cache - * is statically allocated. + * 1) initialize the kmem_cache cache: it contains the struct + * kmem_cache structures of all caches, except kmem_cache itself: + * kmem_cache is statically allocated. * Initially an __init data area is used for the head array and the - * kmem_list3 structures, it's replaced with a kmalloc allocated + * kmem_cache_node structures, it's replaced with a kmalloc allocated * array at the end of the bootstrap. * 2) Create the first kmalloc cache. - * The kmem_cache_t for the new cache is allocated normally. + * The struct kmem_cache for the new cache is allocated normally. * An __init data area is used for the head array. * 3) Create the remaining kmalloc caches, with minimally sized * head arrays. - * 4) Replace the __init data head arrays for cache_cache and the first + * 4) Replace the __init data head arrays for kmem_cache and the first * kmalloc cache with kmalloc allocated arrays. - * 5) Replace the __init data for kmem_list3 for cache_cache and + * 5) Replace the __init data for kmem_cache_node for kmem_cache and * the other cache's with kmalloc allocated memory. * 6) Resize the head arrays of the kmalloc caches to their final sizes. 
*/ - /* 1) create the cache_cache */ - init_MUTEX(&cache_chain_sem); - INIT_LIST_HEAD(&cache_chain); - list_add(&cache_cache.next, &cache_chain); - cache_cache.colour_off = cache_line_size(); - cache_cache.array[smp_processor_id()] = &initarray_cache.cache; - cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE]; - - cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size()); - - cache_estimate(0, cache_cache.objsize, cache_line_size(), 0, - &left_over, &cache_cache.num); - if (!cache_cache.num) - BUG(); + /* 1) create the kmem_cache */ - cache_cache.colour = left_over/cache_cache.colour_off; - cache_cache.colour_next = 0; - cache_cache.slab_size = ALIGN(cache_cache.num*sizeof(kmem_bufctl_t) + - sizeof(struct slab), cache_line_size()); + /* + * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids + */ + create_boot_cache(kmem_cache, "kmem_cache", + offsetof(struct kmem_cache, array[nr_cpu_ids]) + + nr_node_ids * sizeof(struct kmem_cache_node *), + SLAB_HWCACHE_ALIGN); + list_add(&kmem_cache->list, &slab_caches); /* 2+3) create the kmalloc caches */ - sizes = malloc_sizes; - names = cache_names; - /* Initialize the caches that provide memory for the array cache - * and the kmem_list3 structures first. - * Without this, further allocations will bug + /* + * Initialize the caches that provide memory for the array cache and the + * kmem_cache_node structures first. Without this, further allocations will + * bug. */ - sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name, - sizes[INDEX_AC].cs_size, ARCH_KMALLOC_MINALIGN, - (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL); + kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac", + kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS); - if (INDEX_AC != INDEX_L3) - sizes[INDEX_L3].cs_cachep = - kmem_cache_create(names[INDEX_L3].name, - sizes[INDEX_L3].cs_size, ARCH_KMALLOC_MINALIGN, - (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL); + if (INDEX_AC != INDEX_NODE) + kmalloc_caches[INDEX_NODE] = + create_kmalloc_cache("kmalloc-node", + kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS); - while (sizes->cs_size != ULONG_MAX) { - /* - * For performance, all the general caches are L1 aligned. - * This should be particularly beneficial on SMP boxes, as it - * eliminates "false sharing". - * Note for systems short on memory removing the alignment will - * allow tighter packing of the smaller caches. - */ - if(!sizes->cs_cachep) - sizes->cs_cachep = kmem_cache_create(names->name, - sizes->cs_size, ARCH_KMALLOC_MINALIGN, - (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL, NULL); - - /* Inc off-slab bufctl limit until the ceiling is hit. 
*/ - if (!(OFF_SLAB(sizes->cs_cachep))) { - offslab_limit = sizes->cs_size-sizeof(struct slab); - offslab_limit /= sizeof(kmem_bufctl_t); - } - - sizes->cs_dmacachep = kmem_cache_create(names->name_dma, - sizes->cs_size, ARCH_KMALLOC_MINALIGN, - (ARCH_KMALLOC_FLAGS | SLAB_CACHE_DMA | SLAB_PANIC), - NULL, NULL); + slab_early_init = 0; - sizes++; - names++; - } /* 4) Replace the bootstrap head arrays */ { - void * ptr; + struct array_cache *ptr; - ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); + ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); - local_irq_disable(); - BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache); - memcpy(ptr, ac_data(&cache_cache), - sizeof(struct arraycache_init)); - cache_cache.array[smp_processor_id()] = ptr; - local_irq_enable(); + memcpy(ptr, cpu_cache_get(kmem_cache), + sizeof(struct arraycache_init)); + /* + * Do not assume that spinlocks can be initialized via memcpy: + */ + spin_lock_init(&ptr->lock); - ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL); + kmem_cache->array[smp_processor_id()] = ptr; - local_irq_disable(); - BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep) - != &initarray_generic.cache); - memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep), - sizeof(struct arraycache_init)); - malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] = - ptr; - local_irq_enable(); + ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); + + BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC]) + != &initarray_generic.cache); + memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]), + sizeof(struct arraycache_init)); + /* + * Do not assume that spinlocks can be initialized via memcpy: + */ + spin_lock_init(&ptr->lock); + + kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr; } - /* 5) Replace the bootstrap kmem_list3's */ + /* 5) Replace the bootstrap kmem_cache_node */ { - int node; - /* Replace the static kmem_list3 structures for the boot cpu */ - init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], - numa_node_id()); - - for_each_online_node(node) { - init_list(malloc_sizes[INDEX_AC].cs_cachep, - &initkmem_list3[SIZE_AC+node], node); - - if (INDEX_AC != INDEX_L3) { - init_list(malloc_sizes[INDEX_L3].cs_cachep, - &initkmem_list3[SIZE_L3+node], - node); + int nid; + + for_each_online_node(nid) { + init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid); + + init_list(kmalloc_caches[INDEX_AC], + &init_kmem_cache_node[SIZE_AC + nid], nid); + + if (INDEX_AC != INDEX_NODE) { + init_list(kmalloc_caches[INDEX_NODE], + &init_kmem_cache_node[SIZE_NODE + nid], nid); } } } + create_kmalloc_caches(ARCH_KMALLOC_FLAGS); +} + +void __init kmem_cache_init_late(void) +{ + struct kmem_cache *cachep; + + slab_state = UP; + /* 6) resize the head arrays to their final sizes */ - { - kmem_cache_t *cachep; - down(&cache_chain_sem); - list_for_each_entry(cachep, &cache_chain, next) - enable_cpucache(cachep); - up(&cache_chain_sem); - } + mutex_lock(&slab_mutex); + list_for_each_entry(cachep, &slab_caches, list) + if (enable_cpucache(cachep, GFP_NOWAIT)) + BUG(); + mutex_unlock(&slab_mutex); + + /* Annotate slab for lockdep -- annotate the malloc caches */ + init_lock_keys(); /* Done! 
*/ - g_cpucache_up = FULL; + slab_state = FULL; - /* Register a cpu startup notifier callback - * that initializes ac_data for all new cpus + /* + * Register a cpu startup notifier callback that initializes + * cpu_cache_get for all new cpus */ register_cpu_notifier(&cpucache_notifier); - /* The reap timers are started later, with a module init call: - * That part of the kernel is not yet operational. +#ifdef CONFIG_NUMA + /* + * Register a memory hotplug callback that initializes and frees + * node. + */ + hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); +#endif + + /* + * The reap timers are started later, with a module init call: That part + * of the kernel is not yet operational. */ } @@ -1164,18 +1658,71 @@ static int __init cpucache_init(void) { int cpu; - /* - * Register the timers that return unneeded - * pages to gfp. + /* + * Register the timers that return unneeded pages to the page allocator */ for_each_online_cpu(cpu) start_cpu_timer(cpu); + /* Done! */ + slab_state = FULL; return 0; } - __initcall(cpucache_init); +static noinline void +slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) +{ +#if DEBUG + struct kmem_cache_node *n; + struct page *page; + unsigned long flags; + int node; + static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + + if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs)) + return; + + printk(KERN_WARNING + "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n", + nodeid, gfpflags); + printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n", + cachep->name, cachep->size, cachep->gfporder); + + for_each_online_node(node) { + unsigned long active_objs = 0, num_objs = 0, free_objects = 0; + unsigned long active_slabs = 0, num_slabs = 0; + + n = cachep->node[node]; + if (!n) + continue; + + spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->slabs_full, lru) { + active_objs += cachep->num; + active_slabs++; + } + list_for_each_entry(page, &n->slabs_partial, lru) { + active_objs += page->active; + active_slabs++; + } + list_for_each_entry(page, &n->slabs_free, lru) + num_slabs++; + + free_objects += n->free_objects; + spin_unlock_irqrestore(&n->list_lock, flags); + + num_slabs += active_slabs; + num_objs = num_slabs * cachep->num; + printk(KERN_WARNING + " node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n", + node, active_slabs, num_slabs, active_objs, num_objs, + free_objects); + } +#endif +} + /* * Interface to system's page allocator. No need to hold the cache-lock. * @@ -1183,82 +1730,109 @@ __initcall(cpucache_init); * did not request dmaable memory, we might get it, but that * would be relatively rare and ignorable. 
*/ -static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid) +static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, + int nodeid) { struct page *page; - void *addr; - int i; + int nr_pages; - flags |= cachep->gfpflags; - if (likely(nodeid == -1)) { - page = alloc_pages(flags, cachep->gfporder); - } else { - page = alloc_pages_node(nodeid, flags, cachep->gfporder); - } - if (!page) + flags |= cachep->allocflags; + if (cachep->flags & SLAB_RECLAIM_ACCOUNT) + flags |= __GFP_RECLAIMABLE; + + if (memcg_charge_slab(cachep, flags, cachep->gfporder)) + return NULL; + + page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder); + if (!page) { + memcg_uncharge_slab(cachep, cachep->gfporder); + slab_out_of_memory(cachep, flags, nodeid); return NULL; - addr = page_address(page); + } + + /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ + if (unlikely(page->pfmemalloc)) + pfmemalloc_active = true; - i = (1 << cachep->gfporder); + nr_pages = (1 << cachep->gfporder); if (cachep->flags & SLAB_RECLAIM_ACCOUNT) - atomic_add(i, &slab_reclaim_pages); - add_page_state(nr_slab, i); - while (i--) { - SetPageSlab(page); - page++; + add_zone_page_state(page_zone(page), + NR_SLAB_RECLAIMABLE, nr_pages); + else + add_zone_page_state(page_zone(page), + NR_SLAB_UNRECLAIMABLE, nr_pages); + __SetPageSlab(page); + if (page->pfmemalloc) + SetPageSlabPfmemalloc(page); + + if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { + kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid); + + if (cachep->ctor) + kmemcheck_mark_uninitialized_pages(page, nr_pages); + else + kmemcheck_mark_unallocated_pages(page, nr_pages); } - return addr; + + return page; } /* * Interface to system's page release. */ -static void kmem_freepages(kmem_cache_t *cachep, void *addr) +static void kmem_freepages(struct kmem_cache *cachep, struct page *page) { - unsigned long i = (1<<cachep->gfporder); - struct page *page = virt_to_page(addr); - const unsigned long nr_freed = i; + const unsigned long nr_freed = (1 << cachep->gfporder); + + kmemcheck_free_shadow(page, cachep->gfporder); + + if (cachep->flags & SLAB_RECLAIM_ACCOUNT) + sub_zone_page_state(page_zone(page), + NR_SLAB_RECLAIMABLE, nr_freed); + else + sub_zone_page_state(page_zone(page), + NR_SLAB_UNRECLAIMABLE, nr_freed); + + BUG_ON(!PageSlab(page)); + __ClearPageSlabPfmemalloc(page); + __ClearPageSlab(page); + page_mapcount_reset(page); + page->mapping = NULL; - while (i--) { - if (!TestClearPageSlab(page)) - BUG(); - page++; - } - sub_page_state(nr_slab, nr_freed); if (current->reclaim_state) current->reclaim_state->reclaimed_slab += nr_freed; - free_pages((unsigned long)addr, cachep->gfporder); - if (cachep->flags & SLAB_RECLAIM_ACCOUNT) - atomic_sub(1<<cachep->gfporder, &slab_reclaim_pages); + __free_pages(page, cachep->gfporder); + memcg_uncharge_slab(cachep, cachep->gfporder); } static void kmem_rcu_free(struct rcu_head *head) { - struct slab_rcu *slab_rcu = (struct slab_rcu *) head; - kmem_cache_t *cachep = slab_rcu->cachep; + struct kmem_cache *cachep; + struct page *page; - kmem_freepages(cachep, slab_rcu->addr); - if (OFF_SLAB(cachep)) - kmem_cache_free(cachep->slabp_cache, slab_rcu); + page = container_of(head, struct page, rcu_head); + cachep = page->slab_cache; + + kmem_freepages(cachep, page); } #if DEBUG #ifdef CONFIG_DEBUG_PAGEALLOC -static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, - unsigned long caller) +static void store_stackinfo(struct kmem_cache *cachep, unsigned 
long *addr, + unsigned long caller) { - int size = obj_reallen(cachep); + int size = cachep->object_size; - addr = (unsigned long *)&((char*)addr)[obj_dbghead(cachep)]; + addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)]; - if (size < 5*sizeof(unsigned long)) + if (size < 5 * sizeof(unsigned long)) return; - *addr++=0x12345678; - *addr++=caller; - *addr++=smp_processor_id(); - size -= 3*sizeof(unsigned long); + *addr++ = 0x12345678; + *addr++ = caller; + *addr++ = smp_processor_id(); + size -= 3 * sizeof(unsigned long); { unsigned long *sptr = &caller; unsigned long svalue; @@ -1266,7 +1840,7 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, while (!kstack_end(sptr)) { svalue = *sptr++; if (kernel_text_address(svalue)) { - *addr++=svalue; + *addr++ = svalue; size -= sizeof(unsigned long); if (size <= sizeof(unsigned long)) break; @@ -1274,88 +1848,108 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr, } } - *addr++=0x87654321; + *addr++ = 0x87654321; } #endif -static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val) +static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) { - int size = obj_reallen(cachep); - addr = &((char*)addr)[obj_dbghead(cachep)]; + int size = cachep->object_size; + addr = &((char *)addr)[obj_offset(cachep)]; memset(addr, val, size); - *(unsigned char *)(addr+size-1) = POISON_END; + *(unsigned char *)(addr + size - 1) = POISON_END; } static void dump_line(char *data, int offset, int limit) { int i; - printk(KERN_ERR "%03x:", offset); - for (i=0;i<limit;i++) { - printk(" %02x", (unsigned char)data[offset+i]); + unsigned char error = 0; + int bad_count = 0; + + printk(KERN_ERR "%03x: ", offset); + for (i = 0; i < limit; i++) { + if (data[offset + i] != POISON_FREE) { + error = data[offset + i]; + bad_count++; + } + } + print_hex_dump(KERN_CONT, "", 0, 16, 1, + &data[offset], limit, 1); + + if (bad_count == 1) { + error ^= POISON_FREE; + if (!(error & (error - 1))) { + printk(KERN_ERR "Single bit error detected. 
Probably " + "bad RAM.\n"); +#ifdef CONFIG_X86 + printk(KERN_ERR "Run memtest86+ or a similar memory " + "test tool.\n"); +#else + printk(KERN_ERR "Run a memory test tool.\n"); +#endif + } } - printk("\n"); } #endif #if DEBUG -static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines) +static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) { int i, size; char *realobj; if (cachep->flags & SLAB_RED_ZONE) { - printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n", + printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n", *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); } if (cachep->flags & SLAB_STORE_USER) { - printk(KERN_ERR "Last user: [<%p>]", - *dbg_userword(cachep, objp)); - print_symbol("(%s)", - (unsigned long)*dbg_userword(cachep, objp)); - printk("\n"); - } - realobj = (char*)objp+obj_dbghead(cachep); - size = obj_reallen(cachep); - for (i=0; i<size && lines;i+=16, lines--) { + printk(KERN_ERR "Last user: [<%p>](%pSR)\n", + *dbg_userword(cachep, objp), + *dbg_userword(cachep, objp)); + } + realobj = (char *)objp + obj_offset(cachep); + size = cachep->object_size; + for (i = 0; i < size && lines; i += 16, lines--) { int limit; limit = 16; - if (i+limit > size) - limit = size-i; + if (i + limit > size) + limit = size - i; dump_line(realobj, i, limit); } } -static void check_poison_obj(kmem_cache_t *cachep, void *objp) +static void check_poison_obj(struct kmem_cache *cachep, void *objp) { char *realobj; int size, i; int lines = 0; - realobj = (char*)objp+obj_dbghead(cachep); - size = obj_reallen(cachep); + realobj = (char *)objp + obj_offset(cachep); + size = cachep->object_size; - for (i=0;i<size;i++) { + for (i = 0; i < size; i++) { char exp = POISON_FREE; - if (i == size-1) + if (i == size - 1) exp = POISON_END; if (realobj[i] != exp) { int limit; /* Mismatch ! */ /* Print header */ if (lines == 0) { - printk(KERN_ERR "Slab corruption: start=%p, len=%d\n", - realobj, size); + printk(KERN_ERR + "Slab corruption (%s): %s start=%p, len=%d\n", + print_tainted(), cachep->name, realobj, size); print_objinfo(cachep, objp, 0); } /* Hexdump the affected line */ - i = (i/16)*16; + i = (i / 16) * 16; limit = 16; - if (i+limit > size) - limit = size-i; + if (i + limit > size) + limit = size - i; dump_line(realobj, i, limit); i += 16; lines++; @@ -1368,45 +1962,42 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp) /* Print some data about the neighboring objects, if they * exist: */ - struct slab *slabp = GET_PAGE_SLAB(virt_to_page(objp)); - int objnr; + struct page *page = virt_to_head_page(objp); + unsigned int objnr; - objnr = (objp-slabp->s_mem)/cachep->objsize; + objnr = obj_to_index(cachep, page, objp); if (objnr) { - objp = slabp->s_mem+(objnr-1)*cachep->objsize; - realobj = (char*)objp+obj_dbghead(cachep); + objp = index_to_obj(cachep, page, objnr - 1); + realobj = (char *)objp + obj_offset(cachep); printk(KERN_ERR "Prev obj: start=%p, len=%d\n", - realobj, size); + realobj, size); print_objinfo(cachep, objp, 2); } - if (objnr+1 < cachep->num) { - objp = slabp->s_mem+(objnr+1)*cachep->objsize; - realobj = (char*)objp+obj_dbghead(cachep); + if (objnr + 1 < cachep->num) { + objp = index_to_obj(cachep, page, objnr + 1); + realobj = (char *)objp + obj_offset(cachep); printk(KERN_ERR "Next obj: start=%p, len=%d\n", - realobj, size); + realobj, size); print_objinfo(cachep, objp, 2); } } } #endif -/* Destroy all the objs in a slab, and release the mem back to the system. - * Before calling the slab must have been unlinked from the cache. 
- * The cache-lock is not held/needed.
- */
-static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
-{
-	void *addr = slabp->s_mem - slabp->colouroff;
-
 #if DEBUG
+static void slab_destroy_debugcheck(struct kmem_cache *cachep,
+						struct page *page)
+{
 	int i;
 	for (i = 0; i < cachep->num; i++) {
-		void *objp = slabp->s_mem + cachep->objsize * i;
+		void *objp = index_to_obj(cachep, page, i);
 
 		if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-			if ((cachep->objsize%PAGE_SIZE)==0 && OFF_SLAB(cachep))
-				kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE,1);
+			if (cachep->size % PAGE_SIZE == 0 &&
+					OFF_SLAB(cachep))
+				kernel_map_pages(virt_to_page(objp),
+					cachep->size / PAGE_SIZE, 1);
 			else
 				check_poison_obj(cachep, objp);
 #else
@@ -1416,70 +2007,208 @@ static void slab_destroy (kmem_cache_t *cachep, struct slab *slabp)
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
 				slab_error(cachep, "start of a freed object "
-					"was overwritten");
+					   "was overwritten");
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
 				slab_error(cachep, "end of a freed object "
-					"was overwritten");
+					   "was overwritten");
 		}
-		if (cachep->dtor && !(cachep->flags & SLAB_POISON))
-			(cachep->dtor)(objp+obj_dbghead(cachep), cachep, 0);
 	}
+}
 #else
-	if (cachep->dtor) {
-		int i;
-		for (i = 0; i < cachep->num; i++) {
-			void* objp = slabp->s_mem+cachep->objsize*i;
-			(cachep->dtor)(objp, cachep, 0);
-		}
-	}
+static void slab_destroy_debugcheck(struct kmem_cache *cachep,
+						struct page *page)
+{
+}
 #endif
 
+/**
+ * slab_destroy - destroy and release all objects in a slab
+ * @cachep: cache pointer being destroyed
+ * @page: page pointer being destroyed
+ *
+ * Destroy all the objs in a slab, and release the mem back to the system.
+ * Before calling the slab must have been unlinked from the cache. The
+ * cache-lock is not held/needed.
+ */
+static void slab_destroy(struct kmem_cache *cachep, struct page *page)
+{
+	void *freelist;
+
+	freelist = page->freelist;
+	slab_destroy_debugcheck(cachep, page);
 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
-		struct slab_rcu *slab_rcu;
+		struct rcu_head *head;
+
+		/*
+		 * RCU free overloads the RCU head over the LRU.
+		 * slab_page has been overloaded over the LRU,
+		 * however it is not used from now on so that
+		 * we can use it safely.
+		 */
+		head = (void *)&page->rcu_head;
+		call_rcu(head, kmem_rcu_free);
 
-		slab_rcu = (struct slab_rcu *) slabp;
-		slab_rcu->cachep = cachep;
-		slab_rcu->addr = addr;
-		call_rcu(&slab_rcu->head, kmem_rcu_free);
 	} else {
-		kmem_freepages(cachep, addr);
-		if (OFF_SLAB(cachep))
-			kmem_cache_free(cachep->slabp_cache, slabp);
+		kmem_freepages(cachep, page);
+	}
+
+	/*
+	 * From now on, we don't use freelist
+	 * although actual page can be freed in rcu context
+	 */
+	if (OFF_SLAB(cachep))
+		kmem_cache_free(cachep->freelist_cache, freelist);
+}
+
+/**
+ * calculate_slab_order - calculate size (page order) of slabs
+ * @cachep: pointer to the cache that is being created
+ * @size: size of objects to be created in this cache.
+ * @align: required alignment for the objects.
+ * @flags: slab allocation flags
+ *
+ * Also calculates the number of objects per slab.
+ *
+ * This could be made much more intelligent. For now, try to avoid using
+ * high order pages for slabs. When the gfp() functions are more friendly
+ * towards high-order requests, this should be changed.
+ */ +static size_t calculate_slab_order(struct kmem_cache *cachep, + size_t size, size_t align, unsigned long flags) +{ + unsigned long offslab_limit; + size_t left_over = 0; + int gfporder; + + for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) { + unsigned int num; + size_t remainder; + + cache_estimate(gfporder, size, align, flags, &remainder, &num); + if (!num) + continue; + + /* Can't handle number of objects more than SLAB_OBJ_MAX_NUM */ + if (num > SLAB_OBJ_MAX_NUM) + break; + + if (flags & CFLGS_OFF_SLAB) { + size_t freelist_size_per_obj = sizeof(freelist_idx_t); + /* + * Max number of objs-per-slab for caches which + * use off-slab slabs. Needed to avoid a possible + * looping condition in cache_grow(). + */ + if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK)) + freelist_size_per_obj += sizeof(char); + offslab_limit = size; + offslab_limit /= freelist_size_per_obj; + + if (num > offslab_limit) + break; + } + + /* Found something acceptable - save it away */ + cachep->num = num; + cachep->gfporder = gfporder; + left_over = remainder; + + /* + * A VFS-reclaimable slab tends to have most allocations + * as GFP_NOFS and we really don't want to have to be allocating + * higher-order pages when we are unable to shrink dcache. + */ + if (flags & SLAB_RECLAIM_ACCOUNT) + break; + + /* + * Large number of objects is good, but very large slabs are + * currently bad for the gfp()s. + */ + if (gfporder >= slab_max_order) + break; + + /* + * Acceptable internal fragmentation? + */ + if (left_over * 8 <= (PAGE_SIZE << gfporder)) + break; } + return left_over; } -/* For setting up all the kmem_list3s for cache whose objsize is same - as size of kmem_list3. */ -static inline void set_up_list3s(kmem_cache_t *cachep, int index) +static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) { - int node; + if (slab_state >= FULL) + return enable_cpucache(cachep, gfp); - for_each_online_node(node) { - cachep->nodelists[node] = &initkmem_list3[index+node]; - cachep->nodelists[node]->next_reap = jiffies + - REAPTIMEOUT_LIST3 + - ((unsigned long)cachep)%REAPTIMEOUT_LIST3; + if (slab_state == DOWN) { + /* + * Note: Creation of first cache (kmem_cache). + * The setup_node is taken care + * of by the caller of __kmem_cache_create + */ + cachep->array[smp_processor_id()] = &initarray_generic.cache; + slab_state = PARTIAL; + } else if (slab_state == PARTIAL) { + /* + * Note: the second kmem_cache_create must create the cache + * that's used by kmalloc(24), otherwise the creation of + * further caches will BUG(). + */ + cachep->array[smp_processor_id()] = &initarray_generic.cache; + + /* + * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is + * the second cache, then we need to set up all its node/, + * otherwise the creation of further caches will BUG(). 
+ */ + set_up_node(cachep, SIZE_AC); + if (INDEX_AC == INDEX_NODE) + slab_state = PARTIAL_NODE; + else + slab_state = PARTIAL_ARRAYCACHE; + } else { + /* Remaining boot caches */ + cachep->array[smp_processor_id()] = + kmalloc(sizeof(struct arraycache_init), gfp); + + if (slab_state == PARTIAL_ARRAYCACHE) { + set_up_node(cachep, SIZE_NODE); + slab_state = PARTIAL_NODE; + } else { + int node; + for_each_online_node(node) { + cachep->node[node] = + kmalloc_node(sizeof(struct kmem_cache_node), + gfp, node); + BUG_ON(!cachep->node[node]); + kmem_cache_node_init(cachep->node[node]); + } + } } + cachep->node[numa_mem_id()]->next_reap = + jiffies + REAPTIMEOUT_NODE + + ((unsigned long)cachep) % REAPTIMEOUT_NODE; + + cpu_cache_get(cachep)->avail = 0; + cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; + cpu_cache_get(cachep)->batchcount = 1; + cpu_cache_get(cachep)->touched = 0; + cachep->batchcount = 1; + cachep->limit = BOOT_CPUCACHE_ENTRIES; + return 0; } /** - * kmem_cache_create - Create a cache. - * @name: A string which is used in /proc/slabinfo to identify this cache. - * @size: The size of objects to be created in this cache. - * @align: The required alignment for the objects. + * __kmem_cache_create - Create a cache. + * @cachep: cache management descriptor * @flags: SLAB flags - * @ctor: A constructor for the objects. - * @dtor: A destructor for the objects. * * Returns a ptr to the cache on success, NULL on failure. * Cannot be called within a int, but can be interrupted. - * The @ctor is run when new pages are allocated by the cache - * and the @dtor is run before the pages are handed back. + * The @ctor is run when new pages are allocated by the cache. * - * @name must be valid until the cache is destroyed. This implies that - * the module calling this has to destroy the cache before getting - * unloaded. - * * The flags are * * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5) @@ -1488,73 +2217,19 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index) * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check * for buffer overruns. * - * %SLAB_NO_REAP - Don't automatically reap this cache when we're under - * memory pressure. - * * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware * cacheline. This can be beneficial if you're counting cycles as closely * as davem. */ -kmem_cache_t * -kmem_cache_create (const char *name, size_t size, size_t align, - unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long), - void (*dtor)(void*, kmem_cache_t *, unsigned long)) +int +__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) { - size_t left_over, slab_size, ralign; - kmem_cache_t *cachep = NULL; - struct list_head *p; - - /* - * Sanity checks... these are all serious usage bugs. - */ - if ((!name) || - in_interrupt() || - (size < BYTES_PER_WORD) || - (size > (1<<MAX_OBJ_ORDER)*PAGE_SIZE) || - (dtor && !ctor)) { - printk(KERN_ERR "%s: Early error in slab %s\n", - __FUNCTION__, name); - BUG(); - } - - down(&cache_chain_sem); - - list_for_each(p, &cache_chain) { - kmem_cache_t *pc = list_entry(p, kmem_cache_t, next); - mm_segment_t old_fs = get_fs(); - char tmp; - int res; - - /* - * This happens when the module gets unloaded and doesn't - * destroy its slab cache and no-one else reuses the vmalloc - * area of the module. Print a warning. 
- */ - set_fs(KERNEL_DS); - res = __get_user(tmp, pc->name); - set_fs(old_fs); - if (res) { - printk("SLAB: cache with size %d has lost its name\n", - pc->objsize); - continue; - } - - if (!strcmp(pc->name,name)) { - printk("kmem_cache_create: duplicate cache %s\n", name); - dump_stack(); - goto oops; - } - } + size_t left_over, freelist_size, ralign; + gfp_t gfp; + int err; + size_t size = cachep->size; #if DEBUG - WARN_ON(strchr(name, ' ')); /* It confuses parsers */ - if ((flags & SLAB_DEBUG_INITIAL) && !ctor) { - /* No constructor, but inital state check requested */ - printk(KERN_ERR "%s: No con, but init state check " - "requested - %s\n", __FUNCTION__, name); - flags &= ~SLAB_DEBUG_INITIAL; - } - #if FORCED_DEBUG /* * Enable redzoning and last user accounting, except for caches with @@ -1562,266 +2237,187 @@ kmem_cache_create (const char *name, size_t size, size_t align, * above the next power of two: caches with object sizes just above a * power of two have a significant amount of internal fragmentation. */ - if ((size < 4096 || fls(size-1) == fls(size-1+3*BYTES_PER_WORD))) - flags |= SLAB_RED_ZONE|SLAB_STORE_USER; + if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN + + 2 * sizeof(unsigned long long))) + flags |= SLAB_RED_ZONE | SLAB_STORE_USER; if (!(flags & SLAB_DESTROY_BY_RCU)) flags |= SLAB_POISON; #endif if (flags & SLAB_DESTROY_BY_RCU) BUG_ON(flags & SLAB_POISON); #endif - if (flags & SLAB_DESTROY_BY_RCU) - BUG_ON(dtor); /* - * Always checks flags, a caller might be expecting debug - * support which isn't available. - */ - if (flags & ~CREATE_MASK) - BUG(); - - /* Check that size is in terms of words. This is needed to avoid + * Check that size is in terms of words. This is needed to avoid * unaligned accesses for some archs when redzoning is used, and makes * sure any on-slab bufctl's are also correctly aligned. */ - if (size & (BYTES_PER_WORD-1)) { - size += (BYTES_PER_WORD-1); - size &= ~(BYTES_PER_WORD-1); + if (size & (BYTES_PER_WORD - 1)) { + size += (BYTES_PER_WORD - 1); + size &= ~(BYTES_PER_WORD - 1); } - /* calculate out the final buffer alignment: */ - /* 1) arch recommendation: can be overridden for debug */ - if (flags & SLAB_HWCACHE_ALIGN) { - /* Default alignment: as specified by the arch code. - * Except if an object is really small, then squeeze multiple - * objects into one cacheline. - */ - ralign = cache_line_size(); - while (size <= ralign/2) - ralign /= 2; - } else { + /* + * Redzoning and user store require word alignment or possibly larger. + * Note this will be overridden by architecture or caller mandated + * alignment if either is greater than BYTES_PER_WORD. + */ + if (flags & SLAB_STORE_USER) ralign = BYTES_PER_WORD; + + if (flags & SLAB_RED_ZONE) { + ralign = REDZONE_ALIGN; + /* If redzoning, ensure that the second redzone is suitably + * aligned, by adjusting the object size accordingly. */ + size += REDZONE_ALIGN - 1; + size &= ~(REDZONE_ALIGN - 1); } - /* 2) arch mandated alignment: disables debug if necessary */ - if (ralign < ARCH_SLAB_MINALIGN) { - ralign = ARCH_SLAB_MINALIGN; - if (ralign > BYTES_PER_WORD) - flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER); - } - /* 3) caller mandated alignment: disables debug if necessary */ - if (ralign < align) { - ralign = align; - if (ralign > BYTES_PER_WORD) - flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER); + + /* 3) caller mandated alignment */ + if (ralign < cachep->align) { + ralign = cachep->align; } - /* 4) Store it. Note that the debug code below can reduce - * the alignment to BYTES_PER_WORD. 
+ /* disable debug if necessary */ + if (ralign > __alignof__(unsigned long long)) + flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); + /* + * 4) Store it. */ - align = ralign; + cachep->align = ralign; - /* Get cache's description obj. */ - cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL); - if (!cachep) - goto oops; - memset(cachep, 0, sizeof(kmem_cache_t)); + if (slab_is_available()) + gfp = GFP_KERNEL; + else + gfp = GFP_NOWAIT; + setup_node_pointer(cachep); #if DEBUG - cachep->reallen = size; + /* + * Both debugging options require word-alignment which is calculated + * into align above. + */ if (flags & SLAB_RED_ZONE) { - /* redzoning only works with word aligned caches */ - align = BYTES_PER_WORD; - /* add space for red zone words */ - cachep->dbghead += BYTES_PER_WORD; - size += 2*BYTES_PER_WORD; + cachep->obj_offset += sizeof(unsigned long long); + size += 2 * sizeof(unsigned long long); } if (flags & SLAB_STORE_USER) { - /* user store requires word alignment and - * one word storage behind the end of the real - * object. + /* user store requires one word storage behind the end of + * the real object. But if the second red zone needs to be + * aligned to 64 bits, we must allow that much space. */ - align = BYTES_PER_WORD; - size += BYTES_PER_WORD; + if (flags & SLAB_RED_ZONE) + size += REDZONE_ALIGN; + else + size += BYTES_PER_WORD; } #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) - if (size >= malloc_sizes[INDEX_L3+1].cs_size && cachep->reallen > cache_line_size() && size < PAGE_SIZE) { - cachep->dbghead += PAGE_SIZE - size; + if (size >= kmalloc_size(INDEX_NODE + 1) + && cachep->object_size > cache_line_size() + && ALIGN(size, cachep->align) < PAGE_SIZE) { + cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); size = PAGE_SIZE; } #endif #endif - /* Determine if the slab management is 'on' or 'off' slab. */ - if (size >= (PAGE_SIZE>>3)) + /* + * Determine if the slab management is 'on' or 'off' slab. + * (bootstrapping cannot cope with offslab caches so don't do + * it too early on. Always use on-slab management when + * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak) + */ + if ((size >= (PAGE_SIZE >> 5)) && !slab_early_init && + !(flags & SLAB_NOLEAKTRACE)) /* * Size is large, assume best to place the slab management obj * off-slab (should allow better packing of objs). */ flags |= CFLGS_OFF_SLAB; - size = ALIGN(size, align); - - if ((flags & SLAB_RECLAIM_ACCOUNT) && size <= PAGE_SIZE) { - /* - * A VFS-reclaimable slab tends to have most allocations - * as GFP_NOFS and we really don't want to have to be allocating - * higher-order pages when we are unable to shrink dcache. - */ - cachep->gfporder = 0; - cache_estimate(cachep->gfporder, size, align, flags, - &left_over, &cachep->num); - } else { - /* - * Calculate size (in pages) of slabs, and the num of objs per - * slab. This could be made much more intelligent. For now, - * try to avoid using high page-orders for slabs. When the - * gfp() funcs are more friendly towards high-order requests, - * this should be changed. - */ - do { - unsigned int break_flag = 0; -cal_wastage: - cache_estimate(cachep->gfporder, size, align, flags, - &left_over, &cachep->num); - if (break_flag) - break; - if (cachep->gfporder >= MAX_GFP_ORDER) - break; - if (!cachep->num) - goto next; - if (flags & CFLGS_OFF_SLAB && - cachep->num > offslab_limit) { - /* This num of objs will cause problems. 
*/ - cachep->gfporder--; - break_flag++; - goto cal_wastage; - } + size = ALIGN(size, cachep->align); + /* + * We should restrict the number of objects in a slab to implement + * byte sized index. Refer comment on SLAB_OBJ_MIN_SIZE definition. + */ + if (FREELIST_BYTE_INDEX && size < SLAB_OBJ_MIN_SIZE) + size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); - /* - * Large num of objs is good, but v. large slabs are - * currently bad for the gfp()s. - */ - if (cachep->gfporder >= slab_break_gfp_order) - break; + left_over = calculate_slab_order(cachep, size, cachep->align, flags); - if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder)) - break; /* Acceptable internal fragmentation. */ -next: - cachep->gfporder++; - } while (1); - } + if (!cachep->num) + return -E2BIG; - if (!cachep->num) { - printk("kmem_cache_create: couldn't create cache %s.\n", name); - kmem_cache_free(&cache_cache, cachep); - cachep = NULL; - goto oops; - } - slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t) - + sizeof(struct slab), align); + freelist_size = calculate_freelist_size(cachep->num, cachep->align); /* * If the slab has been placed off-slab, and we have enough space then * move it on-slab. This is at the expense of any extra colouring. */ - if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) { + if (flags & CFLGS_OFF_SLAB && left_over >= freelist_size) { flags &= ~CFLGS_OFF_SLAB; - left_over -= slab_size; + left_over -= freelist_size; } if (flags & CFLGS_OFF_SLAB) { /* really off slab. No need for manual alignment */ - slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab); + freelist_size = calculate_freelist_size(cachep->num, 0); + +#ifdef CONFIG_PAGE_POISONING + /* If we're going to use the generic kernel_map_pages() + * poisoning, then it's going to smash the contents of + * the redzone and userword anyhow, so switch them off. + */ + if (size % PAGE_SIZE == 0 && flags & SLAB_POISON) + flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); +#endif } cachep->colour_off = cache_line_size(); /* Offset must be a multiple of the alignment. */ - if (cachep->colour_off < align) - cachep->colour_off = align; - cachep->colour = left_over/cachep->colour_off; - cachep->slab_size = slab_size; + if (cachep->colour_off < cachep->align) + cachep->colour_off = cachep->align; + cachep->colour = left_over / cachep->colour_off; + cachep->freelist_size = freelist_size; cachep->flags = flags; - cachep->gfpflags = 0; - if (flags & SLAB_CACHE_DMA) - cachep->gfpflags |= GFP_DMA; - spin_lock_init(&cachep->spinlock); - cachep->objsize = size; - - if (flags & CFLGS_OFF_SLAB) - cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u); - cachep->ctor = ctor; - cachep->dtor = dtor; - cachep->name = name; - - /* Don't let CPUs to come and go */ - lock_cpu_hotplug(); - - if (g_cpucache_up == FULL) { - enable_cpucache(cachep); - } else { - if (g_cpucache_up == NONE) { - /* Note: the first kmem_cache_create must create - * the cache that's used by kmalloc(24), otherwise - * the creation of further caches will BUG(). - */ - cachep->array[smp_processor_id()] = - &initarray_generic.cache; + cachep->allocflags = __GFP_COMP; + if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA)) + cachep->allocflags |= GFP_DMA; + cachep->size = size; + cachep->reciprocal_buffer_size = reciprocal_value(size); - /* If the cache that's used by - * kmalloc(sizeof(kmem_list3)) is the first cache, - * then we need to set up all its list3s, otherwise - * the creation of further caches will BUG(). 
- */ - set_up_list3s(cachep, SIZE_AC); - if (INDEX_AC == INDEX_L3) - g_cpucache_up = PARTIAL_L3; - else - g_cpucache_up = PARTIAL_AC; - } else { - cachep->array[smp_processor_id()] = - kmalloc(sizeof(struct arraycache_init), - GFP_KERNEL); + if (flags & CFLGS_OFF_SLAB) { + cachep->freelist_cache = kmalloc_slab(freelist_size, 0u); + /* + * This is a possibility for one of the kmalloc_{dma,}_caches. + * But since we go off slab only for object size greater than + * PAGE_SIZE/8, and kmalloc_{dma,}_caches get created + * in ascending order,this should not happen at all. + * But leave a BUG_ON for some lucky dude. + */ + BUG_ON(ZERO_OR_NULL_PTR(cachep->freelist_cache)); + } - if (g_cpucache_up == PARTIAL_AC) { - set_up_list3s(cachep, SIZE_L3); - g_cpucache_up = PARTIAL_L3; - } else { - int node; - for_each_online_node(node) { - - cachep->nodelists[node] = - kmalloc_node(sizeof(struct kmem_list3), - GFP_KERNEL, node); - BUG_ON(!cachep->nodelists[node]); - kmem_list3_init(cachep->nodelists[node]); - } - } - } - cachep->nodelists[numa_node_id()]->next_reap = - jiffies + REAPTIMEOUT_LIST3 + - ((unsigned long)cachep)%REAPTIMEOUT_LIST3; - - BUG_ON(!ac_data(cachep)); - ac_data(cachep)->avail = 0; - ac_data(cachep)->limit = BOOT_CPUCACHE_ENTRIES; - ac_data(cachep)->batchcount = 1; - ac_data(cachep)->touched = 0; - cachep->batchcount = 1; - cachep->limit = BOOT_CPUCACHE_ENTRIES; - } - - /* cache setup completed, link it into the list */ - list_add(&cachep->next, &cache_chain); - unlock_cpu_hotplug(); -oops: - if (!cachep && (flags & SLAB_PANIC)) - panic("kmem_cache_create(): failed to create slab `%s'\n", - name); - up(&cache_chain_sem); - return cachep; -} -EXPORT_SYMBOL(kmem_cache_create); + err = setup_cpu_cache(cachep, gfp); + if (err) { + __kmem_cache_shutdown(cachep); + return err; + } + + if (flags & SLAB_DEBUG_OBJECTS) { + /* + * Would deadlock through slab_destroy()->call_rcu()-> + * debug_object_activate()->kmem_cache_alloc(). + */ + WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU); + + slab_set_debugobj_lock_classes(cachep); + } else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU)) + on_slab_lock_classes(cachep); + + return 0; +} #if DEBUG static void check_irq_off(void) @@ -1834,19 +2430,19 @@ static void check_irq_on(void) BUG_ON(irqs_disabled()); } -static void check_spinlock_acquired(kmem_cache_t *cachep) +static void check_spinlock_acquired(struct kmem_cache *cachep) { #ifdef CONFIG_SMP check_irq_off(); - assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock); + assert_spin_locked(&cachep->node[numa_mem_id()]->list_lock); #endif } -static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node) +static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) { #ifdef CONFIG_SMP check_irq_off(); - assert_spin_locked(&cachep->nodelists[node]->list_lock); + assert_spin_locked(&cachep->node[node]->list_lock); #endif } @@ -1857,228 +2453,184 @@ static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node) #define check_spinlock_acquired_node(x, y) do { } while(0) #endif -/* - * Waits for all CPUs to execute func(). 
- */ -static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg) -{ - check_irq_on(); - preempt_disable(); - - local_irq_disable(); - func(arg); - local_irq_enable(); - - if (smp_call_function(func, arg, 1, 1)) - BUG(); - - preempt_enable(); -} - -static void drain_array_locked(kmem_cache_t* cachep, - struct array_cache *ac, int force, int node); +static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, + struct array_cache *ac, + int force, int node); static void do_drain(void *arg) { - kmem_cache_t *cachep = (kmem_cache_t*)arg; + struct kmem_cache *cachep = arg; struct array_cache *ac; - int node = numa_node_id(); + int node = numa_mem_id(); check_irq_off(); - ac = ac_data(cachep); - spin_lock(&cachep->nodelists[node]->list_lock); + ac = cpu_cache_get(cachep); + spin_lock(&cachep->node[node]->list_lock); free_block(cachep, ac->entry, ac->avail, node); - spin_unlock(&cachep->nodelists[node]->list_lock); + spin_unlock(&cachep->node[node]->list_lock); ac->avail = 0; } -static void drain_cpu_caches(kmem_cache_t *cachep) +static void drain_cpu_caches(struct kmem_cache *cachep) { - struct kmem_list3 *l3; + struct kmem_cache_node *n; int node; - smp_call_function_all_cpus(do_drain, cachep); + on_each_cpu(do_drain, cachep, 1); check_irq_on(); - spin_lock_irq(&cachep->spinlock); - for_each_online_node(node) { - l3 = cachep->nodelists[node]; - if (l3) { - spin_lock(&l3->list_lock); - drain_array_locked(cachep, l3->shared, 1, node); - spin_unlock(&l3->list_lock); - if (l3->alien) - drain_alien_cache(cachep, l3); - } + for_each_online_node(node) { + n = cachep->node[node]; + if (n && n->alien) + drain_alien_cache(cachep, n->alien); + } + + for_each_online_node(node) { + n = cachep->node[node]; + if (n) + drain_array(cachep, n, n->shared, 1, node); } - spin_unlock_irq(&cachep->spinlock); } -static int __node_shrink(kmem_cache_t *cachep, int node) +/* + * Remove slabs from the list of free slabs. + * Specify the number of slabs to drain in tofree. + * + * Returns the actual number of slabs released. + */ +static int drain_freelist(struct kmem_cache *cache, + struct kmem_cache_node *n, int tofree) { - struct slab *slabp; - struct kmem_list3 *l3 = cachep->nodelists[node]; - int ret; + struct list_head *p; + int nr_freed; + struct page *page; - for (;;) { - struct list_head *p; + nr_freed = 0; + while (nr_freed < tofree && !list_empty(&n->slabs_free)) { - p = l3->slabs_free.prev; - if (p == &l3->slabs_free) - break; + spin_lock_irq(&n->list_lock); + p = n->slabs_free.prev; + if (p == &n->slabs_free) { + spin_unlock_irq(&n->list_lock); + goto out; + } - slabp = list_entry(l3->slabs_free.prev, struct slab, list); + page = list_entry(p, struct page, lru); #if DEBUG - if (slabp->inuse) - BUG(); + BUG_ON(page->active); #endif - list_del(&slabp->list); - - l3->free_objects -= cachep->num; - spin_unlock_irq(&l3->list_lock); - slab_destroy(cachep, slabp); - spin_lock_irq(&l3->list_lock); + list_del(&page->lru); + /* + * Safe to drop the lock. The slab is no longer linked + * to the cache. 
+ */ + n->free_objects -= cache->num; + spin_unlock_irq(&n->list_lock); + slab_destroy(cache, page); + nr_freed++; } - ret = !list_empty(&l3->slabs_full) || - !list_empty(&l3->slabs_partial); - return ret; +out: + return nr_freed; } -static int __cache_shrink(kmem_cache_t *cachep) +int __kmem_cache_shrink(struct kmem_cache *cachep) { int ret = 0, i = 0; - struct kmem_list3 *l3; + struct kmem_cache_node *n; drain_cpu_caches(cachep); check_irq_on(); for_each_online_node(i) { - l3 = cachep->nodelists[i]; - if (l3) { - spin_lock_irq(&l3->list_lock); - ret += __node_shrink(cachep, i); - spin_unlock_irq(&l3->list_lock); - } - } - return (ret ? 1 : 0); -} + n = cachep->node[i]; + if (!n) + continue; -/** - * kmem_cache_shrink - Shrink a cache. - * @cachep: The cache to shrink. - * - * Releases as many slabs as possible for a cache. - * To help debugging, a zero exit status indicates all slabs were released. - */ -int kmem_cache_shrink(kmem_cache_t *cachep) -{ - if (!cachep || in_interrupt()) - BUG(); + drain_freelist(cachep, n, slabs_tofree(cachep, n)); - return __cache_shrink(cachep); + ret += !list_empty(&n->slabs_full) || + !list_empty(&n->slabs_partial); + } + return (ret ? 1 : 0); } -EXPORT_SYMBOL(kmem_cache_shrink); -/** - * kmem_cache_destroy - delete a cache - * @cachep: the cache to destroy - * - * Remove a kmem_cache_t object from the slab cache. - * Returns 0 on success. - * - * It is expected this function will be called by a module when it is - * unloaded. This will remove the cache completely, and avoid a duplicate - * cache being allocated each time a module is loaded and unloaded, if the - * module doesn't have persistent in-kernel storage across loads and unloads. - * - * The cache must be empty before calling this function. - * - * The caller must guarantee that noone will allocate memory from the cache - * during the kmem_cache_destroy(). - */ -int kmem_cache_destroy(kmem_cache_t * cachep) +int __kmem_cache_shutdown(struct kmem_cache *cachep) { int i; - struct kmem_list3 *l3; - - if (!cachep || in_interrupt()) - BUG(); + struct kmem_cache_node *n; + int rc = __kmem_cache_shrink(cachep); - /* Don't let CPUs to come and go */ - lock_cpu_hotplug(); - - /* Find the cache in the chain of caches. */ - down(&cache_chain_sem); - /* - * the chain is never empty, cache_cache is never destroyed - */ - list_del(&cachep->next); - up(&cache_chain_sem); - - if (__cache_shrink(cachep)) { - slab_error(cachep, "Can't free all objects"); - down(&cache_chain_sem); - list_add(&cachep->next,&cache_chain); - up(&cache_chain_sem); - unlock_cpu_hotplug(); - return 1; - } - - if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) - synchronize_rcu(); + if (rc) + return rc; for_each_online_cpu(i) - kfree(cachep->array[i]); + kfree(cachep->array[i]); - /* NUMA: free the list3 structures */ + /* NUMA: free the node structures */ for_each_online_node(i) { - if ((l3 = cachep->nodelists[i])) { - kfree(l3->shared); - free_alien_cache(l3->alien); - kfree(l3); + n = cachep->node[i]; + if (n) { + kfree(n->shared); + free_alien_cache(n->alien); + kfree(n); } } - kmem_cache_free(&cache_cache, cachep); - - unlock_cpu_hotplug(); - return 0; } -EXPORT_SYMBOL(kmem_cache_destroy); -/* Get the memory for a slab management obj. */ -static struct slab* alloc_slabmgmt(kmem_cache_t *cachep, void *objp, - int colour_off, gfp_t local_flags) +/* + * Get the memory for a slab management obj. 
+ *
+ * For a slab cache whose slab descriptor is off-slab, the
+ * slab descriptor can't come from the same cache which is being created,
+ * because that would defer the creation of
+ * the kmalloc_{dma,}_cache of size sizeof(slab descriptor) to this point,
+ * and we would eventually call down to __kmem_cache_create(), which
+ * in turn looks up in the kmalloc_{dma,}_caches for the desired-size one.
+ * This is a "chicken-and-egg" problem.
+ *
+ * So the off-slab slab descriptor shall come from the kmalloc_{dma,}_caches,
+ * which are all initialized during kmem_cache_init().
+ */
+static void *alloc_slabmgmt(struct kmem_cache *cachep,
+				   struct page *page, int colour_off,
+				   gfp_t local_flags, int nodeid)
 {
-	struct slab *slabp;
-
+	void *freelist;
+	void *addr = page_address(page);
+
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
-		slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
-		if (!slabp)
+		freelist = kmem_cache_alloc_node(cachep->freelist_cache,
+						local_flags, nodeid);
+		if (!freelist)
 			return NULL;
 	} else {
-		slabp = objp+colour_off;
-		colour_off += cachep->slab_size;
+		freelist = addr + colour_off;
+		colour_off += cachep->freelist_size;
 	}
-	slabp->inuse = 0;
-	slabp->colouroff = colour_off;
-	slabp->s_mem = objp+colour_off;
+	page->active = 0;
+	page->s_mem = addr + colour_off;
+	return freelist;
+}
 
-	return slabp;
+static inline freelist_idx_t get_free_obj(struct page *page, unsigned int idx)
+{
+	return ((freelist_idx_t *)page->freelist)[idx];
 }
 
-static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
+static inline void set_free_obj(struct page *page,
+					unsigned int idx, freelist_idx_t val)
 {
-	return (kmem_bufctl_t *)(slabp+1);
+	((freelist_idx_t *)(page->freelist))[idx] = val;
 }
 
-static void cache_init_objs(kmem_cache_t *cachep,
-			struct slab *slabp, unsigned long ctor_flags)
+static void cache_init_objs(struct kmem_cache *cachep,
+			struct page *page)
 {
 	int i;
 
 	for (i = 0; i < cachep->num; i++) {
-		void *objp = slabp->s_mem+cachep->objsize*i;
+		void *objp = index_to_obj(cachep, page, i);
 #if DEBUG
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON)
@@ -2091,103 +2643,126 @@ static void cache_init_objs(kmem_cache_t *cachep,
 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
 		}
 		/*
-		 * Constructors are not allowed to allocate memory from
-		 * the same cache which they are a constructor for.
-		 * Otherwise, deadlock. They must also be threaded.
+		 * Constructors are not allowed to allocate memory from the same
+		 * cache which they are a constructor for. Otherwise, deadlock.
+		 * They must also be threaded.
*/ if (cachep->ctor && !(cachep->flags & SLAB_POISON)) - cachep->ctor(objp+obj_dbghead(cachep), cachep, ctor_flags); + cachep->ctor(objp + obj_offset(cachep)); if (cachep->flags & SLAB_RED_ZONE) { if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) slab_error(cachep, "constructor overwrote the" - " end of an object"); + " end of an object"); if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) slab_error(cachep, "constructor overwrote the" - " start of an object"); + " start of an object"); } - if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) - kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0); + if ((cachep->size % PAGE_SIZE) == 0 && + OFF_SLAB(cachep) && cachep->flags & SLAB_POISON) + kernel_map_pages(virt_to_page(objp), + cachep->size / PAGE_SIZE, 0); #else if (cachep->ctor) - cachep->ctor(objp, cachep, ctor_flags); + cachep->ctor(objp); #endif - slab_bufctl(slabp)[i] = i+1; + set_obj_status(page, i, OBJECT_FREE); + set_free_obj(page, i, i); } - slab_bufctl(slabp)[i-1] = BUFCTL_END; - slabp->free = 0; } -static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags) +static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags) { - if (flags & SLAB_DMA) { - if (!(cachep->gfpflags & GFP_DMA)) - BUG(); - } else { - if (cachep->gfpflags & GFP_DMA) - BUG(); + if (CONFIG_ZONE_DMA_FLAG) { + if (flags & GFP_DMA) + BUG_ON(!(cachep->allocflags & GFP_DMA)); + else + BUG_ON(cachep->allocflags & GFP_DMA); } } -static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp) +static void *slab_get_obj(struct kmem_cache *cachep, struct page *page, + int nodeid) { - int i; - struct page *page; + void *objp; + + objp = index_to_obj(cachep, page, get_free_obj(page, page->active)); + page->active++; +#if DEBUG + WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); +#endif - /* Nasty!!!!!! I hope this is OK. */ - i = 1 << cachep->gfporder; - page = virt_to_page(objp); - do { - SET_PAGE_CACHE(page, cachep); - SET_PAGE_SLAB(page, slabp); - page++; - } while (--i); + return objp; +} + +static void slab_put_obj(struct kmem_cache *cachep, struct page *page, + void *objp, int nodeid) +{ + unsigned int objnr = obj_to_index(cachep, page, objp); +#if DEBUG + unsigned int i; + + /* Verify that the slab belongs to the intended node */ + WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid); + + /* Verify double free bug */ + for (i = page->active; i < cachep->num; i++) { + if (get_free_obj(page, i) == objnr) { + printk(KERN_ERR "slab: double free detected in cache " + "'%s', objp %p\n", cachep->name, objp); + BUG(); + } + } +#endif + page->active--; + set_free_obj(page, page->active, objnr); +} + +/* + * Map pages beginning at addr to the given cache and slab. This is required + * for the slab allocator to be able to lookup the cache and slab of a + * virtual address for kfree, ksize, and slab debugging. + */ +static void slab_map_pages(struct kmem_cache *cache, struct page *page, + void *freelist) +{ + page->slab_cache = cache; + page->freelist = freelist; } /* * Grow (by 1) the number of slabs within a cache. This is called by * kmem_cache_alloc() when there are no active objs left in a cache. 
*/ -static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) +static int cache_grow(struct kmem_cache *cachep, + gfp_t flags, int nodeid, struct page *page) { - struct slab *slabp; - void *objp; - size_t offset; - gfp_t local_flags; - unsigned long ctor_flags; - struct kmem_list3 *l3; + void *freelist; + size_t offset; + gfp_t local_flags; + struct kmem_cache_node *n; - /* Be lazy and only check for valid flags here, - * keeping it out of the critical path in kmem_cache_alloc(). + /* + * Be lazy and only check for valid flags here, keeping it out of the + * critical path in kmem_cache_alloc(). */ - if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW)) - BUG(); - if (flags & SLAB_NO_GROW) - return 0; - - ctor_flags = SLAB_CTOR_CONSTRUCTOR; - local_flags = (flags & SLAB_LEVEL_MASK); - if (!(local_flags & __GFP_WAIT)) - /* - * Not allowed to sleep. Need to tell a constructor about - * this - it might need to know... - */ - ctor_flags |= SLAB_CTOR_ATOMIC; + BUG_ON(flags & GFP_SLAB_BUG_MASK); + local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); - /* About to mess with non-constant members - lock. */ + /* Take the node list lock to change the colour_next on this node */ check_irq_off(); - spin_lock(&cachep->spinlock); + n = cachep->node[nodeid]; + spin_lock(&n->list_lock); /* Get colour for the slab, and cal the next value. */ - offset = cachep->colour_next; - cachep->colour_next++; - if (cachep->colour_next >= cachep->colour) - cachep->colour_next = 0; - offset *= cachep->colour_off; + offset = n->colour_next; + n->colour_next++; + if (n->colour_next >= cachep->colour) + n->colour_next = 0; + spin_unlock(&n->list_lock); - spin_unlock(&cachep->spinlock); + offset *= cachep->colour_off; - check_irq_off(); if (local_flags & __GFP_WAIT) local_irq_enable(); @@ -2199,35 +2774,38 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid) */ kmem_flagcheck(cachep, flags); - /* Get mem for the objs. - * Attempt to allocate a physical page from 'nodeid', + /* + * Get mem for the objs. Attempt to allocate a physical page from + * 'nodeid'. */ - if (!(objp = kmem_getpages(cachep, flags, nodeid))) + if (!page) + page = kmem_getpages(cachep, local_flags, nodeid); + if (!page) goto failed; /* Get slab management. */ - if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags))) + freelist = alloc_slabmgmt(cachep, page, offset, + local_flags & ~GFP_CONSTRAINT_MASK, nodeid); + if (!freelist) goto opps1; - slabp->nodeid = nodeid; - set_slab_attr(cachep, slabp, objp); + slab_map_pages(cachep, page, freelist); - cache_init_objs(cachep, slabp, ctor_flags); + cache_init_objs(cachep, page); if (local_flags & __GFP_WAIT) local_irq_disable(); check_irq_off(); - l3 = cachep->nodelists[nodeid]; - spin_lock(&l3->list_lock); + spin_lock(&n->list_lock); /* Make slab active. */ - list_add_tail(&slabp->list, &(l3->slabs_free)); + list_add_tail(&page->lru, &(n->slabs_free)); STATS_INC_GROWN(cachep); - l3->free_objects += cachep->num; - spin_unlock(&l3->list_lock); + n->free_objects += cachep->num; + spin_unlock(&n->list_lock); return 1; opps1: - kmem_freepages(cachep, objp); + kmem_freepages(cachep, page); failed: if (local_flags & __GFP_WAIT) local_irq_disable(); @@ -2240,81 +2818,70 @@ failed: * Perform extra freeing checks: * - detect bad pointers. 
* - POISON/RED_ZONE checking - * - destructor calls, for caches with POISON+dtor */ static void kfree_debugcheck(const void *objp) { - struct page *page; - if (!virt_addr_valid(objp)) { printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n", - (unsigned long)objp); - BUG(); - } - page = virt_to_page(objp); - if (!PageSlab(page)) { - printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n", (unsigned long)objp); + (unsigned long)objp); BUG(); } } -static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, - void *caller) +static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) +{ + unsigned long long redzone1, redzone2; + + redzone1 = *dbg_redzone1(cache, obj); + redzone2 = *dbg_redzone2(cache, obj); + + /* + * Redzone is ok. + */ + if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE) + return; + + if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE) + slab_error(cache, "double free detected"); + else + slab_error(cache, "memory outside object was overwritten"); + + printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n", + obj, redzone1, redzone2); +} + +static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, + unsigned long caller) { - struct page *page; unsigned int objnr; - struct slab *slabp; + struct page *page; - objp -= obj_dbghead(cachep); - kfree_debugcheck(objp); - page = virt_to_page(objp); + BUG_ON(virt_to_cache(objp) != cachep); - if (GET_PAGE_CACHE(page) != cachep) { - printk(KERN_ERR "mismatch in kmem_cache_free: expected cache %p, got %p\n", - GET_PAGE_CACHE(page),cachep); - printk(KERN_ERR "%p is %s.\n", cachep, cachep->name); - printk(KERN_ERR "%p is %s.\n", GET_PAGE_CACHE(page), GET_PAGE_CACHE(page)->name); - WARN_ON(1); - } - slabp = GET_PAGE_SLAB(page); + objp -= obj_offset(cachep); + kfree_debugcheck(objp); + page = virt_to_head_page(objp); if (cachep->flags & SLAB_RED_ZONE) { - if (*dbg_redzone1(cachep, objp) != RED_ACTIVE || *dbg_redzone2(cachep, objp) != RED_ACTIVE) { - slab_error(cachep, "double free, or memory outside" - " object was overwritten"); - printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n", - objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); - } + verify_redzone_free(cachep, objp); *dbg_redzone1(cachep, objp) = RED_INACTIVE; *dbg_redzone2(cachep, objp) = RED_INACTIVE; } if (cachep->flags & SLAB_STORE_USER) - *dbg_userword(cachep, objp) = caller; + *dbg_userword(cachep, objp) = (void *)caller; - objnr = (objp-slabp->s_mem)/cachep->objsize; + objnr = obj_to_index(cachep, page, objp); BUG_ON(objnr >= cachep->num); - BUG_ON(objp != slabp->s_mem + objnr*cachep->objsize); + BUG_ON(objp != index_to_obj(cachep, page, objnr)); - if (cachep->flags & SLAB_DEBUG_INITIAL) { - /* Need to call the slab's constructor so the - * caller can perform a verify of its state (debugging). - * Called without the cache-lock held. 
- */ - cachep->ctor(objp+obj_dbghead(cachep), - cachep, SLAB_CTOR_CONSTRUCTOR|SLAB_CTOR_VERIFY); - } - if (cachep->flags & SLAB_POISON && cachep->dtor) { - /* we want to cache poison the object, - * call the destruction callback - */ - cachep->dtor(objp+obj_dbghead(cachep), cachep, 0); - } + set_obj_status(page, objnr, OBJECT_FREE); if (cachep->flags & SLAB_POISON) { #ifdef CONFIG_DEBUG_PAGEALLOC - if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) { - store_stackinfo(cachep, objp, (unsigned long)caller); - kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 0); + if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { + store_stackinfo(cachep, objp, caller); + kernel_map_pages(virt_to_page(objp), + cachep->size / PAGE_SIZE, 0); } else { poison_obj(cachep, objp, POISON_FREE); } @@ -2325,138 +2892,112 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp, return objp; } -static void check_slabp(kmem_cache_t *cachep, struct slab *slabp) -{ - kmem_bufctl_t i; - int entries = 0; - - /* Check slab's freelist to see if this obj is there. */ - for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) { - entries++; - if (entries > cachep->num || i >= cachep->num) - goto bad; - } - if (entries != cachep->num - slabp->inuse) { -bad: - printk(KERN_ERR "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n", - cachep->name, cachep->num, slabp, slabp->inuse); - for (i=0;i<sizeof(slabp)+cachep->num*sizeof(kmem_bufctl_t);i++) { - if ((i%16)==0) - printk("\n%03x:", i); - printk(" %02x", ((unsigned char*)slabp)[i]); - } - printk("\n"); - BUG(); - } -} #else #define kfree_debugcheck(x) do { } while(0) #define cache_free_debugcheck(x,objp,z) (objp) -#define check_slabp(x,y) do { } while(0) #endif -static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags) +static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags, + bool force_refill) { int batchcount; - struct kmem_list3 *l3; + struct kmem_cache_node *n; struct array_cache *ac; + int node; check_irq_off(); - ac = ac_data(cachep); + node = numa_mem_id(); + if (unlikely(force_refill)) + goto force_grow; retry: + ac = cpu_cache_get(cachep); batchcount = ac->batchcount; if (!ac->touched && batchcount > BATCHREFILL_LIMIT) { - /* if there was little recent activity on this - * cache, then perform only a partial refill. - * Otherwise we could generate refill bouncing. + /* + * If there was little recent activity on this cache, then + * perform only a partial refill. Otherwise we could generate + * refill bouncing. */ batchcount = BATCHREFILL_LIMIT; } - l3 = cachep->nodelists[numa_node_id()]; - - BUG_ON(ac->avail > 0 || !l3); - spin_lock(&l3->list_lock); - - if (l3->shared) { - struct array_cache *shared_array = l3->shared; - if (shared_array->avail) { - if (batchcount > shared_array->avail) - batchcount = shared_array->avail; - shared_array->avail -= batchcount; - ac->avail = batchcount; - memcpy(ac->entry, - &(shared_array->entry[shared_array->avail]), - sizeof(void*)*batchcount); - shared_array->touched = 1; - goto alloc_done; - } + n = cachep->node[node]; + + BUG_ON(ac->avail > 0 || !n); + spin_lock(&n->list_lock); + + /* See if we can refill from the shared array */ + if (n->shared && transfer_objects(ac, n->shared, batchcount)) { + n->shared->touched = 1; + goto alloc_done; } + while (batchcount > 0) { struct list_head *entry; - struct slab *slabp; + struct page *page; /* Get slab alloc is to come from. 
*/ - entry = l3->slabs_partial.next; - if (entry == &l3->slabs_partial) { - l3->free_touched = 1; - entry = l3->slabs_free.next; - if (entry == &l3->slabs_free) + entry = n->slabs_partial.next; + if (entry == &n->slabs_partial) { + n->free_touched = 1; + entry = n->slabs_free.next; + if (entry == &n->slabs_free) goto must_grow; } - slabp = list_entry(entry, struct slab, list); - check_slabp(cachep, slabp); + page = list_entry(entry, struct page, lru); check_spinlock_acquired(cachep); - while (slabp->inuse < cachep->num && batchcount--) { - kmem_bufctl_t next; + + /* + * The slab was either on partial or free list so + * there must be at least one object available for + * allocation. + */ + BUG_ON(page->active >= cachep->num); + + while (page->active < cachep->num && batchcount--) { STATS_INC_ALLOCED(cachep); STATS_INC_ACTIVE(cachep); STATS_SET_HIGH(cachep); - /* get obj pointer */ - ac->entry[ac->avail++] = slabp->s_mem + - slabp->free*cachep->objsize; - - slabp->inuse++; - next = slab_bufctl(slabp)[slabp->free]; -#if DEBUG - slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; - WARN_ON(numa_node_id() != slabp->nodeid); -#endif - slabp->free = next; + ac_put_obj(cachep, ac, slab_get_obj(cachep, page, + node)); } - check_slabp(cachep, slabp); /* move slabp to correct slabp list: */ - list_del(&slabp->list); - if (slabp->free == BUFCTL_END) - list_add(&slabp->list, &l3->slabs_full); + list_del(&page->lru); + if (page->active == cachep->num) + list_add(&page->lru, &n->slabs_full); else - list_add(&slabp->list, &l3->slabs_partial); + list_add(&page->lru, &n->slabs_partial); } must_grow: - l3->free_objects -= ac->avail; + n->free_objects -= ac->avail; alloc_done: - spin_unlock(&l3->list_lock); + spin_unlock(&n->list_lock); if (unlikely(!ac->avail)) { int x; - x = cache_grow(cachep, flags, numa_node_id()); +force_grow: + x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL); - // cache_grow can reenable interrupts, then ac could change. - ac = ac_data(cachep); - if (!x && ac->avail == 0) // no objects in sight? abort + /* cache_grow can reenable interrupts, then ac could change. */ + ac = cpu_cache_get(cachep); + node = numa_mem_id(); + + /* no objects in sight? abort */ + if (!x && (ac->avail == 0 || force_refill)) return NULL; - if (!ac->avail) // objects refilled by interrupt? + if (!ac->avail) /* objects refilled by interrupt? 
*/ goto retry; } ac->touched = 1; - return ac->entry[--ac->avail]; + + return ac_get_obj(cachep, ac, flags, force_refill); } -static inline void -cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags) +static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep, + gfp_t flags) { might_sleep_if(flags & __GFP_WAIT); #if DEBUG @@ -2465,16 +3006,18 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags) } #if DEBUG -static void * -cache_alloc_debugcheck_after(kmem_cache_t *cachep, - gfp_t flags, void *objp, void *caller) +static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, + gfp_t flags, void *objp, unsigned long caller) { - if (!objp) + struct page *page; + + if (!objp) return objp; - if (cachep->flags & SLAB_POISON) { + if (cachep->flags & SLAB_POISON) { #ifdef CONFIG_DEBUG_PAGEALLOC - if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) - kernel_map_pages(virt_to_page(objp), cachep->objsize/PAGE_SIZE, 1); + if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) + kernel_map_pages(virt_to_page(objp), + cachep->size / PAGE_SIZE, 1); else check_poison_obj(cachep, objp); #else @@ -2483,215 +3026,452 @@ cache_alloc_debugcheck_after(kmem_cache_t *cachep, poison_obj(cachep, objp, POISON_INUSE); } if (cachep->flags & SLAB_STORE_USER) - *dbg_userword(cachep, objp) = caller; + *dbg_userword(cachep, objp) = (void *)caller; if (cachep->flags & SLAB_RED_ZONE) { - if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || *dbg_redzone2(cachep, objp) != RED_INACTIVE) { + if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || + *dbg_redzone2(cachep, objp) != RED_INACTIVE) { slab_error(cachep, "double free, or memory outside" " object was overwritten"); - printk(KERN_ERR "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n", - objp, *dbg_redzone1(cachep, objp), *dbg_redzone2(cachep, objp)); + printk(KERN_ERR + "%p: redzone 1:0x%llx, redzone 2:0x%llx\n", + objp, *dbg_redzone1(cachep, objp), + *dbg_redzone2(cachep, objp)); } *dbg_redzone1(cachep, objp) = RED_ACTIVE; *dbg_redzone2(cachep, objp) = RED_ACTIVE; } - objp += obj_dbghead(cachep); - if (cachep->ctor && cachep->flags & SLAB_POISON) { - unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR; - if (!(flags & __GFP_WAIT)) - ctor_flags |= SLAB_CTOR_ATOMIC; - - cachep->ctor(objp, cachep, ctor_flags); - } + page = virt_to_head_page(objp); + set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE); + objp += obj_offset(cachep); + if (cachep->ctor && cachep->flags & SLAB_POISON) + cachep->ctor(objp); + if (ARCH_SLAB_MINALIGN && + ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { + printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", + objp, (int)ARCH_SLAB_MINALIGN); + } return objp; } #else #define cache_alloc_debugcheck_after(a,b,objp,d) (objp) #endif -static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags) +static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags) { - void* objp; + if (cachep == kmem_cache) + return false; + + return should_failslab(cachep->object_size, flags, cachep->flags); +} + +static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) +{ + void *objp; struct array_cache *ac; + bool force_refill = false; check_irq_off(); - ac = ac_data(cachep); + + ac = cpu_cache_get(cachep); if (likely(ac->avail)) { - STATS_INC_ALLOCHIT(cachep); ac->touched = 1; - objp = ac->entry[--ac->avail]; - } else { - STATS_INC_ALLOCMISS(cachep); - objp = cache_alloc_refill(cachep, flags); + objp = ac_get_obj(cachep, ac, flags, false); + + /* + * Allow for 
the possibility all avail objects are not allowed + * by the current flags + */ + if (objp) { + STATS_INC_ALLOCHIT(cachep); + goto out; + } + force_refill = true; } + + STATS_INC_ALLOCMISS(cachep); + objp = cache_alloc_refill(cachep, flags, force_refill); + /* + * the 'ac' may be updated by cache_alloc_refill(), + * and kmemleak_erase() requires its correct value. + */ + ac = cpu_cache_get(cachep); + +out: + /* + * To avoid a false negative, if an object that is in one of the + * per-CPU caches is leaked, we need to make sure kmemleak doesn't + * treat the array pointers as a reference to the object. + */ + if (objp) + kmemleak_erase(&ac->entry[ac->avail]); return objp; } -static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags) +#ifdef CONFIG_NUMA +/* + * Try allocating on another node if PF_SPREAD_SLAB is a mempolicy is set. + * + * If we are in_interrupt, then process context, including cpusets and + * mempolicy, may not apply and should not be used for allocation policy. + */ +static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) { - unsigned long save_flags; - void* objp; + int nid_alloc, nid_here; - cache_alloc_debugcheck_before(cachep, flags); + if (in_interrupt() || (flags & __GFP_THISNODE)) + return NULL; + nid_alloc = nid_here = numa_mem_id(); + if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) + nid_alloc = cpuset_slab_spread_node(); + else if (current->mempolicy) + nid_alloc = mempolicy_slab_node(); + if (nid_alloc != nid_here) + return ____cache_alloc_node(cachep, flags, nid_alloc); + return NULL; +} - local_irq_save(save_flags); - objp = ____cache_alloc(cachep, flags); - local_irq_restore(save_flags); - objp = cache_alloc_debugcheck_after(cachep, flags, objp, - __builtin_return_address(0)); - prefetchw(objp); - return objp; +/* + * Fallback function if there was no memory available and no objects on a + * certain node and fall back is permitted. First we scan all the + * available node for available objects. If that fails then we + * perform an allocation without specifying a node. This allows the page + * allocator to do its reclaim / fallback magic. We then insert the + * slab into the proper nodelist and then allocate from it. + */ +static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags) +{ + struct zonelist *zonelist; + gfp_t local_flags; + struct zoneref *z; + struct zone *zone; + enum zone_type high_zoneidx = gfp_zone(flags); + void *obj = NULL; + int nid; + unsigned int cpuset_mems_cookie; + + if (flags & __GFP_THISNODE) + return NULL; + + local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); + +retry_cpuset: + cpuset_mems_cookie = read_mems_allowed_begin(); + zonelist = node_zonelist(mempolicy_slab_node(), flags); + +retry: + /* + * Look through allowed nodes for objects available + * from existing per node queues. + */ + for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { + nid = zone_to_nid(zone); + + if (cpuset_zone_allowed_hardwall(zone, flags) && + cache->node[nid] && + cache->node[nid]->free_objects) { + obj = ____cache_alloc_node(cache, + flags | GFP_THISNODE, nid); + if (obj) + break; + } + } + + if (!obj) { + /* + * This allocation will be performed within the constraints + * of the current cpuset / memory policy requirements. + * We may trigger various forms of reclaim on the allowed + * set and go into memory reserves if necessary. 
+ */ + struct page *page; + + if (local_flags & __GFP_WAIT) + local_irq_enable(); + kmem_flagcheck(cache, flags); + page = kmem_getpages(cache, local_flags, numa_mem_id()); + if (local_flags & __GFP_WAIT) + local_irq_disable(); + if (page) { + /* + * Insert into the appropriate per node queues + */ + nid = page_to_nid(page); + if (cache_grow(cache, flags, nid, page)) { + obj = ____cache_alloc_node(cache, + flags | GFP_THISNODE, nid); + if (!obj) + /* + * Another processor may allocate the + * objects in the slab since we are + * not holding any locks. + */ + goto retry; + } else { + /* cache_grow already freed obj */ + obj = NULL; + } + } + } + + if (unlikely(!obj && read_mems_allowed_retry(cpuset_mems_cookie))) + goto retry_cpuset; + return obj; } -#ifdef CONFIG_NUMA /* * A interface to enable slab creation on nodeid */ -static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) +static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, + int nodeid) { struct list_head *entry; - struct slab *slabp; - struct kmem_list3 *l3; - void *obj; - kmem_bufctl_t next; - int x; + struct page *page; + struct kmem_cache_node *n; + void *obj; + int x; - l3 = cachep->nodelists[nodeid]; - BUG_ON(!l3); + VM_BUG_ON(nodeid > num_online_nodes()); + n = cachep->node[nodeid]; + BUG_ON(!n); retry: - spin_lock(&l3->list_lock); - entry = l3->slabs_partial.next; - if (entry == &l3->slabs_partial) { - l3->free_touched = 1; - entry = l3->slabs_free.next; - if (entry == &l3->slabs_free) - goto must_grow; - } - - slabp = list_entry(entry, struct slab, list); - check_spinlock_acquired_node(cachep, nodeid); - check_slabp(cachep, slabp); - - STATS_INC_NODEALLOCS(cachep); - STATS_INC_ACTIVE(cachep); - STATS_SET_HIGH(cachep); - - BUG_ON(slabp->inuse == cachep->num); - - /* get obj pointer */ - obj = slabp->s_mem + slabp->free*cachep->objsize; - slabp->inuse++; - next = slab_bufctl(slabp)[slabp->free]; -#if DEBUG - slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE; -#endif - slabp->free = next; - check_slabp(cachep, slabp); - l3->free_objects--; - /* move slabp to correct slabp list: */ - list_del(&slabp->list); + check_irq_off(); + spin_lock(&n->list_lock); + entry = n->slabs_partial.next; + if (entry == &n->slabs_partial) { + n->free_touched = 1; + entry = n->slabs_free.next; + if (entry == &n->slabs_free) + goto must_grow; + } - if (slabp->free == BUFCTL_END) { - list_add(&slabp->list, &l3->slabs_full); - } else { - list_add(&slabp->list, &l3->slabs_partial); - } + page = list_entry(entry, struct page, lru); + check_spinlock_acquired_node(cachep, nodeid); - spin_unlock(&l3->list_lock); - goto done; + STATS_INC_NODEALLOCS(cachep); + STATS_INC_ACTIVE(cachep); + STATS_SET_HIGH(cachep); + + BUG_ON(page->active == cachep->num); + + obj = slab_get_obj(cachep, page, nodeid); + n->free_objects--; + /* move slabp to correct slabp list: */ + list_del(&page->lru); + + if (page->active == cachep->num) + list_add(&page->lru, &n->slabs_full); + else + list_add(&page->lru, &n->slabs_partial); + + spin_unlock(&n->list_lock); + goto done; must_grow: - spin_unlock(&l3->list_lock); - x = cache_grow(cachep, flags, nodeid); + spin_unlock(&n->list_lock); + x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL); + if (x) + goto retry; - if (!x) - return NULL; + return fallback_alloc(cachep, flags); - goto retry; done: - return obj; + return obj; +} + +static __always_inline void * +slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, + unsigned long caller) +{ + unsigned long save_flags; + void 
*ptr; + int slab_node = numa_mem_id(); + + flags &= gfp_allowed_mask; + + lockdep_trace_alloc(flags); + + if (slab_should_failslab(cachep, flags)) + return NULL; + + cachep = memcg_kmem_get_cache(cachep, flags); + + cache_alloc_debugcheck_before(cachep, flags); + local_irq_save(save_flags); + + if (nodeid == NUMA_NO_NODE) + nodeid = slab_node; + + if (unlikely(!cachep->node[nodeid])) { + /* Node not bootstrapped yet */ + ptr = fallback_alloc(cachep, flags); + goto out; + } + + if (nodeid == slab_node) { + /* + * Use the locally cached objects if possible. + * However ____cache_alloc does not allow fallback + * to other nodes. It may fail while we still have + * objects on other nodes available. + */ + ptr = ____cache_alloc(cachep, flags); + if (ptr) + goto out; + } + /* ___cache_alloc_node can fall back to other nodes */ + ptr = ____cache_alloc_node(cachep, flags, nodeid); + out: + local_irq_restore(save_flags); + ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); + kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags, + flags); + + if (likely(ptr)) { + kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size); + if (unlikely(flags & __GFP_ZERO)) + memset(ptr, 0, cachep->object_size); + } + + return ptr; +} + +static __always_inline void * +__do_cache_alloc(struct kmem_cache *cache, gfp_t flags) +{ + void *objp; + + if (current->mempolicy || unlikely(current->flags & PF_SPREAD_SLAB)) { + objp = alternate_node_alloc(cache, flags); + if (objp) + goto out; + } + objp = ____cache_alloc(cache, flags); + + /* + * We may just have run out of memory on the local node. + * ____cache_alloc_node() knows how to locate memory on other nodes + */ + if (!objp) + objp = ____cache_alloc_node(cache, flags, numa_mem_id()); + + out: + return objp; +} +#else + +static __always_inline void * +__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags) +{ + return ____cache_alloc(cachep, flags); +} + +#endif /* CONFIG_NUMA */ + +static __always_inline void * +slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller) +{ + unsigned long save_flags; + void *objp; + + flags &= gfp_allowed_mask; + + lockdep_trace_alloc(flags); + + if (slab_should_failslab(cachep, flags)) + return NULL; + + cachep = memcg_kmem_get_cache(cachep, flags); + + cache_alloc_debugcheck_before(cachep, flags); + local_irq_save(save_flags); + objp = __do_cache_alloc(cachep, flags); + local_irq_restore(save_flags); + objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); + kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags, + flags); + prefetchw(objp); + + if (likely(objp)) { + kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size); + if (unlikely(flags & __GFP_ZERO)) + memset(objp, 0, cachep->object_size); + } + + return objp; } -#endif /* - * Caller needs to acquire correct kmem_list's list_lock + * Caller needs to acquire correct kmem_cache_node's list_lock */ -static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node) +static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects, + int node) { int i; - struct kmem_list3 *l3; + struct kmem_cache_node *n; for (i = 0; i < nr_objects; i++) { - void *objp = objpp[i]; - struct slab *slabp; - unsigned int objnr; + void *objp; + struct page *page; - slabp = GET_PAGE_SLAB(virt_to_page(objp)); - l3 = cachep->nodelists[node]; - list_del(&slabp->list); - objnr = (objp - slabp->s_mem) / cachep->objsize; - check_spinlock_acquired_node(cachep, node); - check_slabp(cachep, slabp); + 
clear_obj_pfmemalloc(&objpp[i]); + objp = objpp[i]; -#if DEBUG - /* Verify that the slab belongs to the intended node */ - WARN_ON(slabp->nodeid != node); - - if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) { - printk(KERN_ERR "slab: double free detected in cache " - "'%s', objp %p\n", cachep->name, objp); - BUG(); - } -#endif - slab_bufctl(slabp)[objnr] = slabp->free; - slabp->free = objnr; + page = virt_to_head_page(objp); + n = cachep->node[node]; + list_del(&page->lru); + check_spinlock_acquired_node(cachep, node); + slab_put_obj(cachep, page, objp, node); STATS_DEC_ACTIVE(cachep); - slabp->inuse--; - l3->free_objects++; - check_slabp(cachep, slabp); + n->free_objects++; /* fixup slab chains */ - if (slabp->inuse == 0) { - if (l3->free_objects > l3->free_limit) { - l3->free_objects -= cachep->num; - slab_destroy(cachep, slabp); + if (page->active == 0) { + if (n->free_objects > n->free_limit) { + n->free_objects -= cachep->num; + /* No need to drop any previously held + * lock here, even if we have a off-slab slab + * descriptor it is guaranteed to come from + * a different cache, refer to comments before + * alloc_slabmgmt. + */ + slab_destroy(cachep, page); } else { - list_add(&slabp->list, &l3->slabs_free); + list_add(&page->lru, &n->slabs_free); } } else { /* Unconditionally move a slab to the end of the * partial list on free - maximum time for the * other objects to be freed, too. */ - list_add_tail(&slabp->list, &l3->slabs_partial); + list_add_tail(&page->lru, &n->slabs_partial); } } } -static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac) +static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) { int batchcount; - struct kmem_list3 *l3; - int node = numa_node_id(); + struct kmem_cache_node *n; + int node = numa_mem_id(); batchcount = ac->batchcount; #if DEBUG BUG_ON(!batchcount || batchcount > ac->avail); #endif check_irq_off(); - l3 = cachep->nodelists[node]; - spin_lock(&l3->list_lock); - if (l3->shared) { - struct array_cache *shared_array = l3->shared; - int max = shared_array->limit-shared_array->avail; + n = cachep->node[node]; + spin_lock(&n->list_lock); + if (n->shared) { + struct array_cache *shared_array = n->shared; + int max = shared_array->limit - shared_array->avail; if (max) { if (batchcount > max) batchcount = max; memcpy(&(shared_array->entry[shared_array->avail]), - ac->entry, - sizeof(void*)*batchcount); + ac->entry, sizeof(void *) * batchcount); shared_array->avail += batchcount; goto free_done; } @@ -2704,12 +3484,12 @@ free_done: int i = 0; struct list_head *p; - p = l3->slabs_free.next; - while (p != &(l3->slabs_free)) { - struct slab *slabp; + p = n->slabs_free.next; + while (p != &(n->slabs_free)) { + struct page *page; - slabp = list_entry(p, struct slab, list); - BUG_ON(slabp->inuse); + page = list_entry(p, struct page, lru); + BUG_ON(page->active); i++; p = p->next; @@ -2717,68 +3497,44 @@ free_done: STATS_SET_FREEABLE(cachep, i); } #endif - spin_unlock(&l3->list_lock); + spin_unlock(&n->list_lock); ac->avail -= batchcount; - memmove(ac->entry, &(ac->entry[batchcount]), - sizeof(void*)*ac->avail); + memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); } - /* - * __cache_free - * Release an obj back to its cache. If the obj has a constructed - * state, it must be in this state _before_ it is released. - * - * Called with disabled ints. + * Release an obj back to its cache. If the obj has a constructed state, it must + * be in this state _before_ it is released. Called with disabled ints. 
*/ -static inline void __cache_free(kmem_cache_t *cachep, void *objp) +static inline void __cache_free(struct kmem_cache *cachep, void *objp, + unsigned long caller) { - struct array_cache *ac = ac_data(cachep); + struct array_cache *ac = cpu_cache_get(cachep); check_irq_off(); - objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); + kmemleak_free_recursive(objp, cachep->flags); + objp = cache_free_debugcheck(cachep, objp, caller); + + kmemcheck_slab_free(cachep, objp, cachep->object_size); - /* Make sure we are not freeing a object from another - * node to the array cache on this cpu. + /* + * Skip calling cache_free_alien() when the platform is not numa. + * This will avoid cache misses that happen while accessing slabp (which + * is per page memory reference) to get nodeid. Instead use a global + * variable to skip the call, which is mostly likely to be present in + * the cache. */ -#ifdef CONFIG_NUMA - { - struct slab *slabp; - slabp = GET_PAGE_SLAB(virt_to_page(objp)); - if (unlikely(slabp->nodeid != numa_node_id())) { - struct array_cache *alien = NULL; - int nodeid = slabp->nodeid; - struct kmem_list3 *l3 = cachep->nodelists[numa_node_id()]; - - STATS_INC_NODEFREES(cachep); - if (l3->alien && l3->alien[nodeid]) { - alien = l3->alien[nodeid]; - spin_lock(&alien->lock); - if (unlikely(alien->avail == alien->limit)) - __drain_alien_cache(cachep, - alien, nodeid); - alien->entry[alien->avail++] = objp; - spin_unlock(&alien->lock); - } else { - spin_lock(&(cachep->nodelists[nodeid])-> - list_lock); - free_block(cachep, &objp, 1, nodeid); - spin_unlock(&(cachep->nodelists[nodeid])-> - list_lock); - } - return; - } - } -#endif + if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) + return; + if (likely(ac->avail < ac->limit)) { STATS_INC_FREEHIT(cachep); - ac->entry[ac->avail++] = objp; - return; } else { STATS_INC_FREEMISS(cachep); cache_flusharray(cachep, ac); - ac->entry[ac->avail++] = objp; } + + ac_put_obj(cachep, ac, objp); } /** @@ -2789,53 +3545,31 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp) * Allocate an object from this cache. The flags are only relevant * if the cache has no available objects. */ -void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags) +void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) { - return __cache_alloc(cachep, flags); + void *ret = slab_alloc(cachep, flags, _RET_IP_); + + trace_kmem_cache_alloc(_RET_IP_, ret, + cachep->object_size, cachep->size, flags); + + return ret; } EXPORT_SYMBOL(kmem_cache_alloc); -/** - * kmem_ptr_validate - check if an untrusted pointer might - * be a slab entry. - * @cachep: the cache we're checking against - * @ptr: pointer to validate - * - * This verifies that the untrusted pointer looks sane: - * it is _not_ a guarantee that the pointer is actually - * part of the slab cache in question, but it at least - * validates that the pointer can be dereferenced and - * looks half-way sane. - * - * Currently only used for dentry validation. 
- */ -int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr) +#ifdef CONFIG_TRACING +void * +kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) { - unsigned long addr = (unsigned long) ptr; - unsigned long min_addr = PAGE_OFFSET; - unsigned long align_mask = BYTES_PER_WORD-1; - unsigned long size = cachep->objsize; - struct page *page; + void *ret; - if (unlikely(addr < min_addr)) - goto out; - if (unlikely(addr > (unsigned long)high_memory - size)) - goto out; - if (unlikely(addr & align_mask)) - goto out; - if (unlikely(!kern_addr_valid(addr))) - goto out; - if (unlikely(!kern_addr_valid(addr + size - 1))) - goto out; - page = virt_to_page(ptr); - if (unlikely(!PageSlab(page))) - goto out; - if (unlikely(GET_PAGE_CACHE(page) != cachep)) - goto out; - return 1; -out: - return 0; + ret = slab_alloc(cachep, flags, _RET_IP_); + + trace_kmalloc(_RET_IP_, ret, + size, cachep->size, flags); + return ret; } +EXPORT_SYMBOL(kmem_cache_alloc_trace); +#endif #ifdef CONFIG_NUMA /** @@ -2844,136 +3578,117 @@ out: * @flags: See kmalloc(). * @nodeid: node number of the target node. * - * Identical to kmem_cache_alloc, except that this function is slow - * and can sleep. And it will allocate memory on the given node, which - * can improve the performance for cpu bound structures. - * New and improved: it will now make sure that the object gets - * put on the correct node list so that there is no false sharing. + * Identical to kmem_cache_alloc but it will allocate memory on the given + * node, which can improve the performance for cpu bound structures. + * + * Fallback to other node is possible if __GFP_THISNODE is not set. */ -void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid) +void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) { - unsigned long save_flags; - void *ptr; + void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); - if (nodeid == -1) - return __cache_alloc(cachep, flags); - - if (unlikely(!cachep->nodelists[nodeid])) { - /* Fall back to __cache_alloc if we run into trouble */ - printk(KERN_WARNING "slab: not allocating in inactive node %d for cache %s\n", nodeid, cachep->name); - return __cache_alloc(cachep,flags); - } - - cache_alloc_debugcheck_before(cachep, flags); - local_irq_save(save_flags); - if (nodeid == numa_node_id()) - ptr = ____cache_alloc(cachep, flags); - else - ptr = __cache_alloc_node(cachep, flags, nodeid); - local_irq_restore(save_flags); - ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, __builtin_return_address(0)); + trace_kmem_cache_alloc_node(_RET_IP_, ret, + cachep->object_size, cachep->size, + flags, nodeid); - return ptr; + return ret; } EXPORT_SYMBOL(kmem_cache_alloc_node); -void *kmalloc_node(size_t size, gfp_t flags, int node) +#ifdef CONFIG_TRACING +void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, + gfp_t flags, + int nodeid, + size_t size) { - kmem_cache_t *cachep; + void *ret; - cachep = kmem_find_general_cachep(size, flags); - if (unlikely(cachep == NULL)) - return NULL; - return kmem_cache_alloc_node(cachep, flags, node); + ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); + + trace_kmalloc_node(_RET_IP_, ret, + size, cachep->size, + flags, nodeid); + return ret; } -EXPORT_SYMBOL(kmalloc_node); +EXPORT_SYMBOL(kmem_cache_alloc_node_trace); #endif -/** - * kmalloc - allocate memory - * @size: how many bytes of memory are required. - * @flags: the type of memory to allocate. 
- * - * kmalloc is the normal method of allocating memory - * in the kernel. - * - * The @flags argument may be one of: - * - * %GFP_USER - Allocate memory on behalf of user. May sleep. - * - * %GFP_KERNEL - Allocate normal kernel ram. May sleep. - * - * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers. - * - * Additionally, the %GFP_DMA flag may be set to indicate the memory - * must be suitable for DMA. This can mean different things on different - * platforms. For example, on i386, it means that the memory must come - * from the first 16MB. - */ -void *__kmalloc(size_t size, gfp_t flags) +static __always_inline void * +__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) { - kmem_cache_t *cachep; + struct kmem_cache *cachep; - /* If you want to save a few bytes .text space: replace - * __ with kmem_. - * Then kmalloc uses the uninlined functions instead of the inline - * functions. - */ - cachep = __find_general_cachep(size, flags); - if (unlikely(cachep == NULL)) - return NULL; - return __cache_alloc(cachep, flags); + cachep = kmalloc_slab(size, flags); + if (unlikely(ZERO_OR_NULL_PTR(cachep))) + return cachep; + return kmem_cache_alloc_node_trace(cachep, flags, node, size); } -EXPORT_SYMBOL(__kmalloc); -#ifdef CONFIG_SMP +#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) +void *__kmalloc_node(size_t size, gfp_t flags, int node) +{ + return __do_kmalloc_node(size, flags, node, _RET_IP_); +} +EXPORT_SYMBOL(__kmalloc_node); + +void *__kmalloc_node_track_caller(size_t size, gfp_t flags, + int node, unsigned long caller) +{ + return __do_kmalloc_node(size, flags, node, caller); +} +EXPORT_SYMBOL(__kmalloc_node_track_caller); +#else +void *__kmalloc_node(size_t size, gfp_t flags, int node) +{ + return __do_kmalloc_node(size, flags, node, 0); +} +EXPORT_SYMBOL(__kmalloc_node); +#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */ +#endif /* CONFIG_NUMA */ + /** - * __alloc_percpu - allocate one copy of the object for every present - * cpu in the system, zeroing them. - * Objects should be dereferenced using the per_cpu_ptr macro only. - * + * __do_kmalloc - allocate memory * @size: how many bytes of memory are required. - * @align: the alignment, which can't be greater than SMP_CACHE_BYTES. + * @flags: the type of memory to allocate (see kmalloc). + * @caller: function caller for debug tracking of the caller */ -void *__alloc_percpu(size_t size, size_t align) +static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, + unsigned long caller) { - int i; - struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL); + struct kmem_cache *cachep; + void *ret; - if (!pdata) - return NULL; + cachep = kmalloc_slab(size, flags); + if (unlikely(ZERO_OR_NULL_PTR(cachep))) + return cachep; + ret = slab_alloc(cachep, flags, caller); - /* - * Cannot use for_each_online_cpu since a cpu may come online - * and we have no way of figuring out how to fix the array - * that we have allocated then.... 
- */ - for_each_cpu(i) { - int node = cpu_to_node(i); + trace_kmalloc(caller, ret, + size, cachep->size, flags); - if (node_online(node)) - pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, node); - else - pdata->ptrs[i] = kmalloc(size, GFP_KERNEL); + return ret; +} - if (!pdata->ptrs[i]) - goto unwind_oom; - memset(pdata->ptrs[i], 0, size); - } - /* Catch derefs w/o wrappers */ - return (void *) (~(unsigned long) pdata); +#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) +void *__kmalloc(size_t size, gfp_t flags) +{ + return __do_kmalloc(size, flags, _RET_IP_); +} +EXPORT_SYMBOL(__kmalloc); + +void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) +{ + return __do_kmalloc(size, flags, caller); +} +EXPORT_SYMBOL(__kmalloc_track_caller); -unwind_oom: - while (--i >= 0) { - if (!cpu_possible(i)) - continue; - kfree(pdata->ptrs[i]); - } - kfree(pdata); - return NULL; +#else +void *__kmalloc(size_t size, gfp_t flags) +{ + return __do_kmalloc(size, flags, 0); } -EXPORT_SYMBOL(__alloc_percpu); +EXPORT_SYMBOL(__kmalloc); #endif /** @@ -2984,29 +3699,23 @@ EXPORT_SYMBOL(__alloc_percpu); * Free an object which was previously allocated from this * cache. */ -void kmem_cache_free(kmem_cache_t *cachep, void *objp) +void kmem_cache_free(struct kmem_cache *cachep, void *objp) { unsigned long flags; + cachep = cache_from_obj(cachep, objp); + if (!cachep) + return; local_irq_save(flags); - __cache_free(cachep, objp); + debug_check_no_locks_freed(objp, cachep->object_size); + if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) + debug_check_no_obj_freed(objp, cachep->object_size); + __cache_free(cachep, objp, _RET_IP_); local_irq_restore(flags); -} -EXPORT_SYMBOL(kmem_cache_free); -/** - * kzalloc - allocate memory. The memory is set to zero. - * @size: how many bytes of memory are required. - * @flags: the type of memory to allocate. - */ -void *kzalloc(size_t size, gfp_t flags) -{ - void *ret = kmalloc(size, flags); - if (ret) - memset(ret, 0, size); - return ret; + trace_kmem_cache_free(_RET_IP_, objp); } -EXPORT_SYMBOL(kzalloc); +EXPORT_SYMBOL(kmem_cache_free); /** * kfree - free previously allocated memory @@ -3019,201 +3728,238 @@ EXPORT_SYMBOL(kzalloc); */ void kfree(const void *objp) { - kmem_cache_t *c; + struct kmem_cache *c; unsigned long flags; - if (unlikely(!objp)) + trace_kfree(_RET_IP_, objp); + + if (unlikely(ZERO_OR_NULL_PTR(objp))) return; local_irq_save(flags); kfree_debugcheck(objp); - c = GET_PAGE_CACHE(virt_to_page(objp)); - __cache_free(c, (void*)objp); + c = virt_to_cache(objp); + debug_check_no_locks_freed(objp, c->object_size); + + debug_check_no_obj_freed(objp, c->object_size); + __cache_free(c, (void *)objp, _RET_IP_); local_irq_restore(flags); } EXPORT_SYMBOL(kfree); -#ifdef CONFIG_SMP -/** - * free_percpu - free previously allocated percpu memory - * @objp: pointer returned by alloc_percpu. - * - * Don't free memory not originally allocated by alloc_percpu() - * The complemented objp is to check for that. - */ -void -free_percpu(const void *objp) -{ - int i; - struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp); - - /* - * We allocate for all cpus so we cannot use for online cpu here. 
- */ - for_each_cpu(i) - kfree(p->ptrs[i]); - kfree(p); -} -EXPORT_SYMBOL(free_percpu); -#endif - -unsigned int kmem_cache_size(kmem_cache_t *cachep) -{ - return obj_reallen(cachep); -} -EXPORT_SYMBOL(kmem_cache_size); - -const char *kmem_cache_name(kmem_cache_t *cachep) -{ - return cachep->name; -} -EXPORT_SYMBOL_GPL(kmem_cache_name); - /* - * This initializes kmem_list3 for all nodes. + * This initializes kmem_cache_node or resizes various caches for all nodes. */ -static int alloc_kmemlist(kmem_cache_t *cachep) +static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp) { int node; - struct kmem_list3 *l3; - int err = 0; + struct kmem_cache_node *n; + struct array_cache *new_shared; + struct array_cache **new_alien = NULL; for_each_online_node(node) { - struct array_cache *nc = NULL, *new; - struct array_cache **new_alien = NULL; -#ifdef CONFIG_NUMA - if (!(new_alien = alloc_alien_cache(node, cachep->limit))) - goto fail; -#endif - if (!(new = alloc_arraycache(node, (cachep->shared* - cachep->batchcount), 0xbaadf00d))) - goto fail; - if ((l3 = cachep->nodelists[node])) { - spin_lock_irq(&l3->list_lock); + if (use_alien_caches) { + new_alien = alloc_alien_cache(node, cachep->limit, gfp); + if (!new_alien) + goto fail; + } + + new_shared = NULL; + if (cachep->shared) { + new_shared = alloc_arraycache(node, + cachep->shared*cachep->batchcount, + 0xbaadf00d, gfp); + if (!new_shared) { + free_alien_cache(new_alien); + goto fail; + } + } + + n = cachep->node[node]; + if (n) { + struct array_cache *shared = n->shared; - if ((nc = cachep->nodelists[node]->shared)) - free_block(cachep, nc->entry, - nc->avail, node); + spin_lock_irq(&n->list_lock); - l3->shared = new; - if (!cachep->nodelists[node]->alien) { - l3->alien = new_alien; + if (shared) + free_block(cachep, shared->entry, + shared->avail, node); + + n->shared = new_shared; + if (!n->alien) { + n->alien = new_alien; new_alien = NULL; } - l3->free_limit = (1 + nr_cpus_node(node))* - cachep->batchcount + cachep->num; - spin_unlock_irq(&l3->list_lock); - kfree(nc); + n->free_limit = (1 + nr_cpus_node(node)) * + cachep->batchcount + cachep->num; + spin_unlock_irq(&n->list_lock); + kfree(shared); free_alien_cache(new_alien); continue; } - if (!(l3 = kmalloc_node(sizeof(struct kmem_list3), - GFP_KERNEL, node))) + n = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node); + if (!n) { + free_alien_cache(new_alien); + kfree(new_shared); goto fail; + } - kmem_list3_init(l3); - l3->next_reap = jiffies + REAPTIMEOUT_LIST3 + - ((unsigned long)cachep)%REAPTIMEOUT_LIST3; - l3->shared = new; - l3->alien = new_alien; - l3->free_limit = (1 + nr_cpus_node(node))* - cachep->batchcount + cachep->num; - cachep->nodelists[node] = l3; + kmem_cache_node_init(n); + n->next_reap = jiffies + REAPTIMEOUT_NODE + + ((unsigned long)cachep) % REAPTIMEOUT_NODE; + n->shared = new_shared; + n->alien = new_alien; + n->free_limit = (1 + nr_cpus_node(node)) * + cachep->batchcount + cachep->num; + cachep->node[node] = n; } - return err; + return 0; + fail: - err = -ENOMEM; - return err; + if (!cachep->list.next) { + /* Cache is not active yet. 
Roll back what we did */ + node--; + while (node >= 0) { + if (cachep->node[node]) { + n = cachep->node[node]; + + kfree(n->shared); + free_alien_cache(n->alien); + kfree(n); + cachep->node[node] = NULL; + } + node--; + } + } + return -ENOMEM; } struct ccupdate_struct { - kmem_cache_t *cachep; - struct array_cache *new[NR_CPUS]; + struct kmem_cache *cachep; + struct array_cache *new[0]; }; static void do_ccupdate_local(void *info) { - struct ccupdate_struct *new = (struct ccupdate_struct *)info; + struct ccupdate_struct *new = info; struct array_cache *old; check_irq_off(); - old = ac_data(new->cachep); + old = cpu_cache_get(new->cachep); new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; new->new[smp_processor_id()] = old; } - -static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount, - int shared) +/* Always called with the slab_mutex held */ +static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, + int batchcount, int shared, gfp_t gfp) { - struct ccupdate_struct new; - int i, err; + struct ccupdate_struct *new; + int i; + + new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *), + gfp); + if (!new) + return -ENOMEM; - memset(&new.new,0,sizeof(new.new)); for_each_online_cpu(i) { - new.new[i] = alloc_arraycache(cpu_to_node(i), limit, batchcount); - if (!new.new[i]) { - for (i--; i >= 0; i--) kfree(new.new[i]); + new->new[i] = alloc_arraycache(cpu_to_mem(i), limit, + batchcount, gfp); + if (!new->new[i]) { + for (i--; i >= 0; i--) + kfree(new->new[i]); + kfree(new); return -ENOMEM; } } - new.cachep = cachep; + new->cachep = cachep; - smp_call_function_all_cpus(do_ccupdate_local, (void *)&new); + on_each_cpu(do_ccupdate_local, (void *)new, 1); check_irq_on(); - spin_lock_irq(&cachep->spinlock); cachep->batchcount = batchcount; cachep->limit = limit; cachep->shared = shared; - spin_unlock_irq(&cachep->spinlock); for_each_online_cpu(i) { - struct array_cache *ccold = new.new[i]; + struct array_cache *ccold = new->new[i]; if (!ccold) continue; - spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); - free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i)); - spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock); + spin_lock_irq(&cachep->node[cpu_to_mem(i)]->list_lock); + free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i)); + spin_unlock_irq(&cachep->node[cpu_to_mem(i)]->list_lock); kfree(ccold); } + kfree(new); + return alloc_kmem_cache_node(cachep, gfp); +} - err = alloc_kmemlist(cachep); - if (err) { - printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n", - cachep->name, -err); - BUG(); +static int do_tune_cpucache(struct kmem_cache *cachep, int limit, + int batchcount, int shared, gfp_t gfp) +{ + int ret; + struct kmem_cache *c = NULL; + int i = 0; + + ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); + + if (slab_state < FULL) + return ret; + + if ((ret < 0) || !is_root_cache(cachep)) + return ret; + + VM_BUG_ON(!mutex_is_locked(&slab_mutex)); + for_each_memcg_cache_index(i) { + c = cache_from_memcg_idx(cachep, i); + if (c) + /* return value determined by the parent cache only */ + __do_tune_cpucache(c, limit, batchcount, shared, gfp); } - return 0; -} + return ret; +} -static void enable_cpucache(kmem_cache_t *cachep) +/* Called with slab_mutex held always */ +static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) { int err; - int limit, shared; + int limit = 0; + int shared = 0; + int batchcount = 0; + + if (!is_root_cache(cachep)) { + struct kmem_cache 
*root = memcg_root_cache(cachep); + limit = root->limit; + shared = root->shared; + batchcount = root->batchcount; + } - /* The head array serves three purposes: + if (limit && shared && batchcount) + goto skip_setup; + /* + * The head array serves three purposes: * - create a LIFO ordering, i.e. return objects that are cache-warm * - reduce the number of spinlock operations. - * - reduce the number of linked list operations on the slab and + * - reduce the number of linked list operations on the slab and * bufctl chains: array operations are cheaper. * The numbers are guessed, we should auto-tune as described by * Bonwick. */ - if (cachep->objsize > 131072) + if (cachep->size > 131072) limit = 1; - else if (cachep->objsize > PAGE_SIZE) + else if (cachep->size > PAGE_SIZE) limit = 8; - else if (cachep->objsize > 1024) + else if (cachep->size > 1024) limit = 24; - else if (cachep->objsize > 256) + else if (cachep->size > 256) limit = 54; else limit = 120; - /* Cpu bound tasks (e.g. network routing) can exhibit cpu bound + /* + * CPU bound tasks (e.g. network routing) can exhibit cpu bound * allocation behaviour: Most allocs on one cpu, most free operations * on another cpu. For these cases, an efficient object passing between * cpus is necessary. This is provided by a shared array. The array @@ -3222,256 +3968,196 @@ static void enable_cpucache(kmem_cache_t *cachep) * to a larger limit. Thus disabled by default. */ shared = 0; -#ifdef CONFIG_SMP - if (cachep->objsize <= PAGE_SIZE) + if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) shared = 8; -#endif #if DEBUG - /* With debugging enabled, large batchcount lead to excessively - * long periods with disabled local interrupts. Limit the - * batchcount + /* + * With debugging enabled, large batchcount lead to excessively long + * periods with disabled local interrupts. Limit the batchcount */ if (limit > 32) limit = 32; #endif - err = do_tune_cpucache(cachep, limit, (limit+1)/2, shared); + batchcount = (limit + 1) / 2; +skip_setup: + err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); if (err) printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n", - cachep->name, -err); + cachep->name, -err); + return err; } -static void drain_array_locked(kmem_cache_t *cachep, - struct array_cache *ac, int force, int node) +/* + * Drain an array if it contains any elements taking the node lock only if + * necessary. Note that the node listlock also protects the array_cache + * if drain_array() is used on the shared array. + */ +static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, + struct array_cache *ac, int force, int node) { int tofree; - check_spinlock_acquired_node(cachep, node); + if (!ac || !ac->avail) + return; if (ac->touched && !force) { ac->touched = 0; - } else if (ac->avail) { - tofree = force ? ac->avail : (ac->limit+4)/5; - if (tofree > ac->avail) { - tofree = (ac->avail+1)/2; + } else { + spin_lock_irq(&n->list_lock); + if (ac->avail) { + tofree = force ? ac->avail : (ac->limit + 4) / 5; + if (tofree > ac->avail) + tofree = (ac->avail + 1) / 2; + free_block(cachep, ac->entry, tofree, node); + ac->avail -= tofree; + memmove(ac->entry, &(ac->entry[tofree]), + sizeof(void *) * ac->avail); } - free_block(cachep, ac->entry, tofree, node); - ac->avail -= tofree; - memmove(ac->entry, &(ac->entry[tofree]), - sizeof(void*)*ac->avail); + spin_unlock_irq(&n->list_lock); } } /** * cache_reap - Reclaim memory from caches. + * @w: work descriptor * * Called from workqueue/eventd every few seconds. 
* Purpose: * - clear the per-cpu caches for this CPU. * - return freeable pages to the main free memory pool. * - * If we cannot acquire the cache chain semaphore then just give up - we'll - * try again on the next iteration. + * If we cannot acquire the cache chain mutex then just give up - we'll try + * again on the next iteration. */ -static void cache_reap(void *unused) +static void cache_reap(struct work_struct *w) { - struct list_head *walk; - struct kmem_list3 *l3; + struct kmem_cache *searchp; + struct kmem_cache_node *n; + int node = numa_mem_id(); + struct delayed_work *work = to_delayed_work(w); - if (down_trylock(&cache_chain_sem)) { + if (!mutex_trylock(&slab_mutex)) /* Give up. Setup the next iteration. */ - schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); - return; - } - - list_for_each(walk, &cache_chain) { - kmem_cache_t *searchp; - struct list_head* p; - int tofree; - struct slab *slabp; - - searchp = list_entry(walk, kmem_cache_t, next); - - if (searchp->flags & SLAB_NO_REAP) - goto next; + goto out; + list_for_each_entry(searchp, &slab_caches, list) { check_irq_on(); - l3 = searchp->nodelists[numa_node_id()]; - if (l3->alien) - drain_alien_cache(searchp, l3); - spin_lock_irq(&l3->list_lock); - - drain_array_locked(searchp, ac_data(searchp), 0, - numa_node_id()); + /* + * We only take the node lock if absolutely necessary and we + * have established with reasonable certainty that + * we can do some work if the lock was obtained. + */ + n = searchp->node[node]; - if (time_after(l3->next_reap, jiffies)) - goto next_unlock; + reap_alien(searchp, n); - l3->next_reap = jiffies + REAPTIMEOUT_LIST3; + drain_array(searchp, n, cpu_cache_get(searchp), 0, node); - if (l3->shared) - drain_array_locked(searchp, l3->shared, 0, - numa_node_id()); + /* + * These are racy checks but it does not matter + * if we skip one check or scan twice. + */ + if (time_after(n->next_reap, jiffies)) + goto next; - if (l3->free_touched) { - l3->free_touched = 0; - goto next_unlock; - } + n->next_reap = jiffies + REAPTIMEOUT_NODE; - tofree = (l3->free_limit+5*searchp->num-1)/(5*searchp->num); - do { - p = l3->slabs_free.next; - if (p == &(l3->slabs_free)) - break; + drain_array(searchp, n, n->shared, 0, node); - slabp = list_entry(p, struct slab, list); - BUG_ON(slabp->inuse); - list_del(&slabp->list); - STATS_INC_REAPED(searchp); + if (n->free_touched) + n->free_touched = 0; + else { + int freed; - /* Safe to drop the lock. The slab is no longer - * linked to the cache. - * searchp cannot disappear, we hold - * cache_chain_lock - */ - l3->free_objects -= searchp->num; - spin_unlock_irq(&l3->list_lock); - slab_destroy(searchp, slabp); - spin_lock_irq(&l3->list_lock); - } while(--tofree > 0); -next_unlock: - spin_unlock_irq(&l3->list_lock); + freed = drain_freelist(searchp, n, (n->free_limit + + 5 * searchp->num - 1) / (5 * searchp->num)); + STATS_ADD_REAPED(searchp, freed); + } next: cond_resched(); } check_irq_on(); - up(&cache_chain_sem); - drain_remote_pages(); - /* Setup the next iteration */ - schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC); -} - -#ifdef CONFIG_PROC_FS - -static void *s_start(struct seq_file *m, loff_t *pos) -{ - loff_t n = *pos; - struct list_head *p; - - down(&cache_chain_sem); - if (!n) { - /* - * Output format version, so at least we can change it - * without _too_ many complaints. 
- */ -#if STATS - seq_puts(m, "slabinfo - version: 2.1 (statistics)\n"); -#else - seq_puts(m, "slabinfo - version: 2.1\n"); -#endif - seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>"); - seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>"); - seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>"); -#if STATS - seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped>" - " <error> <maxfreeable> <nodeallocs> <remotefrees>"); - seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>"); -#endif - seq_putc(m, '\n'); - } - p = cache_chain.next; - while (n--) { - p = p->next; - if (p == &cache_chain) - return NULL; - } - return list_entry(p, kmem_cache_t, next); -} - -static void *s_next(struct seq_file *m, void *p, loff_t *pos) -{ - kmem_cache_t *cachep = p; - ++*pos; - return cachep->next.next == &cache_chain ? NULL - : list_entry(cachep->next.next, kmem_cache_t, next); -} - -static void s_stop(struct seq_file *m, void *p) -{ - up(&cache_chain_sem); + mutex_unlock(&slab_mutex); + next_reap_node(); +out: + /* Set up the next iteration */ + schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC)); } -static int s_show(struct seq_file *m, void *p) +#ifdef CONFIG_SLABINFO +void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) { - kmem_cache_t *cachep = p; - struct list_head *q; - struct slab *slabp; - unsigned long active_objs; - unsigned long num_objs; - unsigned long active_slabs = 0; - unsigned long num_slabs, free_objects = 0, shared_avail = 0; + struct page *page; + unsigned long active_objs; + unsigned long num_objs; + unsigned long active_slabs = 0; + unsigned long num_slabs, free_objects = 0, shared_avail = 0; const char *name; char *error = NULL; int node; - struct kmem_list3 *l3; + struct kmem_cache_node *n; - check_irq_on(); - spin_lock_irq(&cachep->spinlock); active_objs = 0; num_slabs = 0; for_each_online_node(node) { - l3 = cachep->nodelists[node]; - if (!l3) + n = cachep->node[node]; + if (!n) continue; - spin_lock(&l3->list_lock); + check_irq_on(); + spin_lock_irq(&n->list_lock); - list_for_each(q,&l3->slabs_full) { - slabp = list_entry(q, struct slab, list); - if (slabp->inuse != cachep->num && !error) + list_for_each_entry(page, &n->slabs_full, lru) { + if (page->active != cachep->num && !error) error = "slabs_full accounting error"; active_objs += cachep->num; active_slabs++; } - list_for_each(q,&l3->slabs_partial) { - slabp = list_entry(q, struct slab, list); - if (slabp->inuse == cachep->num && !error) - error = "slabs_partial inuse accounting error"; - if (!slabp->inuse && !error) - error = "slabs_partial/inuse accounting error"; - active_objs += slabp->inuse; + list_for_each_entry(page, &n->slabs_partial, lru) { + if (page->active == cachep->num && !error) + error = "slabs_partial accounting error"; + if (!page->active && !error) + error = "slabs_partial accounting error"; + active_objs += page->active; active_slabs++; } - list_for_each(q,&l3->slabs_free) { - slabp = list_entry(q, struct slab, list); - if (slabp->inuse && !error) - error = "slabs_free/inuse accounting error"; + list_for_each_entry(page, &n->slabs_free, lru) { + if (page->active && !error) + error = "slabs_free accounting error"; num_slabs++; } - free_objects += l3->free_objects; - shared_avail += l3->shared->avail; + free_objects += n->free_objects; + if (n->shared) + shared_avail += n->shared->avail; - spin_unlock(&l3->list_lock); + spin_unlock_irq(&n->list_lock); } - num_slabs+=active_slabs; - num_objs = 
num_slabs*cachep->num; + num_slabs += active_slabs; + num_objs = num_slabs * cachep->num; if (num_objs - active_objs != free_objects && !error) error = "free_objects accounting error"; - name = cachep->name; + name = cachep->name; if (error) printk(KERN_ERR "slab: cache %s error: %s\n", name, error); - seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", - name, active_objs, num_objs, cachep->objsize, - cachep->num, (1<<cachep->gfporder)); - seq_printf(m, " : tunables %4u %4u %4u", - cachep->limit, cachep->batchcount, - cachep->shared); - seq_printf(m, " : slabdata %6lu %6lu %6lu", - active_slabs, num_slabs, shared_avail); + sinfo->active_objs = active_objs; + sinfo->num_objs = num_objs; + sinfo->active_slabs = active_slabs; + sinfo->num_slabs = num_slabs; + sinfo->shared_avail = shared_avail; + sinfo->limit = cachep->limit; + sinfo->batchcount = cachep->batchcount; + sinfo->shared = cachep->shared; + sinfo->objects_per_slab = cachep->num; + sinfo->cache_order = cachep->gfporder; +} + +void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) +{ #if STATS - { /* list3 stats */ + { /* node stats */ unsigned long high = cachep->high_mark; unsigned long allocs = cachep->num_allocations; unsigned long grown = cachep->grown; @@ -3480,11 +4166,13 @@ static int s_show(struct seq_file *m, void *p) unsigned long max_freeable = cachep->max_freeable; unsigned long node_allocs = cachep->node_allocs; unsigned long node_frees = cachep->node_frees; + unsigned long overflows = cachep->node_overflow; - seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \ - %4lu %4lu %4lu %4lu", - allocs, high, grown, reaped, errors, - max_freeable, node_allocs, node_frees); + seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu " + "%4lu %4lu %4lu %4lu %4lu", + allocs, high, grown, + reaped, errors, max_freeable, node_allocs, + node_frees, overflows); } /* cpu stats */ { @@ -3494,35 +4182,11 @@ static int s_show(struct seq_file *m, void *p) unsigned long freemiss = atomic_read(&cachep->freemiss); seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu", - allochit, allocmiss, freehit, freemiss); + allochit, allocmiss, freehit, freemiss); } #endif - seq_putc(m, '\n'); - spin_unlock_irq(&cachep->spinlock); - return 0; } -/* - * slabinfo_op - iterator that generates /proc/slabinfo - * - * Output layout: - * cache-name - * num-active-objs - * total-objs - * object size - * num-active-slabs - * total-slabs - * num-pages-per-slab - * + further values on SMP and with statistics enabled - */ - -struct seq_operations slabinfo_op = { - .start = s_start, - .next = s_next, - .stop = s_stop, - .show = s_show, -}; - #define MAX_SLABINFO_WRITE 128 /** * slabinfo_write - Tuning for the slab allocator @@ -3532,17 +4196,17 @@ struct seq_operations slabinfo_op = { * @ppos: unused */ ssize_t slabinfo_write(struct file *file, const char __user *buffer, - size_t count, loff_t *ppos) + size_t count, loff_t *ppos) { - char kbuf[MAX_SLABINFO_WRITE+1], *tmp; + char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; int limit, batchcount, shared, res; - struct list_head *p; - + struct kmem_cache *cachep; + if (count > MAX_SLABINFO_WRITE) return -EINVAL; if (copy_from_user(&kbuf, buffer, count)) return -EFAULT; - kbuf[MAX_SLABINFO_WRITE] = '\0'; + kbuf[MAX_SLABINFO_WRITE] = '\0'; tmp = strchr(kbuf, ' '); if (!tmp) @@ -3553,29 +4217,198 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer, return -EINVAL; /* Find the cache in the chain of caches. 
*/ - down(&cache_chain_sem); + mutex_lock(&slab_mutex); res = -EINVAL; - list_for_each(p,&cache_chain) { - kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next); - + list_for_each_entry(cachep, &slab_caches, list) { if (!strcmp(cachep->name, kbuf)) { - if (limit < 1 || - batchcount < 1 || - batchcount > limit || - shared < 0) { + if (limit < 1 || batchcount < 1 || + batchcount > limit || shared < 0) { res = 0; } else { res = do_tune_cpucache(cachep, limit, - batchcount, shared); + batchcount, shared, + GFP_KERNEL); } break; } } - up(&cache_chain_sem); + mutex_unlock(&slab_mutex); if (res >= 0) res = count; return res; } + +#ifdef CONFIG_DEBUG_SLAB_LEAK + +static void *leaks_start(struct seq_file *m, loff_t *pos) +{ + mutex_lock(&slab_mutex); + return seq_list_start(&slab_caches, *pos); +} + +static inline int add_caller(unsigned long *n, unsigned long v) +{ + unsigned long *p; + int l; + if (!v) + return 1; + l = n[1]; + p = n + 2; + while (l) { + int i = l/2; + unsigned long *q = p + 2 * i; + if (*q == v) { + q[1]++; + return 1; + } + if (*q > v) { + l = i; + } else { + p = q + 2; + l -= i + 1; + } + } + if (++n[1] == n[0]) + return 0; + memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n)); + p[0] = v; + p[1] = 1; + return 1; +} + +static void handle_slab(unsigned long *n, struct kmem_cache *c, + struct page *page) +{ + void *p; + int i; + + if (n[0] == n[1]) + return; + for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) { + if (get_obj_status(page, i) != OBJECT_ACTIVE) + continue; + + if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) + return; + } +} + +static void show_symbol(struct seq_file *m, unsigned long address) +{ +#ifdef CONFIG_KALLSYMS + unsigned long offset, size; + char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN]; + + if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) { + seq_printf(m, "%s+%#lx/%#lx", name, offset, size); + if (modname[0]) + seq_printf(m, " [%s]", modname); + return; + } +#endif + seq_printf(m, "%p", (void *)address); +} + +static int leaks_show(struct seq_file *m, void *p) +{ + struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list); + struct page *page; + struct kmem_cache_node *n; + const char *name; + unsigned long *x = m->private; + int node; + int i; + + if (!(cachep->flags & SLAB_STORE_USER)) + return 0; + if (!(cachep->flags & SLAB_RED_ZONE)) + return 0; + + /* OK, we can do it */ + + x[1] = 0; + + for_each_online_node(node) { + n = cachep->node[node]; + if (!n) + continue; + + check_irq_on(); + spin_lock_irq(&n->list_lock); + + list_for_each_entry(page, &n->slabs_full, lru) + handle_slab(x, cachep, page); + list_for_each_entry(page, &n->slabs_partial, lru) + handle_slab(x, cachep, page); + spin_unlock_irq(&n->list_lock); + } + name = cachep->name; + if (x[0] == x[1]) { + /* Increase the buffer size */ + mutex_unlock(&slab_mutex); + m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL); + if (!m->private) { + /* Too bad, we are really out */ + m->private = x; + mutex_lock(&slab_mutex); + return -ENOMEM; + } + *(unsigned long *)m->private = x[0] * 2; + kfree(x); + mutex_lock(&slab_mutex); + /* Now make sure this entry will be retried */ + m->count = m->size; + return 0; + } + for (i = 0; i < x[1]; i++) { + seq_printf(m, "%s: %lu ", name, x[2*i+3]); + show_symbol(m, x[2*i+2]); + seq_putc(m, '\n'); + } + + return 0; +} + +static const struct seq_operations slabstats_op = { + .start = leaks_start, + .next = slab_next, + .stop = slab_stop, + .show = leaks_show, +}; + 
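The leaks_show()/add_caller() code added above folds each active object's recorded allocation caller (stored via SLAB_STORE_USER in dbg_userword) into one sorted table of (caller, count) pairs, growing the buffer when it fills. Below is a minimal userspace sketch of that table layout and insertion logic; the sample addresses and sizes are hypothetical and the code is illustrative only, not part of the patch.

/*
 * Userspace model of the add_caller() table used by leaks_show():
 * n[0] holds the capacity (in pairs), n[1] the number of entries,
 * and the rest is a sorted array of (caller address, hit count) pairs.
 */
#include <stdio.h>
#include <string.h>

static int add_caller(unsigned long *n, unsigned long v)
{
        unsigned long *p = n + 2;
        unsigned long l = n[1];

        if (!v)
                return 1;
        /* binary search for an existing entry for this caller */
        while (l) {
                unsigned long i = l / 2;
                unsigned long *q = p + 2 * i;

                if (*q == v) {
                        q[1]++;         /* known caller: bump its count */
                        return 1;
                }
                if (*q > v) {
                        l = i;
                } else {
                        p = q + 2;
                        l -= i + 1;
                }
        }
        if (n[1] == n[0])
                return 0;               /* table full, caller must grow it */
        /* shift the tail and insert the new (caller, count) pair */
        memmove(p + 2, p, (char *)(n + 2 + 2 * n[1]) - (char *)p);
        n[1]++;
        p[0] = v;
        p[1] = 1;
        return 1;
}

int main(void)
{
        unsigned long table[2 + 2 * 8] = { 8, 0 };      /* room for 8 callers */
        unsigned long samples[] = { 0xc0de, 0xbeef, 0xc0de, 0xf00d, 0xbeef, 0xc0de };
        unsigned long i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                add_caller(table, samples[i]);

        for (i = 0; i < table[1]; i++)
                printf("caller %#lx seen %lu times\n",
                       table[2 + 2 * i], table[2 + 2 * i + 1]);
        return 0;
}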
+static int slabstats_open(struct inode *inode, struct file *file) +{ + unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL); + int ret = -ENOMEM; + if (n) { + ret = seq_open(file, &slabstats_op); + if (!ret) { + struct seq_file *m = file->private_data; + *n = PAGE_SIZE / (2 * sizeof(unsigned long)); + m->private = n; + n = NULL; + } + kfree(n); + } + return ret; +} + +static const struct file_operations proc_slabstats_operations = { + .open = slabstats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; +#endif + +static int __init slab_proc_init(void) +{ +#ifdef CONFIG_DEBUG_SLAB_LEAK + proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations); +#endif + return 0; +} +module_init(slab_proc_init); #endif /** @@ -3590,33 +4423,12 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer, * allocated with either kmalloc() or kmem_cache_alloc(). The object * must not be freed during the duration of the call. */ -unsigned int ksize(const void *objp) +size_t ksize(const void *objp) { - if (unlikely(objp == NULL)) + BUG_ON(!objp); + if (unlikely(objp == ZERO_SIZE_PTR)) return 0; - return obj_reallen(GET_PAGE_CACHE(virt_to_page(objp))); -} - - -/* - * kstrdup - allocate space for and copy an existing string - * - * @s: the string to duplicate - * @gfp: the GFP mask used in the kmalloc() call when allocating memory - */ -char *kstrdup(const char *s, gfp_t gfp) -{ - size_t len; - char *buf; - - if (!s) - return NULL; - - len = strlen(s) + 1; - buf = kmalloc(len, gfp); - if (buf) - memcpy(buf, s, len); - return buf; + return virt_to_cache(objp)->object_size; } -EXPORT_SYMBOL(kstrdup); +EXPORT_SYMBOL(ksize); |
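slabinfo_write() above accepts a single line of the form "<cache-name> <limit> <batchcount> <shared>" and, when the tunables pass the sanity checks (limit and batchcount at least 1, batchcount no larger than limit, shared non-negative), retunes the named cache via do_tune_cpucache() with GFP_KERNEL. A hedged userspace sketch of driving that interface follows; the cache name and values are only examples and root privileges are assumed.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* "<name> <limit> <batchcount> <shared>"; the values are examples only */
        const char line[] = "dentry 120 60 8\n";
        int fd = open("/proc/slabinfo", O_WRONLY);

        if (fd < 0) {
                perror("open /proc/slabinfo");
                return 1;
        }
        /* write() fails with EINVAL if no cache by that name exists */
        if (write(fd, line, strlen(line)) < 0)
                perror("write /proc/slabinfo");
        close(fd);
        return 0;
}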

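The fast paths in ____cache_alloc() and __cache_free() in the patch above work against the per-CPU array_cache: allocation pops the most recently freed (cache-warm) object, and a free that finds the array full first hands batchcount objects back to the node lists via cache_flusharray() before pushing. The toy userspace model below illustrates that LIFO behaviour; the limit/batchcount values and the flush stub are assumptions for the example, not kernel API.

#include <stdio.h>
#include <string.h>

#define LIMIT      8
#define BATCHCOUNT 4

struct array_cache {
        unsigned int avail;
        unsigned int limit;
        unsigned int batchcount;
        void *entry[LIMIT];
};

/* stand-in for handing objects back to the node's slab lists */
static void flush_to_slab_lists(void **objpp, int nr)
{
        (void)objpp;
        printf("flushing %d objects back to the slab lists\n", nr);
}

static void cache_free(struct array_cache *ac, void *objp)
{
        if (ac->avail == ac->limit) {
                flush_to_slab_lists(ac->entry, ac->batchcount);
                ac->avail -= ac->batchcount;
                memmove(ac->entry, &ac->entry[ac->batchcount],
                        sizeof(void *) * ac->avail);
        }
        ac->entry[ac->avail++] = objp;          /* LIFO push */
}

static void *cache_alloc(struct array_cache *ac)
{
        if (!ac->avail)
                return NULL;                    /* would fall into cache_alloc_refill() */
        return ac->entry[--ac->avail];          /* LIFO pop: cache-warm object */
}

int main(void)
{
        struct array_cache ac = { .limit = LIMIT, .batchcount = BATCHCOUNT };
        char objs[LIMIT + 2];
        int i;

        for (i = 0; i < LIMIT + 2; i++)
                cache_free(&ac, &objs[i]);      /* the 9th free forces one flush */

        printf("allocated %p, most recently freed was %p\n",
               cache_alloc(&ac), (void *)&objs[LIMIT + 1]);
        return 0;
}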