Diffstat (limited to 'mm/slob.c')
-rw-r--r--  mm/slob.c  393
1 file changed, 196 insertions(+), 197 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index e2c3c0ec546..21980e0f39a 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -28,9 +28,8 @@
* from kmalloc are prepended with a 4-byte header with the kmalloc size.
* If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
* alloc_pages() directly, allocating compound pages so the page order
- * does not have to be separately tracked, and also stores the exact
- * allocation size in page->private so that it can be used to accurately
- * provide ksize(). These objects are detected in kfree() because slob_page()
+ * does not have to be separately tracked.
+ * These objects are detected in kfree() because PageSlab()
* is false for them.
*
* SLAB is emulated on top of SLOB by simply calling constructors and
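A user-space sketch of the header scheme described above; toy_kmalloc()/toy_kfree() are hypothetical stand-ins (the real paths are __do_kmalloc_node() and kfree() later in this diff), and TOY_ALIGN stands in for the kernel's computed align:

#include <stdlib.h>

/* Toy model only: SLOB records the request size in a header placed
 * "align" bytes ahead of the pointer it hands out. */
#define TOY_ALIGN sizeof(unsigned long)

static void *toy_kmalloc(size_t size)
{
	unsigned int *m = malloc(size + TOY_ALIGN);	/* header + payload */

	if (!m)
		return NULL;
	*m = size;				/* mirrors "*m = size" below */
	return (char *)m + TOY_ALIGN;		/* caller sees only the payload */
}

static void toy_kfree(void *block)
{
	/* step back over the header to recover the stored size */
	unsigned int *m = (unsigned int *)((char *)block - TOY_ALIGN);

	free(m);	/* the kernel instead calls slob_free(m, *m + align) */
}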
@@ -46,7 +45,7 @@
* NUMA support in SLOB is fairly simplistic, pushing most of the real
* logic down to the page allocator, and simply doing the node accounting
* on the upper levels. In the event that a node id is explicitly
- * provided, alloc_pages_node() with the specified node id is used
+ * provided, alloc_pages_exact_node() with the specified node id is used
* instead. The common case (or when the node id isn't explicitly provided)
* will default to the current node, as per numa_node_id().
*
@@ -59,14 +58,21 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+
#include <linux/mm.h>
+#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
-#include <asm/atomic.h>
+#include <linux/kmemleak.h>
+
+#include <trace/events/kmem.h>
+#include <linux/atomic.h>
+
+#include "slab.h"
/*
* slob_block has a field 'units', which indicates size of block if +ve,
* or offset of next block if -ve (in SLOB_UNITs).
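A simplified sketch of that encoding (hedged: slob.c's real set_slob()/slob_units() also keep a next-block offset in a second unit for multi-unit blocks; "short" here mirrors the s16 slobidx_t used when PAGE_SIZE <= 32K):

typedef short sketch_slobidx_t;

struct sketch_slob_block {
	sketch_slobidx_t units;	/* +size of block, or -offset of next free block */
};

static sketch_slobidx_t sketch_slob_units(struct sketch_slob_block *s)
{
	if (s->units > 0)
		return s->units;	/* positive: size stored directly */
	return 1;			/* negative: a one-unit block; the field
					 * holds the negated next-block offset */
}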
@@ -87,36 +93,6 @@ struct slob_block {
typedef struct slob_block slob_t;
/*
- * We use struct page fields to manage some slob allocation aspects,
- * however to avoid the horrible mess in include/linux/mm_types.h, we'll
- * just define our own struct page type variant here.
- */
-struct slob_page {
- union {
- struct {
- unsigned long flags; /* mandatory */
- atomic_t _count; /* mandatory */
- slobidx_t units; /* free units left in page */
- unsigned long pad[2];
- slob_t *free; /* first free slob_t in page */
- struct list_head list; /* linked list of free pages */
- };
- struct page page;
- };
-};
-static inline void struct_slob_page_wrong_size(void)
-{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }
-
-/*
- * free_slob_page: call before a slob_page is returned to the page allocator.
- */
-static inline void free_slob_page(struct slob_page *sp)
-{
- reset_page_mapcount(&sp->page);
- sp->page.mapping = NULL;
-}
-
-/*
* All partially free slob pages go on these lists.
*/
#define SLOB_BREAK1 256
@@ -126,46 +102,27 @@ static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
/*
- * slob_page: True for all slob pages (false for bigblock pages)
- */
-static inline int slob_page(struct slob_page *sp)
-{
- return test_bit(PG_active, &sp->flags);
-}
-
-static inline void set_slob_page(struct slob_page *sp)
-{
- __set_bit(PG_active, &sp->flags);
-}
-
-static inline void clear_slob_page(struct slob_page *sp)
-{
- __clear_bit(PG_active, &sp->flags);
-}
-
-/*
* slob_page_free: true for pages on free_slob_pages list.
*/
-static inline int slob_page_free(struct slob_page *sp)
+static inline int slob_page_free(struct page *sp)
{
- return test_bit(PG_private, &sp->flags);
+ return PageSlobFree(sp);
}
-static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
+static void set_slob_page_free(struct page *sp, struct list_head *list)
{
- list_add(&sp->list, list);
- __set_bit(PG_private, &sp->flags);
+ list_add(&sp->lru, list);
+ __SetPageSlobFree(sp);
}
-static inline void clear_slob_page_free(struct slob_page *sp)
+static inline void clear_slob_page_free(struct page *sp)
{
- list_del(&sp->list);
- __clear_bit(PG_private, &sp->flags);
+ list_del(&sp->lru);
+ __ClearPageSlobFree(sp);
}
#define SLOB_UNIT sizeof(slob_t)
-#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
-#define SLOB_ALIGN L1_CACHE_BYTES
+#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
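The DIV_ROUND_UP() form is arithmetically identical to the removed open-coded rounding; a standalone check (unit size hedged at 2 bytes, the common s16 slobidx_t case):

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* as in linux/kernel.h */

_Static_assert(DIV_ROUND_UP(5, 2) == 3, "5 bytes round up to 3 two-byte units");
_Static_assert(DIV_ROUND_UP(4, 2) == 2, "exact multiples are unchanged");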
/*
* struct slob_rcu is inserted at the tail of allocated slob blocks, which
@@ -230,13 +187,13 @@ static int slob_last(slob_t *s)
return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
{
void *page;
#ifdef CONFIG_NUMA
- if (node != -1)
- page = alloc_pages_node(node, gfp, order);
+ if (node != NUMA_NO_NODE)
+ page = alloc_pages_exact_node(node, gfp, order);
else
#endif
page = alloc_pages(gfp, order);
@@ -247,15 +204,22 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
return page_address(page);
}
+static void slob_free_pages(void *b, int order)
+{
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += 1 << order;
+ free_pages((unsigned long)b, order);
+}
+
/*
* Allocate a slob block within a given slob_page sp.
*/
-static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
+static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
- slob_t *prev, *cur, *aligned = 0;
+ slob_t *prev, *cur, *aligned = NULL;
int delta = 0, units = SLOB_UNITS(size);
- for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
+ for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
slobidx_t avail = slob_units(cur);
if (align) {
@@ -279,12 +243,12 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
if (prev)
set_slob(prev, slob_units(prev), next);
else
- sp->free = next;
+ sp->freelist = next;
} else { /* fragment */
if (prev)
set_slob(prev, slob_units(prev), cur + units);
else
- sp->free = cur + units;
+ sp->freelist = cur + units;
set_slob(cur + units, avail - units, next);
}
@@ -303,7 +267,7 @@ static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
*/
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
- struct slob_page *sp;
+ struct page *sp;
struct list_head *prev;
struct list_head *slob_list;
slob_t *b = NULL;
@@ -318,13 +282,13 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
spin_lock_irqsave(&slob_lock, flags);
/* Iterate through each partially free page, try to find room */
- list_for_each_entry(sp, slob_list, list) {
+ list_for_each_entry(sp, slob_list, lru) {
#ifdef CONFIG_NUMA
/*
* If there's a node specification, search for a partial
* page with a matching node id in the freelist.
*/
- if (node != -1 && page_to_nid(&sp->page) != node)
+ if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
continue;
#endif
/* Enough room on this page? */
@@ -332,7 +296,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
continue;
/* Attempt to alloc */
- prev = sp->list.prev;
+ prev = sp->lru.prev;
b = slob_page_alloc(sp, size, align);
if (!b)
continue;
@@ -349,16 +313,16 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
/* Not enough space: must allocate a new page */
if (!b) {
- b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+ b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
if (!b)
- return 0;
- sp = (struct slob_page *)virt_to_page(b);
- set_slob_page(sp);
+ return NULL;
+ sp = virt_to_page(b);
+ __SetPageSlab(sp);
spin_lock_irqsave(&slob_lock, flags);
sp->units = SLOB_UNITS(PAGE_SIZE);
- sp->free = b;
- INIT_LIST_HEAD(&sp->list);
+ sp->freelist = b;
+ INIT_LIST_HEAD(&sp->lru);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
set_slob_page_free(sp, slob_list);
b = slob_page_alloc(sp, size, align);
@@ -375,16 +339,17 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
*/
static void slob_free(void *block, int size)
{
- struct slob_page *sp;
+ struct page *sp;
slob_t *prev, *next, *b = (slob_t *)block;
slobidx_t units;
unsigned long flags;
+ struct list_head *slob_list;
if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
BUG_ON(!size);
- sp = (struct slob_page *)virt_to_page(block);
+ sp = virt_to_page(block);
units = SLOB_UNITS(size);
spin_lock_irqsave(&slob_lock, flags);
@@ -393,20 +358,27 @@ static void slob_free(void *block, int size)
/* Go directly to page allocator. Do not pass slob allocator */
if (slob_page_free(sp))
clear_slob_page_free(sp);
- clear_slob_page(sp);
- free_slob_page(sp);
- free_page((unsigned long)b);
- goto out;
+ spin_unlock_irqrestore(&slob_lock, flags);
+ __ClearPageSlab(sp);
+ page_mapcount_reset(sp);
+ slob_free_pages(b, 0);
+ return;
}
if (!slob_page_free(sp)) {
/* This slob page is about to become partially free. Easy! */
sp->units = units;
- sp->free = b;
+ sp->freelist = b;
set_slob(b, units,
(void *)((unsigned long)(b +
SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
- set_slob_page_free(sp, &free_slob_small);
+ if (size < SLOB_BREAK1)
+ slob_list = &free_slob_small;
+ else if (size < SLOB_BREAK2)
+ slob_list = &free_slob_medium;
+ else
+ slob_list = &free_slob_large;
+ set_slob_page_free(sp, slob_list);
goto out;
}
@@ -416,15 +388,15 @@ static void slob_free(void *block, int size)
*/
sp->units += units;
- if (b < sp->free) {
- if (b + units == sp->free) {
- units += slob_units(sp->free);
- sp->free = slob_next(sp->free);
+ if (b < (slob_t *)sp->freelist) {
+ if (b + units == sp->freelist) {
+ units += slob_units(sp->freelist);
+ sp->freelist = slob_next(sp->freelist);
}
- set_slob(b, units, sp->free);
- sp->free = b;
+ set_slob(b, units, sp->freelist);
+ sp->freelist = b;
} else {
- prev = sp->free;
+ prev = sp->freelist;
next = slob_next(prev);
while (b > next) {
prev = next;
@@ -451,140 +423,171 @@ out:
* End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
*/
-#ifndef ARCH_KMALLOC_MINALIGN
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
-#endif
-
-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
+static __always_inline void *
+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
unsigned int *m;
- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ void *ret;
+
+ gfp &= gfp_allowed_mask;
+
+ lockdep_trace_alloc(gfp);
if (size < PAGE_SIZE - align) {
if (!size)
return ZERO_SIZE_PTR;
m = slob_alloc(size + align, gfp, align, node);
- if (m)
- *m = size;
- return (void *)m + align;
+
+ if (!m)
+ return NULL;
+ *m = size;
+ ret = (void *)m + align;
+
+ trace_kmalloc_node(caller, ret,
+ size, size + align, gfp, node);
} else {
- void *ret;
+ unsigned int order = get_order(size);
- ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
- if (ret) {
- struct page *page;
- page = virt_to_page(ret);
- page->private = size;
- }
- return ret;
+ if (likely(order))
+ gfp |= __GFP_COMP;
+ ret = slob_new_pages(gfp, order, node);
+
+ trace_kmalloc_node(caller, ret,
+ size, PAGE_SIZE << order, gfp, node);
}
+
+ kmemleak_alloc(ret, size, 1, gfp);
+ return ret;
}
-EXPORT_SYMBOL(__kmalloc_node);
+
+void *__kmalloc(size_t size, gfp_t gfp)
+{
+ return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc);
+
+#ifdef CONFIG_TRACING
+void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
+{
+ return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
+}
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
+ int node, unsigned long caller)
+{
+ return __do_kmalloc_node(size, gfp, node, caller);
+}
+#endif
+#endif
void kfree(const void *block)
{
- struct slob_page *sp;
+ struct page *sp;
+
+ trace_kfree(_RET_IP_, block);
if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
+ kmemleak_free(block);
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp)) {
- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ sp = virt_to_page(block);
+ if (PageSlab(sp)) {
+ int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
} else
- put_page(&sp->page);
+ __free_pages(sp, compound_order(sp));
}
EXPORT_SYMBOL(kfree);
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
- struct slob_page *sp;
+ struct page *sp;
+ int align;
+ unsigned int *m;
BUG_ON(!block);
if (unlikely(block == ZERO_SIZE_PTR))
return 0;
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp))
- return ((slob_t *)block - 1)->units + SLOB_UNIT;
- else
- return sp->page.private;
+ sp = virt_to_page(block);
+ if (unlikely(!PageSlab(sp)))
+ return PAGE_SIZE << compound_order(sp);
+
+ align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+ m = (unsigned int *)(block - align);
+ return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(ksize);
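Note the behavioral shift: ksize() now re-reads the stored request size and rounds it up to whole SLOB units (the usable size) instead of returning a raw unit count, and large blocks report the full compound-page span rather than page->private, matching the header-comment change at the top of this diff. A standalone sketch of the arithmetic, assuming the 2-byte unit:

#define SKETCH_UNIT 2
#define SKETCH_UNITS(size) (((size) + SKETCH_UNIT - 1) / SKETCH_UNIT)

_Static_assert(SKETCH_UNITS(5) * SKETCH_UNIT == 6,
	       "kmalloc(5) now reports 6 usable bytes via ksize()");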
-struct kmem_cache {
- unsigned int size, align;
- unsigned long flags;
- const char *name;
- void (*ctor)(struct kmem_cache *, void *);
-};
+int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
+{
+ if (flags & SLAB_DESTROY_BY_RCU) {
+ /* leave room for rcu footer at the end of object */
+ c->size += sizeof(struct slob_rcu);
+ }
+ c->flags = flags;
+ return 0;
+}
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
- size_t align, unsigned long flags,
- void (*ctor)(struct kmem_cache *, void *))
+void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
- struct kmem_cache *c;
+ void *b;
- c = slob_alloc(sizeof(struct kmem_cache), flags, 0, -1);
+ flags &= gfp_allowed_mask;
- if (c) {
- c->name = name;
- c->size = size;
- if (flags & SLAB_DESTROY_BY_RCU) {
- /* leave room for rcu footer at the end of object */
- c->size += sizeof(struct slob_rcu);
- }
- c->flags = flags;
- c->ctor = ctor;
- /* ignore alignment unless it's forced */
- c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
- if (c->align < ARCH_SLAB_MINALIGN)
- c->align = ARCH_SLAB_MINALIGN;
- if (c->align < align)
- c->align = align;
- } else if (flags & SLAB_PANIC)
- panic("Cannot create slab cache %s\n", name);
-
- return c;
+ lockdep_trace_alloc(flags);
+
+ if (c->size < PAGE_SIZE) {
+ b = slob_alloc(c->size, flags, c->align, node);
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+ SLOB_UNITS(c->size) * SLOB_UNIT,
+ flags, node);
+ } else {
+ b = slob_new_pages(flags, get_order(c->size), node);
+ trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
+ PAGE_SIZE << get_order(c->size),
+ flags, node);
+ }
+
+ if (b && c->ctor)
+ c->ctor(b);
+
+ kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
+ return b;
}
-EXPORT_SYMBOL(kmem_cache_create);
+EXPORT_SYMBOL(slob_alloc_node);
-void kmem_cache_destroy(struct kmem_cache *c)
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
- slob_free(c, sizeof(struct kmem_cache));
+ return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
-EXPORT_SYMBOL(kmem_cache_destroy);
+EXPORT_SYMBOL(kmem_cache_alloc);
-void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
- void *b;
-
- if (c->size < PAGE_SIZE)
- b = slob_alloc(c->size, flags, c->align, node);
- else
- b = slob_new_page(flags, get_order(c->size), node);
-
- if (c->ctor)
- c->ctor(c, b);
+ return __do_kmalloc_node(size, gfp, node, _RET_IP_);
+}
+EXPORT_SYMBOL(__kmalloc_node);
- return b;
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
+{
+ return slob_alloc_node(cachep, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
+#endif
static void __kmem_cache_free(void *b, int size)
{
if (size < PAGE_SIZE)
slob_free(b, size);
else
- free_pages((unsigned long)b, get_order(size));
+ slob_free_pages(b, get_order(size));
}
static void kmem_rcu_free(struct rcu_head *head)
@@ -597,49 +600,45 @@ static void kmem_rcu_free(struct rcu_head *head)
void kmem_cache_free(struct kmem_cache *c, void *b)
{
+ kmemleak_free_recursive(b, c->flags);
if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
struct slob_rcu *slob_rcu;
slob_rcu = b + (c->size - sizeof(struct slob_rcu));
- INIT_RCU_HEAD(&slob_rcu->head);
slob_rcu->size = c->size;
call_rcu(&slob_rcu->head, kmem_rcu_free);
} else {
__kmem_cache_free(b, c->size);
}
-}
-EXPORT_SYMBOL(kmem_cache_free);
-
-unsigned int kmem_cache_size(struct kmem_cache *c)
-{
- return c->size;
-}
-EXPORT_SYMBOL(kmem_cache_size);
-const char *kmem_cache_name(struct kmem_cache *c)
-{
- return c->name;
+ trace_kmem_cache_free(_RET_IP_, b);
}
-EXPORT_SYMBOL(kmem_cache_name);
+EXPORT_SYMBOL(kmem_cache_free);
-int kmem_cache_shrink(struct kmem_cache *d)
+int __kmem_cache_shutdown(struct kmem_cache *c)
{
+ /* No way to check for remaining objects */
return 0;
}
-EXPORT_SYMBOL(kmem_cache_shrink);
-int kmem_ptr_validate(struct kmem_cache *a, const void *b)
+int __kmem_cache_shrink(struct kmem_cache *d)
{
return 0;
}
-static unsigned int slob_ready __read_mostly;
+struct kmem_cache kmem_cache_boot = {
+ .name = "kmem_cache",
+ .size = sizeof(struct kmem_cache),
+ .flags = SLAB_PANIC,
+ .align = ARCH_KMALLOC_MINALIGN,
+};
-int slab_is_available(void)
+void __init kmem_cache_init(void)
{
- return slob_ready;
+ kmem_cache = &kmem_cache_boot;
+ slab_state = UP;
}
-void __init kmem_cache_init(void)
+void __init kmem_cache_init_late(void)
{
- slob_ready = 1;
+ slab_state = FULL;
}