Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  95
1 file changed, 60 insertions(+), 35 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 09ccee8fb58..4907563ef7f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -366,7 +366,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
const char *n)
{
VM_BUG_ON(!irqs_disabled());
-#ifdef CONFIG_CMPXCHG_DOUBLE
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
+ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
if (s->flags & __CMPXCHG_DOUBLE) {
if (cmpxchg_double(&page->freelist, &page->counters,
freelist_old, counters_old,
@@ -400,7 +401,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
void *freelist_new, unsigned long counters_new,
const char *n)
{
-#ifdef CONFIG_CMPXCHG_DOUBLE
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
+ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
if (s->flags & __CMPXCHG_DOUBLE) {
if (cmpxchg_double(&page->freelist, &page->counters,
freelist_old, counters_old,
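The lockless fast path is now gated on two symbols: CONFIG_HAVE_CMPXCHG_DOUBLE (the architecture provides a double-word compare-and-exchange) and CONFIG_HAVE_ALIGNED_STRUCT_PAGE (struct page carries the extra alignment that operation needs, so page->freelist and page->counters sit in two adjacent, suitably aligned machine words). As a rough userspace illustration of the same two-words-at-once pattern, here is a sketch built on GCC's generic atomic builtin; the struct and function names are made up, it is not kernel code, and on x86-64 it needs -mcx16 and/or -latomic:

/* Userspace analogue of the freelist/counters double-word CAS.
 * __atomic_compare_exchange on a 16-byte, 16-byte-aligned struct maps
 * to cmpxchg16b on x86-64 (or a libatomic call). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct slab_words {
	void *freelist;			/* first machine word  */
	uint64_t counters;		/* second machine word */
} __attribute__((aligned(16)));		/* double-word CAS needs both words aligned */

static bool cas_double(struct slab_words *s,
		       void *old_fl, uint64_t old_ctr,
		       void *new_fl, uint64_t new_ctr)
{
	struct slab_words expected = { old_fl, old_ctr };
	struct slab_words desired  = { new_fl, new_ctr };

	return __atomic_compare_exchange(s, &expected, &desired, false,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct slab_words s = { (void *)0x1000, 42 };

	if (cas_double(&s, (void *)0x1000, 42, NULL, 43))
		printf("swapped both words atomically, counters=%llu\n",
		       (unsigned long long)s.counters);
	return 0;
}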
@@ -570,7 +572,7 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
va_end(args);
printk(KERN_ERR "========================================"
"=====================================\n");
- printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
+ printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
printk(KERN_ERR "----------------------------------------"
"-------------------------------------\n\n");
}
@@ -1901,11 +1903,14 @@ static void unfreeze_partials(struct kmem_cache *s)
}
if (l != m) {
- if (l == M_PARTIAL)
+ if (l == M_PARTIAL) {
remove_partial(n, page);
- else
+ stat(s, FREE_REMOVE_PARTIAL);
+ } else {
add_partial(n, page,
DEACTIVATE_TO_TAIL);
+ stat(s, FREE_ADD_PARTIAL);
+ }
l = m;
}
@@ -1978,7 +1983,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
page->pobjects = pobjects;
page->next = oldpage;
- } while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+ } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
stat(s, CPU_PARTIAL_FREE);
return pobjects;
}
@@ -2124,6 +2129,37 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
}
/*
+ * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
+ * or deactivate the page.
+ *
+ * The page is still frozen if the return value is not NULL.
+ *
+ * If this function returns NULL then the page has been unfrozen.
+ */
+static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+{
+ struct page new;
+ unsigned long counters;
+ void *freelist;
+
+ do {
+ freelist = page->freelist;
+ counters = page->counters;
+ new.counters = counters;
+ VM_BUG_ON(!new.frozen);
+
+ new.inuse = page->objects;
+ new.frozen = freelist != NULL;
+
+ } while (!cmpxchg_double_slab(s, page,
+ freelist, counters,
+ NULL, new.counters,
+ "get_freelist"));
+
+ return freelist;
+}
+
+/*
* Slow path. The lockless freelist is empty or we need to perform
* debugging duties.
*
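get_freelist() is the usual optimistic snapshot-and-retry loop: read page->freelist and page->counters, build the new state (every object accounted as in use, the page left frozen only if there is a freelist to hand to the per-cpu allocator), and let cmpxchg_double_slab() restart the loop if another CPU changed either word in the meantime. The same shape, shrunk to a single word in plain C11, looks like this (a sketch with made-up names, not the kernel implementation):

/* Atomically detach an entire lock-free list: snapshot the head, then
 * retry the compare-exchange until nobody has raced us.  This mirrors
 * the loop structure of get_freelist(), nothing more. */
#include <stdatomic.h>
#include <stddef.h>

struct object { struct object *next; };

static struct object *take_all(_Atomic(struct object *) *head)
{
	struct object *old = atomic_load(head);

	/* On failure 'old' is refreshed with the current head, so the
	 * loop simply retries from the new snapshot. */
	while (!atomic_compare_exchange_weak(head, &old, NULL))
		;

	return old;		/* NULL means the list was already empty */
}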
@@ -2144,8 +2180,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
{
void **object;
unsigned long flags;
- struct page new;
- unsigned long counters;
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
@@ -2166,31 +2200,14 @@ redo:
goto new_slab;
}
- stat(s, ALLOC_SLOWPATH);
-
- do {
- object = c->page->freelist;
- counters = c->page->counters;
- new.counters = counters;
- VM_BUG_ON(!new.frozen);
-
- /*
- * If there is no object left then we use this loop to
- * deactivate the slab which is simple since no objects
- * are left in the slab and therefore we do not need to
- * put the page back onto the partial list.
- *
- * If there are objects left then we retrieve them
- * and use them to refill the per cpu queue.
- */
+ /* must check again c->freelist in case of cpu migration or IRQ */
+ object = c->freelist;
+ if (object)
+ goto load_freelist;
- new.inuse = c->page->objects;
- new.frozen = object != NULL;
+ stat(s, ALLOC_SLOWPATH);
- } while (!__cmpxchg_double_slab(s, c->page,
- object, counters,
- NULL, new.counters,
- "__slab_alloc"));
+ object = get_freelist(s, c->page);
if (!object) {
c->page = NULL;
@@ -2304,7 +2321,7 @@ redo:
* Since this is without lock semantics the protection is only against
* code executing on this cpu *not* from access by other cpus.
*/
- if (unlikely(!irqsafe_cpu_cmpxchg_double(
+ if (unlikely(!this_cpu_cmpxchg_double(
s->cpu_slab->freelist, s->cpu_slab->tid,
object, tid,
get_freepointer_safe(s, object), next_tid(tid)))) {
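Both fast paths pair the per-cpu freelist pointer with a transaction id (tid) and commit the two together with this_cpu_cmpxchg_double(), so a preemption, migration, or interrupt that sneaks in between reading the freelist and committing shows up as a tid mismatch and the operation is simply redone. A minimal stand-alone illustration of a value guarded by such a generation counter (a sketch of the idea, not the SLUB fast path):

/* A value and its transaction id packed into one 64-bit word and swapped
 * together: any intervening update bumps the id, so a stale caller's
 * compare-exchange fails and it must retry from a fresh snapshot. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t slot;	/* low 32 bits: value, high 32 bits: tid */

static bool try_update(uint32_t expect_val, uint32_t expect_tid, uint32_t new_val)
{
	uint64_t old = ((uint64_t)expect_tid << 32) | expect_val;
	uint64_t new = ((uint64_t)(expect_tid + 1) << 32) | new_val;

	return atomic_compare_exchange_strong(&slot, &old, new);
}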
@@ -2534,7 +2551,7 @@ redo:
if (likely(page == c->page)) {
set_freepointer(s, object, c->freelist);
- if (unlikely(!irqsafe_cpu_cmpxchg_double(
+ if (unlikely(!this_cpu_cmpxchg_double(
s->cpu_slab->freelist, s->cpu_slab->tid,
c->freelist, tid,
object, next_tid(tid)))) {
@@ -2999,7 +3016,8 @@ static int kmem_cache_open(struct kmem_cache *s,
}
}
-#ifdef CONFIG_CMPXCHG_DOUBLE
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
+ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
/* Enable fast mode */
s->flags |= __CMPXCHG_DOUBLE;
@@ -3028,7 +3046,9 @@ static int kmem_cache_open(struct kmem_cache *s,
* per node list when we run out of per cpu objects. We only fetch 50%
* to keep some capacity around for frees.
*/
- if (s->size >= PAGE_SIZE)
+ if (kmem_cache_debug(s))
+ s->cpu_partial = 0;
+ else if (s->size >= PAGE_SIZE)
s->cpu_partial = 2;
else if (s->size >= 1024)
s->cpu_partial = 6;
@@ -3654,6 +3674,9 @@ void __init kmem_cache_init(void)
struct kmem_cache *temp_kmem_cache_node;
unsigned long kmalloc_size;
+ if (debug_guardpage_minorder())
+ slub_max_order = 0;
+
kmem_size = offsetof(struct kmem_cache, node) +
nr_node_ids * sizeof(struct kmem_cache_node *);
@@ -4634,6 +4657,8 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
err = strict_strtoul(buf, 10, &objects);
if (err)
return err;
+ if (objects && kmem_cache_debug(s))
+ return -EINVAL;
s->cpu_partial = objects;
flush_all(s);