path: root/include
author    Linus Torvalds <torvalds@linux-foundation.org>  2011-07-30 08:21:48 -1000
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-07-30 08:21:48 -1000
commit    c11abbbaa3252875c5740a6880b9a1a6f1e2a870 (patch)
tree      692143f7edd1157ef499bff21143e0d6df7cace5 /include
parent    1d3fe4a75b691285cded47c9f1a91b30d25287b0 (diff)
parent    9e577e8b46ab0c38970c0f0cd7eae62e6dffddee (diff)
Merge branch 'slub/lockless' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6
* 'slub/lockless' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6: (21 commits)
  slub: When allocating a new slab also prep the first object
  slub: disable interrupts in cmpxchg_double_slab when falling back to pagelock
  Avoid duplicate _count variables in page_struct
  Revert "SLUB: Fix build breakage in linux/mm_types.h"
  SLUB: Fix build breakage in linux/mm_types.h
  slub: slabinfo update for cmpxchg handling
  slub: Not necessary to check for empty slab on load_freelist
  slub: fast release on full slab
  slub: Add statistics for the case that the current slab does not match the node
  slub: Get rid of the another_slab label
  slub: Avoid disabling interrupts in free slowpath
  slub: Disable interrupts in free_debug processing
  slub: Invert locking and avoid slab lock
  slub: Rework allocator fastpaths
  slub: Pass kmem_cache struct to lock and freeze slab
  slub: explicit list_lock taking
  slub: Add cmpxchg_double_slab()
  mm: Rearrange struct page
  slub: Move page->frozen handling near where the page->freelist handling occurs
  slub: Do not use frozen page flag but a bit in the page counters
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/linux/mm_types.h    | 89
-rw-r--r--  include/linux/page-flags.h  |  5
-rw-r--r--  include/linux/slub_def.h    |  3
3 files changed, 64 insertions(+), 33 deletions(-)
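
The core of the series is replacing slab-lock-protected updates with a double word compare-and-exchange on the adjacent freelist/counters words of struct page. What follows is a minimal userspace sketch of that pattern, not the kernel code: the struct, the field names, and the use of C11 atomics here are illustrative stand-ins (the kernel uses cmpxchg_double()/this_cpu_cmpxchg_double() on cmpxchg16b-capable hardware, and a 16-byte CAS via C11 atomics may require linking libatomic).

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for page->freelist and page->counters. */
struct slab_words {
	void *freelist;         /* first free object on the slab */
	unsigned long counters; /* inuse/objects/frozen packed into one word */
};

/*
 * Both words are compared and swapped as a single unit, so a free that
 * races with an allocation makes the exchange fail and the caller retries
 * with a freshly read pair -- no slab lock is taken on the fast path.
 */
static bool cmpxchg_double_words(_Atomic struct slab_words *slab,
				 struct slab_words old,
				 struct slab_words new)
{
	return atomic_compare_exchange_strong(slab, &old, new);
}

int main(void)
{
	static _Atomic struct slab_words slab;
	struct slab_words old = atomic_load(&slab);
	struct slab_words new = { .freelist = NULL,
				  .counters = old.counters + 1 };

	printf("exchange %s\n",
	       cmpxchg_double_words(&slab, old, new) ? "succeeded" : "must retry");
	return 0;
}
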
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 027935c86c6..774b8952deb 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -30,23 +30,61 @@ struct address_space;
* moment. Note that we have no way to track which tasks are using
* a page, though if it is a pagecache page, rmap structures can tell us
* who is mapping it.
+ *
+ * The objects in struct page are organized in double word blocks in
+ * order to allow us to use atomic double word operations on portions
+ * of struct page. That is currently only used by slub but the arrangement
+ * allows the use of atomic double word operations on the flags/mapping
+ * and lru list pointers also.
*/
struct page {
+ /* First double word block */
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
- atomic_t _count; /* Usage count, see below. */
- union {
- atomic_t _mapcount; /* Count of ptes mapped in mms,
- * to show when page is mapped
- * & limit reverse map searches.
+ struct address_space *mapping; /* If low bit clear, points to
+ * inode address_space, or NULL.
+ * If page mapped as anonymous
+ * memory, low bit is set, and
+ * it points to anon_vma object:
+ * see PAGE_MAPPING_ANON below.
*/
- struct { /* SLUB */
- u16 inuse;
- u16 objects;
+ /* Second double word */
+ struct {
+ union {
+ pgoff_t index; /* Our offset within mapping. */
+ void *freelist; /* slub first free object */
+ };
+
+ union {
+ /* Used for cmpxchg_double in slub */
+ unsigned long counters;
+
+ struct {
+
+ union {
+ atomic_t _mapcount; /* Count of ptes mapped in mms,
+ * to show when page is mapped
+ * & limit reverse map searches.
+ */
+
+ struct {
+ unsigned inuse:16;
+ unsigned objects:15;
+ unsigned frozen:1;
+ };
+ };
+ atomic_t _count; /* Usage count, see below. */
+ };
};
};
+
+ /* Third double word block */
+ struct list_head lru; /* Pageout list, eg. active_list
+ * protected by zone->lru_lock !
+ */
+
+ /* Remainder is not double word aligned */
union {
- struct {
unsigned long private; /* Mapping-private opaque data:
* usually used for buffer_heads
* if PagePrivate set; used for
@@ -54,27 +92,13 @@ struct page {
* indicates order in the buddy
* system if PG_buddy is set.
*/
- struct address_space *mapping; /* If low bit clear, points to
- * inode address_space, or NULL.
- * If page mapped as anonymous
- * memory, low bit is set, and
- * it points to anon_vma object:
- * see PAGE_MAPPING_ANON below.
- */
- };
#if USE_SPLIT_PTLOCKS
- spinlock_t ptl;
+ spinlock_t ptl;
#endif
- struct kmem_cache *slab; /* SLUB: Pointer to slab */
- struct page *first_page; /* Compound tail pages */
- };
- union {
- pgoff_t index; /* Our offset within mapping. */
- void *freelist; /* SLUB: freelist req. slab lock */
+ struct kmem_cache *slab; /* SLUB: Pointer to slab */
+ struct page *first_page; /* Compound tail pages */
};
- struct list_head lru; /* Pageout list, eg. active_list
- * protected by zone->lru_lock !
- */
+
/*
* On machines where all RAM is mapped into kernel address space,
* we can simply calculate the virtual address. On machines with
@@ -100,7 +124,16 @@ struct page {
*/
void *shadow;
#endif
-};
+}
+/*
+ * If another subsystem starts using the double word pairing for atomic
+ * operations on struct page then it must change the #if to ensure
+ * proper alignment of the page struct.
+ */
+#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
+ __attribute__((__aligned__(2*sizeof(unsigned long))))
+#endif
+;
typedef unsigned long __nocast vm_flags_t;
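
The trailing __aligned__(2*sizeof(unsigned long)) attribute above matters because cmpxchg16b (and its 32-bit counterpart cmpxchg8b) requires operands aligned to twice the word size. Below is a small sketch of that constraint using a hypothetical miniature of the layout, not the real struct page:

#include <stddef.h>

/* Hypothetical miniature of the first two double word blocks. */
struct mini_page {
	unsigned long flags;    /* first double word block ... */
	void *mapping;          /* ... flags/mapping */
	void *freelist;         /* second double word block ... */
	unsigned long counters; /* ... the cmpxchg_double pair */
} __attribute__((__aligned__(2 * sizeof(unsigned long))));

/*
 * The freelist/counters pair can only be updated with a double word
 * cmpxchg if it starts on a 2*wordsize boundary; the aligned attribute
 * on the struct guarantees that for fields at these offsets.
 */
_Static_assert(offsetof(struct mini_page, freelist)
	       % (2 * sizeof(unsigned long)) == 0,
	       "freelist/counters pair must be double word aligned");
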
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 3e5a1b189a4..e90a673be67 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -124,9 +124,6 @@ enum pageflags {
/* SLOB */
PG_slob_free = PG_private,
-
- /* SLUB */
- PG_slub_frozen = PG_active,
};
#ifndef __GENERATING_BOUNDS_H
@@ -212,8 +209,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
__PAGEFLAG(SlobFree, slob_free)
-__PAGEFLAG(SlubFrozen, slub_frozen)
-
/*
* Private page markings that may be used by the filesystem that owns the page
* for its own purposes.
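
PG_slub_frozen can go away because the frozen state now lives in the same word as the inuse and objects counts (see the mm_types.h hunk above), so it changes atomically together with the freelist rather than through a separate page flag. A hedged sketch of that packing, mirroring the new bitfield with illustrative names:

#include <stdio.h>

/*
 * Mirrors the bitfield introduced in mm_types.h above: all three fields
 * share one machine word, so a single cmpxchg on "counters" updates them
 * together. Exact bit layout is implementation-defined; this is a demo.
 */
union slab_counters {
	unsigned long counters;
	struct {
		unsigned inuse:16;   /* objects in use on this slab */
		unsigned objects:15; /* total objects on this slab */
		unsigned frozen:1;   /* cpu slab; list_lock not needed */
	};
};

int main(void)
{
	union slab_counters c = { .counters = 0 };

	c.inuse = 3;
	c.objects = 32;
	c.frozen = 1;
	printf("counters word: %#lx\n", c.counters);
	return 0;
}
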
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 4b35c06dfbc..f58d6413d23 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -24,6 +24,7 @@ enum stat_item {
ALLOC_FROM_PARTIAL, /* Cpu slab acquired from partial list */
ALLOC_SLAB, /* Cpu slab acquired from page allocator */
ALLOC_REFILL, /* Refill cpu slab from slab freelist */
+ ALLOC_NODE_MISMATCH, /* Switching cpu slab */
FREE_SLAB, /* Slab freed to the page allocator */
CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
@@ -31,8 +32,10 @@ enum stat_item {
DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+ DEACTIVATE_BYPASS, /* Implicit deactivation */
ORDER_FALLBACK, /* Number of times fallback was necessary */
CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
+ CMPXCHG_DOUBLE_FAIL, /* Number of times that cmpxchg double did not match */
NR_SLUB_STAT_ITEMS };
struct kmem_cache_cpu {
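
The three new enum values feed slub's per-cpu statistics; in the kernel they are bumped from the hot paths via a per-cpu stat() helper in mm/slub.c. A minimal userspace stand-in (a plain array instead of per-cpu counters, with a hypothetical helper name) shows the shape of the accounting:

#include <stdio.h>

/* The three counters added by this merge (subset of enum stat_item). */
enum stat_item { ALLOC_NODE_MISMATCH, DEACTIVATE_BYPASS,
		 CMPXCHG_DOUBLE_FAIL, NR_ITEMS };

/*
 * Stand-in for the stat array in struct kmem_cache_cpu; the kernel
 * increments the per-cpu element with __this_cpu_inc() instead.
 */
static unsigned long events[NR_ITEMS];

static void count_event(enum stat_item si)
{
	events[si]++;
}

int main(void)
{
	count_event(CMPXCHG_DOUBLE_FAIL); /* e.g. a cmpxchg_double lost a race */
	printf("cmpxchg_double mismatches: %lu\n", events[CMPXCHG_DOUBLE_FAIL]);
	return 0;
}
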