Diffstat (limited to 'include/linux/page-flags.h')
-rw-r--r--  include/linux/page-flags.h | 284
1 file changed, 239 insertions, 45 deletions
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c74d3e87531..8304959ad33 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -6,9 +6,11 @@
 #define PAGE_FLAGS_H
 
 #include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/mmdebug.h>
 #ifndef __GENERATING_BOUNDS_H
 #include <linux/mm_types.h>
-#include <linux/bounds.h>
+#include <generated/bounds.h>
 #endif /* !__GENERATING_BOUNDS_H */
 
 /*
@@ -48,9 +50,9 @@
  * struct page (these bits with information) are always mapped into kernel
  * address space...
  *
- * PG_buddy is set to indicate that the page is free and in the buddy system
- * (see mm/page_alloc.c).
- *
+ * PG_hwpoison indicates that a page got corrupted in hardware and contains
+ * data with incorrect ECC bits that triggered a machine check. Accessing is
+ * not safe since it may cause another machine check. Don't touch!
  */
 
 /*
@@ -82,6 +84,7 @@ enum pageflags {
 	PG_arch_1,
 	PG_reserved,
 	PG_private,		/* If pagecache, has fs-private data */
+	PG_private_2,		/* If pagecache, has fs aux data */
 	PG_writeback,		/* Page is under writeback */
 #ifdef CONFIG_PAGEFLAGS_EXTENDED
 	PG_head,		/* A head page */
@@ -92,26 +95,37 @@ enum pageflags {
 	PG_swapcache,		/* Swap page: swp_entry_t in private */
 	PG_mappedtodisk,	/* Has blocks allocated on-disk */
 	PG_reclaim,		/* To be reclaimed asap */
-	PG_buddy,		/* Page is free, on buddy lists */
-#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+	PG_swapbacked,		/* Page is backed by RAM/swap */
+	PG_unevictable,		/* Page is "unevictable" */
+#ifdef CONFIG_MMU
+	PG_mlocked,		/* Page is vma mlocked */
+#endif
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
 	PG_uncached,		/* Page has been mapped as uncached */
 #endif
+#ifdef CONFIG_MEMORY_FAILURE
+	PG_hwpoison,		/* hardware poisoned page. Don't touch */
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	PG_compound_lock,
+#endif
 	__NR_PAGEFLAGS,
 
 	/* Filesystems */
 	PG_checked = PG_owner_priv_1,
 
+	/* Two page bits are conscripted by FS-Cache to maintain local caching
+	 * state.  These bits are set on pages belonging to the netfs's inodes
+	 * when those inodes are being locally cached.
+	 */
+	PG_fscache = PG_private_2,	/* page backed by cache */
+
 	/* XEN */
 	PG_pinned = PG_owner_priv_1,
 	PG_savepinned = PG_dirty,
 
 	/* SLOB */
-	PG_slob_page = PG_active,
 	PG_slob_free = PG_private,
-
-	/* SLUB */
-	PG_slub_frozen = PG_active,
-	PG_slub_debug = PG_error,
 };
 
 #ifndef __GENERATING_BOUNDS_H
@@ -120,7 +134,7 @@ enum pageflags {
  * Macros to create function definitions for page flags
  */
 #define TESTPAGEFLAG(uname, lname) \
-static inline int Page##uname(struct page *page) \
+static inline int Page##uname(const struct page *page) \
 	{ return test_bit(PG_##lname, &page->flags); }
 
 #define SETPAGEFLAG(uname, lname) \
@@ -147,6 +161,9 @@ static inline int TestSetPage##uname(struct page *page) \
 static inline int TestClearPage##uname(struct page *page) \
 	{ return test_and_clear_bit(PG_##lname, &page->flags); }
 
+#define __TESTCLEARFLAG(uname, lname) \
+static inline int __TestClearPage##uname(struct page *page) \
+	{ return __test_and_clear_bit(PG_##lname, &page->flags); }
 
 #define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
 	SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)
@@ -155,45 +172,67 @@ static inline int TestClearPage##uname(struct page *page) \
 	__SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)
 
 #define PAGEFLAG_FALSE(uname) \
-static inline int Page##uname(struct page *page) \
+static inline int Page##uname(const struct page *page) \
 	{ return 0; }
 
 #define TESTSCFLAG(uname, lname) \
 	TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
 
+#define SETPAGEFLAG_NOOP(uname) \
+static inline void SetPage##uname(struct page *page) {  }
+
+#define CLEARPAGEFLAG_NOOP(uname) \
+static inline void ClearPage##uname(struct page *page) {  }
+
+#define __CLEARPAGEFLAG_NOOP(uname) \
+static inline void __ClearPage##uname(struct page *page) {  }
+
+#define TESTCLEARFLAG_FALSE(uname) \
+static inline int TestClearPage##uname(struct page *page) { return 0; }
+
+#define __TESTCLEARFLAG_FALSE(uname) \
+static inline int __TestClearPage##uname(struct page *page) { return 0; }
+
 struct page;	/* forward declaration */
 
 TESTPAGEFLAG(Locked, locked)
-PAGEFLAG(Error, error)
+PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
 PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
+	__SETPAGEFLAG(Referenced, referenced)
 PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
 PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
 PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
	TESTCLEARFLAG(Active, active)
 __PAGEFLAG(Slab, slab)
 PAGEFLAG(Checked, checked)		/* Used by some filesystems */
 PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)	/* Xen */
 PAGEFLAG(SavePinned, savepinned);	/* Xen */
 PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
-PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
-	__SETPAGEFLAG(Private, private)
+PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
+	__SETPAGEFLAG(SwapBacked, swapbacked)
 
-__PAGEFLAG(SlobPage, slob_page)
 __PAGEFLAG(SlobFree, slob_free)
 
-__PAGEFLAG(SlubFrozen, slub_frozen)
-__PAGEFLAG(SlubDebug, slub_debug)
+/*
+ * Private page markings that may be used by the filesystem that owns the page
+ * for its own purposes.
+ * - PG_private and PG_private_2 cause releasepage() and co to be invoked
+ */
+PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
+	__CLEARPAGEFLAG(Private, private)
+PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
+PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
 
 /*
  * Only test-and-set exist for PG_writeback.  The unconditional operators are
  * risky: they bypass page accounting.
  */
 TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
-__PAGEFLAG(Buddy, buddy)
 PAGEFLAG(MappedToDisk, mappedtodisk)
 
-/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
+/* PG_readahead is only used for reads; PG_reclaim is only for writes */
 PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
-PAGEFLAG(Readahead, reclaim)		/* Reminder to do async read-ahead */
+PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
 
 #ifdef CONFIG_HIGHMEM
 /*
@@ -209,14 +248,37 @@ PAGEFLAG_FALSE(HighMem)
 PAGEFLAG(SwapCache, swapcache)
 #else
 PAGEFLAG_FALSE(SwapCache)
+	SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
+#endif
+
+PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
+	TESTCLEARFLAG(Unevictable, unevictable)
+
+#ifdef CONFIG_MMU
+PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
+	TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
+#else
+PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
+	TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
 #endif
 
-#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
 PAGEFLAG(Uncached, uncached)
 #else
 PAGEFLAG_FALSE(Uncached)
 #endif
 
+#ifdef CONFIG_MEMORY_FAILURE
+PAGEFLAG(HWPoison, hwpoison)
+TESTSCFLAG(HWPoison, hwpoison)
+#define __PG_HWPOISON (1UL << PG_hwpoison)
+#else
+PAGEFLAG_FALSE(HWPoison)
+#define __PG_HWPOISON 0
+#endif
+
+u64 stable_page_flags(struct page *page);
+
 static inline int PageUptodate(struct page *page)
 {
 	int ret = test_bit(PG_uptodate, &(page)->flags);
@@ -243,21 +305,13 @@ static inline void __SetPageUptodate(struct page *page)
 
 static inline void SetPageUptodate(struct page *page)
 {
-#ifdef CONFIG_S390
-	if (!test_and_set_bit(PG_uptodate, &page->flags))
-		page_clear_dirty(page);
-#else
 	/*
 	 * Memory barrier must be issued before setting the PG_uptodate bit,
 	 * so that all previous stores issued in order to bring the page
 	 * uptodate are actually visible before PageUptodate becomes true.
-	 *
-	 * s390 doesn't need an explicit smp_wmb here because the test and
-	 * set bit already provides full barriers.
 	 */
 	smp_wmb();
 	set_bit(PG_uptodate, &(page)->flags);
-#endif
 }
 
 CLEARPAGEFLAG(Uptodate, uptodate)
@@ -265,21 +319,33 @@ CLEARPAGEFLAG(Uptodate, uptodate)
 extern void cancel_dirty_page(struct page *page, unsigned int account_size);
 
 int test_clear_page_writeback(struct page *page);
-int test_set_page_writeback(struct page *page);
+int __test_set_page_writeback(struct page *page, bool keep_write);
+
+#define test_set_page_writeback(page) \
+	__test_set_page_writeback(page, false)
+#define test_set_page_writeback_keepwrite(page) \
+	__test_set_page_writeback(page, true)
 
 static inline void set_page_writeback(struct page *page)
 {
 	test_set_page_writeback(page);
 }
 
+static inline void set_page_writeback_keepwrite(struct page *page)
+{
+	test_set_page_writeback_keepwrite(page);
+}
+
 #ifdef CONFIG_PAGEFLAGS_EXTENDED
 /*
  * System with lots of page flags available. This allows separate
  * flags for PageHead() and PageTail() checks of compound pages so that bit
  * tests can be used in performance sensitive paths. PageCompound is
- * generally not used in hot code paths.
+ * generally not used in hot code paths except arch/powerpc/mm/init_64.c
+ * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
+ * and avoid handling those in real mode.
  */
-__PAGEFLAG(Head, head)
+__PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
 __PAGEFLAG(Tail, tail)
 
 static inline int PageCompound(struct page *page)
@@ -287,6 +353,16 @@ static inline int PageCompound(struct page *page)
 	return page->flags & ((1L << PG_head) | (1L << PG_tail));
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ClearPageCompound(struct page *page)
+{
+	BUG_ON(!PageHead(page));
+	ClearPageHead(page);
+}
+#endif
+
+#define PG_head_mask ((1L << PG_head))
+
 #else
 /*
  * Reduce page flag use as much as possible by overlapping
@@ -295,7 +371,7 @@ static inline int PageCompound(struct page *page)
  * pages on the LRU and/or pagecache.
  */
 TESTPAGEFLAG(Compound, compound)
-__PAGEFLAG(Head, compound)
+__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)
 
 /*
  * PG_reclaim is used in combination with PG_compound to mark the
@@ -307,8 +383,14 @@ __PAGEFLAG(Head, compound)
  * PG_compound & PG_reclaim	=> Tail page
  * PG_compound & ~PG_reclaim	=> Head page
  */
+#define PG_head_mask ((1L << PG_compound))
 #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
 
+static inline int PageHead(struct page *page)
+{
+	return ((page->flags & PG_head_tail_mask) == PG_head_mask);
+}
+
 static inline int PageTail(struct page *page)
 {
 	return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
@@ -324,30 +406,142 @@ static inline void __ClearPageTail(struct page *page)
 	page->flags &= ~PG_head_tail_mask;
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ClearPageCompound(struct page *page)
+{
+	BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound));
+	clear_bit(PG_compound, &page->flags);
+}
+#endif
+
 #endif /* !PAGEFLAGS_EXTENDED */
 
-#define PAGE_FLAGS	(1 << PG_lru | 1 << PG_private | 1 << PG_locked | \
-			 1 << PG_buddy | 1 << PG_writeback | \
-			 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * PageHuge() only returns true for hugetlbfs pages, but not for
+ * normal or transparent huge pages.
+ *
+ * PageTransHuge() returns true for both transparent huge and
+ * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
+ * called only in the core VM paths where hugetlbfs pages can't exist.
+ */
+static inline int PageTransHuge(struct page *page)
+{
+	VM_BUG_ON_PAGE(PageTail(page), page);
+	return PageHead(page);
+}
+
+/*
+ * PageTransCompound returns true for both transparent huge pages
+ * and hugetlbfs pages, so it should only be called when it's known
+ * that hugetlbfs pages aren't involved.
+ */
+static inline int PageTransCompound(struct page *page)
+{
+	return PageCompound(page);
+}
 
 /*
- * Flags checked in bad_page(). Pages on the free list should not have
- * these flags set. It they are, there is a problem.
+ * PageTransTail returns true for both transparent huge pages
+ * and hugetlbfs pages, so it should only be called when it's known
+ * that hugetlbfs pages aren't involved.
+ */
+static inline int PageTransTail(struct page *page)
+{
+	return PageTail(page);
+}
+
+#else
+
+static inline int PageTransHuge(struct page *page)
+{
+	return 0;
+}
+
+static inline int PageTransCompound(struct page *page)
+{
+	return 0;
+}
+
+static inline int PageTransTail(struct page *page)
+{
+	return 0;
+}
+#endif
+
+/*
+ * If network-based swap is enabled, sl*b must keep track of whether pages
+ * were allocated from pfmemalloc reserves.
  */
-#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | 1 << PG_reclaim | 1 << PG_dirty)
+static inline int PageSlabPfmemalloc(struct page *page)
+{
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
+	return PageActive(page);
+}
+
+static inline void SetPageSlabPfmemalloc(struct page *page)
+{
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
+	SetPageActive(page);
+}
+
+static inline void __ClearPageSlabPfmemalloc(struct page *page)
+{
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
+	__ClearPageActive(page);
+}
+
+static inline void ClearPageSlabPfmemalloc(struct page *page)
+{
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
+	ClearPageActive(page);
+}
+
+#ifdef CONFIG_MMU
+#define __PG_MLOCKED		(1 << PG_mlocked)
+#else
+#define __PG_MLOCKED		0
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __PG_COMPOUND_LOCK	(1 << PG_compound_lock)
+#else
+#define __PG_COMPOUND_LOCK	0
+#endif
 
 /*
  * Flags checked when a page is freed.  Pages being freed should not have
  * these flags set.  It they are, there is a problem.
  */
-#define PAGE_FLAGS_CHECK_AT_FREE (PAGE_FLAGS | 1 << PG_reserved)
+#define PAGE_FLAGS_CHECK_AT_FREE \
+	(1 << PG_lru | 1 << PG_locked | \
+	 1 << PG_private | 1 << PG_private_2 | \
+	 1 << PG_writeback | 1 << PG_reserved | \
+	 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
+	 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
	 __PG_COMPOUND_LOCK)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
- * Pages being prepped should not have these flags set.  It they are, there
- * is a problem.
+ * Pages being prepped should not have any flags set.  It they are set,
+ * there has been a kernel bug or struct page corruption.
  */
-#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | 1 << PG_reserved | 1 << PG_dirty)
+#define PAGE_FLAGS_CHECK_AT_PREP	((1 << NR_PAGEFLAGS) - 1)
+
+#define PAGE_FLAGS_PRIVATE \
+	(1 << PG_private | 1 << PG_private_2)
+/**
+ * page_has_private - Determine if page has private stuff
+ * @page: The page to be checked
+ *
+ * Determine if a page has private stuff, indicating that release routines
+ * should be invoked upon it.
+ */
+static inline int page_has_private(struct page *page)
+{
+	return !!(page->flags & PAGE_FLAGS_PRIVATE);
+}
 
 #endif /* !__GENERATING_BOUNDS_H */
+
 #endif /* PAGE_FLAGS_H */
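To make the macro scheme in the diff concrete, the sketch below shows roughly what one invocation from the new version of the header, PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty), expands to after preprocessing. The SETPAGEFLAG/CLEARPAGEFLAG bodies are not visible in these hunks, so the set_bit()/clear_bit() bodies used here are an assumption based on the pattern of the macros that are shown.

/* Approximate expansion; the SetPageDirty/ClearPageDirty bodies are assumed
 * from the pattern of the TESTPAGEFLAG/TESTSETFLAG macros shown in the diff. */
static inline int PageDirty(const struct page *page)
	{ return test_bit(PG_dirty, &page->flags); }
static inline void SetPageDirty(struct page *page)
	{ set_bit(PG_dirty, &page->flags); }
static inline void ClearPageDirty(struct page *page)
	{ clear_bit(PG_dirty, &page->flags); }
static inline int TestSetPageDirty(struct page *page)
	{ return test_and_set_bit(PG_dirty, &page->flags); }
static inline int TestClearPageDirty(struct page *page)
	{ return test_and_clear_bit(PG_dirty, &page->flags); }
static inline void __ClearPageDirty(struct page *page)
	{ __clear_bit(PG_dirty, &page->flags); }

This is why most of the diff consists of one-line macro invocations: adding TESTCLEARFLAG(Readahead, reclaim), for example, is all it takes to gain a TestClearPageReadahead() helper operating on the shared PG_reclaim bit.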
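The non-CONFIG_PAGEFLAGS_EXTENDED branch encodes compound head and tail pages in two existing bits, exactly as the comment in the diff puts it: PG_compound & PG_reclaim => tail page, PG_compound & ~PG_reclaim => head page. Here is a standalone, user-space sketch of that masking logic, with made-up bit positions, assuming nothing beyond the comment and the PageHead()/PageTail() definitions shown above.

#include <assert.h>

#define DEMO_PG_compound	(1UL << 0)	/* stands in for PG_compound */
#define DEMO_PG_reclaim		(1UL << 1)	/* stands in for PG_reclaim */
#define DEMO_head_mask		(DEMO_PG_compound)
#define DEMO_head_tail_mask	(DEMO_PG_compound | DEMO_PG_reclaim)

static int demo_page_head(unsigned long flags)
{
	return (flags & DEMO_head_tail_mask) == DEMO_head_mask;
}

static int demo_page_tail(unsigned long flags)
{
	return (flags & DEMO_head_tail_mask) == DEMO_head_tail_mask;
}

int main(void)
{
	assert(demo_page_head(DEMO_PG_compound));			/* head page */
	assert(demo_page_tail(DEMO_PG_compound | DEMO_PG_reclaim));	/* tail page */
	assert(!demo_page_head(DEMO_PG_reclaim));	/* PG_reclaim alone: ordinary page */
	assert(!demo_page_tail(0));			/* no compound bits: ordinary page */
	return 0;
}

Only the masking logic carries over; in the header itself PageHead() and PageTail() test page->flags against masks built from the real bit numbers in enum pageflags.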
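The new page_has_private() helper and the PG_private/PG_private_2 comment describe when a filesystem's release routines still have to run. A hypothetical caller, not taken from the kernel, might combine it with the flag tests generated above like this:

/* Hypothetical illustration only: decide whether a page can be freed
 * without first invoking the owning filesystem's release routines. */
static bool demo_page_is_releasable(struct page *page)
{
	if (page_has_private(page))	/* PG_private or PG_private_2 still set */
		return false;		/* filesystem data still attached */
	if (PageWriteback(page))	/* write-out still in flight */
		return false;
	return true;
}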
