Diffstat (limited to 'mm/internal.h')
| Mode | File | Lines changed |
| --- | --- | --- |
| -rw-r--r-- | mm/internal.h | 74 |
1 file changed, 37 insertions(+), 37 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 684f7aa9692..7f22a11fcc6 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -11,6 +11,7 @@
 #ifndef __MM_INTERNAL_H
 #define __MM_INTERNAL_H
 
+#include <linux/fs.h>
 #include <linux/mm.h>
 
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
@@ -21,14 +22,28 @@ static inline void set_page_count(struct page *page, int v)
 	atomic_set(&page->_count, v);
 }
 
+extern int __do_page_cache_readahead(struct address_space *mapping,
+		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
+		unsigned long lookahead_size);
+
+/*
+ * Submit IO for the read-ahead request in file_ra_state.
+ */
+static inline unsigned long ra_submit(struct file_ra_state *ra,
+		struct address_space *mapping, struct file *filp)
+{
+	return __do_page_cache_readahead(mapping, filp,
+					ra->start, ra->size, ra->async_size);
+}
+
 /*
  * Turn a non-refcounted page (->_count == 0) into refcounted with
  * a count of one.
  */
 static inline void set_page_refcounted(struct page *page)
 {
-	VM_BUG_ON(PageTail(page));
-	VM_BUG_ON(atomic_read(&page->_count));
+	VM_BUG_ON_PAGE(PageTail(page), page);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
 	set_page_count(page, 1);
 }
 
@@ -46,12 +61,10 @@ static inline void __get_page_tail_foll(struct page *page,
 	 * speculative page access (like in
 	 * page_cache_get_speculative()) on tail pages.
 	 */
-	VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
-	VM_BUG_ON(atomic_read(&page->_count) != 0);
-	VM_BUG_ON(page_mapcount(page) < 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page);
 	if (get_page_head)
 		atomic_inc(&page->first_page->_count);
-	atomic_inc(&page->_mapcount);
+	get_huge_page_tail(page);
 }
 
 /*
@@ -73,7 +86,7 @@ static inline void get_page_foll(struct page *page)
 		 * Getting a normal page or the head of a compound page
 		 * requires to already have an elevated page->_count.
 		 */
-		VM_BUG_ON(atomic_read(&page->_count) <= 0);
+		VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -85,7 +98,6 @@ extern unsigned long highest_memmap_pfn;
  */
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern bool zone_reclaimable(struct zone *zone);
 
 /*
@@ -101,6 +113,7 @@ extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
 extern bool is_free_buddy_page(struct page *page);
 #endif
+extern int user_min_free_kbytes;
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
 
@@ -121,7 +134,7 @@ struct compact_control {
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
-	bool sync;			/* Synchronous migration */
+	enum migrate_mode mode;		/* Async or sync migration mode */
 	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
 	bool finished_update_free;	/* True when the zone cached pfns are
 					 * no longer being updated
@@ -131,7 +144,10 @@
 	int order;			/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
-	bool contended;			/* True if a lock was contended */
+	bool contended;			/* True if a lock was contended, or
+					 * need_resched() true during async
+					 * compaction
+					 */
 };
 
 unsigned long
@@ -144,9 +160,11 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 #endif
 
 /*
- * function for dealing with page's order in buddy system.
- * zone->lock is already acquired when we use these.
- * So, we don't need atomic page->flags operations here.
+ * This function returns the order of a free page in the buddy system. In
+ * general, page_zone(page)->lock must be held by the caller to prevent the
+ * page from being allocated in parallel and returning garbage as the order.
+ * If a caller does not hold page_zone(page)->lock, it must guarantee that the
+ * page cannot be allocated or merged in parallel.
  */
 static inline unsigned long page_order(struct page *page)
 {
@@ -154,6 +172,11 @@ static inline unsigned long page_order(struct page *page)
 	return page_private(page);
 }
 
+static inline bool is_cow_mapping(vm_flags_t flags)
+{
+	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+}
+
 /* mm/util.c */
 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct rb_node *rb_parent);
@@ -169,26 +192,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 }
 
 /*
- * Called only in fault path, to determine if a new page is being
- * mapped into a LOCKED vma.  If it is, mark page as mlocked.
- */
-static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
-				    struct page *page)
-{
-	VM_BUG_ON(PageLRU(page));
-
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
-		return 0;
-
-	if (!TestSetPageMlocked(page)) {
-		mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
-	}
-	return 1;
-}
-
-/*
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
@@ -230,10 +233,6 @@ extern unsigned long vma_address(struct page *page,
 				 struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
-{
-	return 0;
-}
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
@@ -370,5 +369,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET		0x40 /* check for correct cpuset */
 #define ALLOC_CMA		0x80 /* allow allocations from CMA areas */
+#define ALLOC_FAIR		0x100 /* fair zone allocation */
 
 #endif	/* __MM_INTERNAL_H */
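The new is_cow_mapping() helper added just before the /* mm/util.c */ block encodes a simple rule: a mapping is subject to copy-on-write when it is not VM_SHARED but may become writable (VM_MAYWRITE). The standalone C sketch below is not part of the patch; it mirrors the bit test so it can be compiled and run in userspace, with the VM_SHARED and VM_MAYWRITE values copied from <linux/mm.h> purely so the example builds outside the kernel tree.

```c
/*
 * Standalone sketch of the is_cow_mapping() test added above.
 * VM_SHARED and VM_MAYWRITE values are copied from <linux/mm.h>
 * only so this builds outside the kernel tree.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_SHARED   0x00000008UL
#define VM_MAYWRITE 0x00000020UL

static bool is_cow_mapping(unsigned long vm_flags)
{
	/* COW case: not a shared mapping, but may become writable. */
	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

int main(void)
{
	/* MAP_PRIVATE-style vma that may be written: copy-on-write. */
	printf("private, may write: %d\n", is_cow_mapping(VM_MAYWRITE));
	/* MAP_SHARED vma: writes go straight to the shared object. */
	printf("shared,  may write: %d\n",
	       is_cow_mapping(VM_SHARED | VM_MAYWRITE));
	return 0;
}
```

Built with any C compiler, it prints 1 for the private-writable case and 0 for the shared one, matching the distinction the kernel helper uses when deciding whether a write must be served by a private copy.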

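The rewritten comment above page_order() spells out a locking contract rather than assuming it: unless the caller can otherwise guarantee the page is neither allocated nor merged concurrently, page_zone(page)->lock must be held while the order is read. The kernel-style sketch below is illustrative only and not taken from this patch; the helper name read_free_page_order() is invented, and the PageBuddy() check is an assumption about how a caller might confirm the page is still free before trusting page_order().

```c
/*
 * Illustrative sketch only (not from this patch): read the buddy
 * order of a page while holding page_zone(page)->lock, as the
 * page_order() comment requires, so the page cannot be allocated
 * or merged while the order is read.  The helper name is invented;
 * the PageBuddy() check is an assumption about how a caller would
 * confirm the page is still free.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/spinlock.h>

#include "internal.h"

static unsigned long read_free_page_order(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;
	unsigned long order = 0;

	spin_lock_irqsave(&zone->lock, flags);
	if (PageBuddy(page))
		order = page_order(page);
	spin_unlock_irqrestore(&zone->lock, flags);

	return order;
}
```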