Diffstat (limited to 'mm/vmscan.c')
 -rw-r--r--  mm/vmscan.c  122
 1 file changed, 37 insertions, 85 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 824676a4ca7..cb33d9cd4d6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -636,7 +636,7 @@ redo:
 		 * When racing with an mlock or AS_UNEVICTABLE clearing
 		 * (page is unlocked) make sure that if the other thread
 		 * does not observe our setting of PG_lru and fails
-		 * isolation/check_move_unevictable_page,
+		 * isolation/check_move_unevictable_pages,
 		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
 		 * the page back to the evictable list.
 		 *
@@ -3355,104 +3355,56 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 #ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
- * @page: page to check evictability and move to appropriate lru list
- * @zone: zone page is in
+ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
+ * @pages: array of pages to check
+ * @nr_pages: number of pages to check
  *
- * Checks a page for evictability and moves the page to the appropriate
- * zone lru list.
- *
- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
- * have PageUnevictable set.
+ * Checks pages for evictability and moves them to the appropriate lru list.
  *
  * This function is only used for SysV IPC SHM_UNLOCK.
  */
-static void check_move_unevictable_page(struct page *page, struct zone *zone)
+void check_move_unevictable_pages(struct page **pages, int nr_pages)
 {
-	VM_BUG_ON(PageActive(page));
+	struct zone *zone = NULL;
+	int pgscanned = 0;
+	int pgrescued = 0;
+	int i;
 
-retry:
-	ClearPageUnevictable(page);
-	if (page_evictable(page, NULL)) {
-		enum lru_list l = page_lru_base_type(page);
+	for (i = 0; i < nr_pages; i++) {
+		struct page *page = pages[i];
+		struct zone *pagezone;
 
-		__dec_zone_state(zone, NR_UNEVICTABLE);
-		list_move(&page->lru, &zone->lru[l].list);
-		mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
-		__inc_zone_state(zone, NR_INACTIVE_ANON + l);
-		__count_vm_event(UNEVICTABLE_PGRESCUED);
-	} else {
-		/*
-		 * rotate unevictable list
-		 */
-		SetPageUnevictable(page);
-		list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
-		mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
-		if (page_evictable(page, NULL))
-			goto retry;
-	}
-}
-
-/**
- * scan_mapping_unevictable_pages - scan an address space for evictable pages
- * @mapping: struct address_space to scan for evictable pages
- *
- * Scan all pages in mapping. Check unevictable pages for
- * evictability and move them to the appropriate zone lru list.
- *
- * This function is only used for SysV IPC SHM_UNLOCK.
- */
-void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-	pgoff_t next = 0;
-	pgoff_t end = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT;
-	struct zone *zone;
-	struct pagevec pvec;
+		pgscanned++;
+		pagezone = page_zone(page);
+		if (pagezone != zone) {
+			if (zone)
+				spin_unlock_irq(&zone->lru_lock);
+			zone = pagezone;
+			spin_lock_irq(&zone->lru_lock);
+		}
 
-	if (mapping->nrpages == 0)
-		return;
+		if (!PageLRU(page) || !PageUnevictable(page))
+			continue;
 
-	pagevec_init(&pvec, 0);
-	while (next < end &&
-		pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
-		int i;
-		int pg_scanned = 0;
-
-		zone = NULL;
-
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-			pgoff_t page_index = page->index;
-			struct zone *pagezone = page_zone(page);
-
-			pg_scanned++;
-			if (page_index > next)
-				next = page_index;
-			next++;
-
-			if (pagezone != zone) {
-				if (zone)
-					spin_unlock_irq(&zone->lru_lock);
-				zone = pagezone;
-				spin_lock_irq(&zone->lru_lock);
-			}
+		if (page_evictable(page, NULL)) {
+			enum lru_list lru = page_lru_base_type(page);
 
-			if (PageLRU(page) && PageUnevictable(page))
-				check_move_unevictable_page(page, zone);
+			VM_BUG_ON(PageActive(page));
+			ClearPageUnevictable(page);
+			__dec_zone_state(zone, NR_UNEVICTABLE);
+			list_move(&page->lru, &zone->lru[lru].list);
+			mem_cgroup_move_lists(page, LRU_UNEVICTABLE, lru);
+			__inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+			pgrescued++;
 		}
-		if (zone)
-			spin_unlock_irq(&zone->lru_lock);
-		pagevec_release(&pvec);
+	}
 
-		count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
-		cond_resched();
+	if (zone) {
+		__count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+		__count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
+		spin_unlock_irq(&zone->lru_lock);
 	}
 }
-#else
-void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-}
 #endif /* CONFIG_SHMEM */
 
 static void warn_scan_unevictable_pages(void)
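
The mapping walk that used to live in scan_mapping_unevictable_pages() now belongs to the caller: whoever services SHM_UNLOCK gathers page pointers and hands each batch to check_move_unevictable_pages(), which keeps the per-zone lru_lock switching and the UNEVICTABLE_PGSCANNED/PGRESCUED accounting in one place. The companion mm/shmem.c change is not part of this diff, so the caller below is only an illustrative sketch against the 3.2-era pagevec API; the function name unlock_mapping_sketch and its loop details are assumptions, not code from this commit.

#include <linux/fs.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/swap.h>		/* assumed home of the check_move_unevictable_pages() declaration */

/*
 * Illustrative sketch only (not from this commit): one way a SHM_UNLOCK
 * handler could feed batches of pages from a mapping into the new
 * check_move_unevictable_pages() interface.
 */
static void unlock_mapping_sketch(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	if (mapping->nrpages == 0)
		return;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
		/* Resume the next lookup after the last page returned. */
		index = pvec.pages[pagevec_count(&pvec) - 1]->index + 1;

		/* Rescue any pages stranded on the unevictable LRU. */
		check_move_unevictable_pages(pvec.pages, pagevec_count(&pvec));

		pagevec_release(&pvec);
		cond_resched();
	}
}

Keeping the lock juggling and the vm-event counting inside check_move_unevictable_pages() means every caller gets the same locking discipline for free, which is what the switch from a mapping-wide scan to a page-array interface buys.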