author    Ingo Molnar <mingo@elte.hu>  2008-11-10 09:10:44 +0100
committer Ingo Molnar <mingo@elte.hu>  2008-11-10 09:10:44 +0100
commit    a5a64498c194c82ecad3a2d67cff6231cda8d3dd (patch)
tree      723d5d81419f9960b8d30ed9a2ece8a58d6c4328 /mm/swap.c
parent    bb93d802ae5c1949977cc6da247b218240677f11 (diff)
parent    f7160c7573615ec82c691e294cf80d920b5d588d (diff)
Merge commit 'v2.6.28-rc4' into timers/rtc

Conflicts:
	drivers/rtc/rtc-cmos.c
Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c | 172
1 file changed, 115 insertions(+), 57 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 9e0cb311807..2152e48a7b8 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -31,11 +31,12 @@
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
+#include "internal.h"
+
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
-static DEFINE_PER_CPU(struct pagevec, lru_add_pvecs);
-static DEFINE_PER_CPU(struct pagevec, lru_add_active_pvecs);
+static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
/*
@@ -116,8 +117,9 @@ static void pagevec_move_tail(struct pagevec *pvec)
zone = pagezone;
spin_lock(&zone->lru_lock);
}
- if (PageLRU(page) && !PageActive(page)) {
- list_move_tail(&page->lru, &zone->inactive_list);
+ if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+ int lru = page_is_file_cache(page);
+ list_move_tail(&page->lru, &zone->lru[lru].list);
pgmoved++;
}
}
@@ -136,7 +138,7 @@ static void pagevec_move_tail(struct pagevec *pvec)
void rotate_reclaimable_page(struct page *page)
{
if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
- PageLRU(page)) {
+ !PageUnevictable(page) && PageLRU(page)) {
struct pagevec *pvec;
unsigned long flags;
@@ -157,12 +159,19 @@ void activate_page(struct page *page)
struct zone *zone = page_zone(page);
spin_lock_irq(&zone->lru_lock);
- if (PageLRU(page) && !PageActive(page)) {
- del_page_from_inactive_list(zone, page);
+ if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+ int file = page_is_file_cache(page);
+ int lru = LRU_BASE + file;
+ del_page_from_lru_list(zone, page, lru);
+
SetPageActive(page);
- add_page_to_active_list(zone, page);
+ lru += LRU_ACTIVE;
+ add_page_to_lru_list(zone, page, lru);
__count_vm_event(PGACTIVATE);
- mem_cgroup_move_lists(page, true);
+ mem_cgroup_move_lists(page, lru);
+
+ zone->recent_rotated[!!file]++;
+ zone->recent_scanned[!!file]++;
}
spin_unlock_irq(&zone->lru_lock);
}
@@ -176,7 +185,8 @@ void activate_page(struct page *page)
*/
void mark_page_accessed(struct page *page)
{
- if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
+ if (!PageActive(page) && !PageUnevictable(page) &&
+ PageReferenced(page) && PageLRU(page)) {
activate_page(page);
ClearPageReferenced(page);
} else if (!PageReferenced(page)) {
@@ -186,28 +196,73 @@ void mark_page_accessed(struct page *page)
EXPORT_SYMBOL(mark_page_accessed);
-/**
- * lru_cache_add: add a page to the page lists
- * @page: the page to add
- */
-void lru_cache_add(struct page *page)
+void __lru_cache_add(struct page *page, enum lru_list lru)
{
- struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
+ struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
page_cache_get(page);
if (!pagevec_add(pvec, page))
- __pagevec_lru_add(pvec);
+ ____pagevec_lru_add(pvec, lru);
put_cpu_var(lru_add_pvecs);
}
-void lru_cache_add_active(struct page *page)
+/**
+ * lru_cache_add_lru - add a page to a page list
+ * @page: the page to be added to the LRU.
+ * @lru: the LRU list to which the page is added.
+ */
+void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
- struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
+ if (PageActive(page)) {
+ VM_BUG_ON(PageUnevictable(page));
+ ClearPageActive(page);
+ } else if (PageUnevictable(page)) {
+ VM_BUG_ON(PageActive(page));
+ ClearPageUnevictable(page);
+ }
- page_cache_get(page);
- if (!pagevec_add(pvec, page))
- __pagevec_lru_add_active(pvec);
- put_cpu_var(lru_add_active_pvecs);
+ VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
+ __lru_cache_add(page, lru);
+}
+
+/**
+ * add_page_to_unevictable_list - add a page to the unevictable list
+ * @page: the page to be added to the unevictable list
+ *
+ * Add page directly to its zone's unevictable list. To avoid races with
+ * tasks that might be making the page evictable, through eg. munlock,
+ * munmap or exit, while it's not on the lru, we want to add the page
+ * while it's locked or otherwise "invisible" to other tasks. This is
+ * difficult to do when using the pagevec cache, so bypass that.
+ */
+void add_page_to_unevictable_list(struct page *page)
+{
+ struct zone *zone = page_zone(page);
+
+ spin_lock_irq(&zone->lru_lock);
+ SetPageUnevictable(page);
+ SetPageLRU(page);
+ add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
+ spin_unlock_irq(&zone->lru_lock);
+}
+
+/**
+ * lru_cache_add_active_or_unevictable
+ * @page: the page to be added to LRU
+ * @vma: vma in which page is mapped for determining reclaimability
+ *
+ * place @page on active or unevictable LRU list, depending on
+ * page_evictable(). Note that if the page is not evictable,
+ * it goes directly back onto it's zone's unevictable list. It does
+ * NOT use a per cpu pagevec.
+ */
+void lru_cache_add_active_or_unevictable(struct page *page,
+ struct vm_area_struct *vma)
+{
+ if (page_evictable(page, vma))
+ lru_cache_add_lru(page, LRU_ACTIVE + page_is_file_cache(page));
+ else
+ add_page_to_unevictable_list(page);
}
/*
@@ -217,15 +272,15 @@ void lru_cache_add_active(struct page *page)
*/
static void drain_cpu_pagevecs(int cpu)
{
+ struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
struct pagevec *pvec;
+ int lru;
- pvec = &per_cpu(lru_add_pvecs, cpu);
- if (pagevec_count(pvec))
- __pagevec_lru_add(pvec);
-
- pvec = &per_cpu(lru_add_active_pvecs, cpu);
- if (pagevec_count(pvec))
- __pagevec_lru_add_active(pvec);
+ for_each_lru(lru) {
+ pvec = &pvecs[lru - LRU_BASE];
+ if (pagevec_count(pvec))
+ ____pagevec_lru_add(pvec, lru);
+ }
pvec = &per_cpu(lru_rotate_pvecs, cpu);
if (pagevec_count(pvec)) {
@@ -244,7 +299,7 @@ void lru_add_drain(void)
put_cpu();
}
-#ifdef CONFIG_NUMA
+#if defined(CONFIG_NUMA) || defined(CONFIG_UNEVICTABLE_LRU)
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
lru_add_drain();
@@ -308,6 +363,7 @@ void release_pages(struct page **pages, int nr, int cold)
if (PageLRU(page)) {
struct zone *pagezone = page_zone(page);
+
if (pagezone != zone) {
if (zone)
spin_unlock_irqrestore(&zone->lru_lock,
@@ -380,10 +436,11 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
* Add the passed pages to the LRU, then drop the caller's refcount
* on them. Reinitialises the caller's pagevec.
*/
-void __pagevec_lru_add(struct pagevec *pvec)
+void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
int i;
struct zone *zone = NULL;
+ VM_BUG_ON(is_unevictable_lru(lru));
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
@@ -395,9 +452,13 @@ void __pagevec_lru_add(struct pagevec *pvec)
zone = pagezone;
spin_lock_irq(&zone->lru_lock);
}
+ VM_BUG_ON(PageActive(page));
+ VM_BUG_ON(PageUnevictable(page));
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
- add_page_to_inactive_list(zone, page);
+ if (is_active_lru(lru))
+ SetPageActive(page);
+ add_page_to_lru_list(zone, page, lru);
}
if (zone)
spin_unlock_irq(&zone->lru_lock);
@@ -405,48 +466,45 @@ void __pagevec_lru_add(struct pagevec *pvec)
pagevec_reinit(pvec);
}
-EXPORT_SYMBOL(__pagevec_lru_add);
+EXPORT_SYMBOL(____pagevec_lru_add);
-void __pagevec_lru_add_active(struct pagevec *pvec)
+/*
+ * Try to drop buffers from the pages in a pagevec
+ */
+void pagevec_strip(struct pagevec *pvec)
{
int i;
- struct zone *zone = NULL;
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
- struct zone *pagezone = page_zone(page);
- if (pagezone != zone) {
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
- zone = pagezone;
- spin_lock_irq(&zone->lru_lock);
+ if (PagePrivate(page) && trylock_page(page)) {
+ if (PagePrivate(page))
+ try_to_release_page(page, 0);
+ unlock_page(page);
}
- VM_BUG_ON(PageLRU(page));
- SetPageLRU(page);
- VM_BUG_ON(PageActive(page));
- SetPageActive(page);
- add_page_to_active_list(zone, page);
}
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
- release_pages(pvec->pages, pvec->nr, pvec->cold);
- pagevec_reinit(pvec);
}
-/*
- * Try to drop buffers from the pages in a pagevec
+/**
+ * pagevec_swap_free - try to free swap space from the pages in a pagevec
+ * @pvec: pagevec with swapcache pages to free the swap space of
+ *
+ * The caller needs to hold an extra reference to each page and
+ * not hold the page lock on the pages. This function uses a
+ * trylock on the page lock so it may not always free the swap
+ * space associated with a page.
*/
-void pagevec_strip(struct pagevec *pvec)
+void pagevec_swap_free(struct pagevec *pvec)
{
int i;
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
- if (PagePrivate(page) && trylock_page(page)) {
- if (PagePrivate(page))
- try_to_release_page(page, 0);
+ if (PageSwapCache(page) && trylock_page(page)) {
+ if (PageSwapCache(page))
+ remove_exclusive_swap_page_ref(page);
unlock_page(page);
}
}