Diffstat (limited to 'mm')
 -rw-r--r--  mm/compaction.c      |  1
 -rw-r--r--  mm/memcontrol.c      | 19
 -rw-r--r--  mm/migrate.c         |  2
 -rw-r--r--  mm/nommu.c           | 28
 -rw-r--r--  mm/page-writeback.c  |  2
 -rw-r--r--  mm/percpu.c          |  2
 6 files changed, 40 insertions(+), 14 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 4d709ee5901..1a8894eadf7 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -279,7 +279,6 @@ static unsigned long isolate_migratepages(struct zone *zone,
/* Successfully isolated */
del_page_from_lru_list(zone, page, page_lru(page));
list_add(&page->lru, migratelist);
- mem_cgroup_del_lru(page);
cc->nr_migratepages++;
/* Avoid isolating too much */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7a22b412921..00bb8a64d02 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1925,19 +1925,18 @@ again:
rcu_read_lock();
p = rcu_dereference(mm->owner);
- VM_BUG_ON(!p);
/*
- * because we don't have task_lock(), "p" can exit while
- * we're here. In that case, "mem" can point to root
- * cgroup but never be NULL. (and task_struct itself is freed
- * by RCU, cgroup itself is RCU safe.) Then, we have small
- * risk here to get wrong cgroup. But such kind of mis-account
- * by race always happens because we don't have cgroup_mutex().
- * It's overkill and we allow that small race, here.
+ * Because we don't have task_lock(), "p" can exit.
+ * In that case, "mem" can point to root, or p can be NULL due to
+ * a race with swapoff. Then, we have a small risk of mis-accounting.
+ * But that kind of mis-accounting by race always happens because
+ * we don't have cgroup_mutex(). Preventing it is overkill and we
+ * allow that small race, here.
+ * (*) swapoff et al. will charge against the mm_struct, not against
+ * the task_struct. So, mm->owner can be NULL.
*/
mem = mem_cgroup_from_task(p);
- VM_BUG_ON(!mem);
- if (mem_cgroup_is_root(mem)) {
+ if (!mem || mem_cgroup_is_root(mem)) {
rcu_read_unlock();
goto done;
}
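
The hunk above replaces two VM_BUG_ON()s with a NULL check because mm->owner can legitimately be NULL here. The resulting lookup pattern, pulled out of the charge path for clarity — mm_owner_has_memcg() is a hypothetical helper, not part of the patch:

static bool mm_owner_has_memcg(struct mm_struct *mm)
{
        struct task_struct *p;
        struct mem_cgroup *mem = NULL;

        rcu_read_lock();
        p = rcu_dereference(mm->owner);  /* may be NULL: swapoff race */
        if (p)
                mem = mem_cgroup_from_task(p);
        rcu_read_unlock();

        /* charges to the root cgroup are skipped, just like !mem */
        return mem && !mem_cgroup_is_root(mem);
}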
diff --git a/mm/migrate.c b/mm/migrate.c
index fe5a3c6a542..6ae8a66a704 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -35,6 +35,8 @@
#include <linux/hugetlb.h>
#include <linux/gfp.h>
+#include <asm/tlbflush.h>
+
#include "internal.h"
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
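
The lru_to_page() macro above is the list_entry()/container_of() idiom: subtract the offset of the embedded list_head to recover the enclosing struct page. A minimal userspace sketch of the same idiom, with a hypothetical struct item standing in for struct page:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* list_entry(ptr, type, member): recover the container from an
 * embedded list_head, exactly as the kernel macro does. */
#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct item {
        int id;
        struct list_head lru;   /* embedded node, like page->lru */
};

int main(void)
{
        struct item it = { .id = 42 };
        struct list_head head = { &it.lru, &it.lru };  /* one-entry list */

        it.lru.next = it.lru.prev = &head;

        /* lru_to_page() walks to ->prev (the list tail) and converts. */
        struct item *tail = list_entry(head.prev, struct item, lru);
        printf("%d\n", tail->id);       /* prints 42 */
        return 0;
}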
diff --git a/mm/nommu.c b/mm/nommu.c
index 27a9ac58851..ef4045d010d 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -10,7 +10,7 @@
* Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
* Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
* Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
- * Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org>
+ * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
*/
#include <linux/module.h>
@@ -328,6 +328,7 @@ void *vmalloc_node(unsigned long size, int node)
{
return vmalloc(size);
}
+EXPORT_SYMBOL(vmalloc_node);
/**
* vzalloc_node - allocate memory on a specific node with zero fill
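
The new EXPORT_SYMBOL(vmalloc_node) is what lets loadable modules link against this stub; without it, insmod fails with "Unknown symbol vmalloc_node". A minimal sketch of such a caller — demo_init()/demo_exit() and the module itself are hypothetical:

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *buf;

static int __init demo_init(void)
{
        /* On nommu this falls through to plain vmalloc(), per the
         * definition above; the node hint is simply ignored. */
        buf = vmalloc_node(4 * PAGE_SIZE, 0);
        return buf ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
        vfree(buf);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");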
@@ -440,6 +441,31 @@ void __attribute__((weak)) vmalloc_sync_all(void)
{
}
+/**
+ * alloc_vm_area - allocate a range of kernel address space
+ * @size: size of the area
+ *
+ * Returns: NULL on failure, vm_struct on success
+ *
+ * This function reserves a range of kernel address space, and
+ * allocates pagetables to map that range. No actual mappings
+ * are created. If the kernel address space is not shared
+ * between processes, it syncs the pagetable across all
+ * processes.
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+ BUG();
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(alloc_vm_area);
+
+void free_vm_area(struct vm_struct *area)
+{
+ BUG();
+}
+EXPORT_SYMBOL_GPL(free_vm_area);
+
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
struct page *page)
{
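
alloc_vm_area() has real users only on MMU kernels (historically the Xen backends), which is why the nommu versions above BUG(): there is no kernel page table to pre-populate. A sketch of the MMU-side calling convention, assuming hypothetical reserve_range()/release_range() wrappers:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct vm_struct *area;

static int reserve_range(void)
{
        area = alloc_vm_area(PAGE_SIZE);  /* reserve one page of VA */
        if (!area)
                return -ENOMEM;
        /*
         * area->addr is now a kernel virtual range whose page tables
         * exist but contain no mappings; the caller installs its own
         * (e.g. hypervisor-granted) translations before touching it.
         */
        return 0;
}

static void release_range(void)
{
        free_vm_area(area);
}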
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b840afa8976..b4edfe7ce06 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -563,7 +563,7 @@ static void balance_dirty_pages(struct address_space *mapping,
break; /* We've done our duty */
}
trace_wbc_balance_dirty_wait(&wbc, bdi);
- __set_current_state(TASK_INTERRUPTIBLE);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
io_schedule_timeout(pause);
/*
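
The switch to TASK_UNINTERRUPTIBLE is the usual fix for this pattern: an interruptible sleep returns immediately while a signal is pending, so a signalled writer would spin through balance_dirty_pages() instead of pausing. The throttle step in isolation — throttle_writer() is a hypothetical wrapper:

#include <linux/sched.h>

static void throttle_writer(unsigned long pause)
{
        /* Uninterruptible: even with a signal pending, the task
         * really sleeps for the full timeout, accounted as iowait. */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        io_schedule_timeout(pause);
}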
diff --git a/mm/percpu.c b/mm/percpu.c
index efe816856a9..02ba91230b9 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1268,7 +1268,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
- pcpu_dump_alloc_info(KERN_INFO, ai);
+ pcpu_dump_alloc_info(KERN_DEBUG, ai);
pcpu_nr_groups = ai->nr_groups;
pcpu_group_offsets = group_offsets;
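
pcpu_dump_alloc_info() takes the printk level as its first argument, so this one-character change demotes the whole (potentially large) allocation-map dump from the default console to debug-only output. The visibility difference, sketched with a hypothetical demo_loglevels() helper:

#include <linux/kernel.h>

static void __init demo_loglevels(void)
{
        /* KERN_INFO (level 6) reaches the console at the default
         * console loglevel of 7. */
        printk(KERN_INFO "pcpu demo: informational\n");

        /* KERN_DEBUG (level 7) still lands in the log buffer, but
         * the console suppresses it unless the loglevel is raised
         * (e.g. "dmesg -n 8"); read it back with dmesg. */
        printk(KERN_DEBUG "pcpu demo: debug-only detail\n");
}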