Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     4
-rw-r--r--  mm/mempolicy.c  64
-rw-r--r--  mm/mmap.c        2
-rw-r--r--  mm/pdflush.c    13
-rw-r--r--  mm/swap.c        4
-rw-r--r--  mm/tiny-shmem.c  5
-rw-r--r--  mm/truncate.c   11
7 files changed, 88 insertions, 15 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 768687f1d46..5d6e4c2000d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1030,8 +1030,8 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
desc.error = 0;
do_generic_file_read(filp,ppos,&desc,file_read_actor);
retval += desc.written;
- if (!retval) {
- retval = desc.error;
+ if (desc.error) {
+ retval = retval ?: desc.error;
break;
}
}
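
The `?:` here is the GNU C conditional with the middle operand omitted: `a ?: b` yields a when a is nonzero, otherwise b, evaluating a only once. The fix therefore reports desc.error only when no bytes were copied, so a partial read still returns its byte count. A minimal userspace sketch of that exit logic (hypothetical values, not the kernel function):

#include <stdio.h>

/* Model of the fixed loop exit: prefer the byte count already
 * copied; fall back to the error only when nothing was read. */
static long read_result(long copied, long err)
{
    return copied ?: err;   /* GNU extension: copied ? copied : err */
}

int main(void)
{
    printf("%ld\n", read_result(4096, -5)); /* 4096: partial read wins */
    printf("%ld\n", read_result(0, -5));    /* -5 (-EIO): nothing read */
    return 0;
}
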
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2076b1542b8..5abc57c2b8b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -457,6 +457,7 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask,
struct vm_area_struct *vma = NULL;
struct mempolicy *pol = current->mempolicy;
+ cpuset_update_current_mems_allowed();
if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
return -EINVAL;
if (flags & MPOL_F_ADDR) {
@@ -1206,3 +1207,66 @@ void numa_default_policy(void)
{
do_set_mempolicy(MPOL_DEFAULT, NULL);
}
+
+/* Migrate a policy to a different set of nodes */
+static void rebind_policy(struct mempolicy *pol, const nodemask_t *old,
+ const nodemask_t *new)
+{
+ nodemask_t tmp;
+
+ if (!pol)
+ return;
+
+ switch (pol->policy) {
+ case MPOL_DEFAULT:
+ break;
+ case MPOL_INTERLEAVE:
+ nodes_remap(tmp, pol->v.nodes, *old, *new);
+ pol->v.nodes = tmp;
+ current->il_next = node_remap(current->il_next, *old, *new);
+ break;
+ case MPOL_PREFERRED:
+ pol->v.preferred_node = node_remap(pol->v.preferred_node,
+ *old, *new);
+ break;
+ case MPOL_BIND: {
+ nodemask_t nodes;
+ struct zone **z;
+ struct zonelist *zonelist;
+
+ nodes_clear(nodes);
+ for (z = pol->v.zonelist->zones; *z; z++)
+ node_set((*z)->zone_pgdat->node_id, nodes);
+ nodes_remap(tmp, nodes, *old, *new);
+ nodes = tmp;
+
+ zonelist = bind_zonelist(&nodes);
+
+ /* If no mem, then zonelist is NULL and we keep old zonelist.
+ * If that old zonelist has no remaining mems_allowed nodes,
+ * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
+ */
+
+ if (zonelist) {
+ /* Good - got mem - substitute new zonelist */
+ kfree(pol->v.zonelist);
+ pol->v.zonelist = zonelist;
+ }
+ break;
+ }
+ default:
+ BUG();
+ break;
+ }
+}
+
+/*
+ * Someone moved this task to different nodes. Fixup mempolicies.
+ *
+ * TODO - fixup current->mm->vma and shmfs/tmpfs/hugetlbfs policies as well,
+ * once we have a cpuset mechanism to mark which cpuset subtree is migrating.
+ */
+void numa_policy_rebind(const nodemask_t *old, const nodemask_t *new)
+{
+ rebind_policy(current->mempolicy, old, new);
+}
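
node_remap()/nodes_remap() translate node numbers by ordinal position: the n-th set bit of *old maps to the n-th set bit of *new. Below is a toy single-word model of that mapping; the kernel's bitmap_bitremap() handles full-width nodemasks, empty masks, and ordinal wraparound, all of which this sketch simplifies away:

#include <stdio.h>

/* Toy model of node_remap(): a node maps by its ordinal among the
 * set bits of `oldm` to the set bit of the same ordinal in `newm`. */
static int remap_node(int node, unsigned long oldm, unsigned long newm)
{
    int ord = 0, b;

    if (!(oldm & (1UL << node)))
        return node;                /* not in the old mask: leave it */
    for (b = 0; b < node; b++)      /* ordinal among oldm's set bits */
        if (oldm & (1UL << b))
            ord++;
    for (b = 0; b < (int)(8 * sizeof(unsigned long)); b++)
        if ((newm & (1UL << b)) && ord-- == 0)
            return b;               /* ord-th set bit of the new mask */
    return node;                    /* new mask too small: leave it */
}

int main(void)
{
    /* Task moved from nodes {1,2} to nodes {4,6}: 1 -> 4, 2 -> 6 */
    printf("%d %d\n", remap_node(1, 0x06, 0x50), remap_node(2, 0x06, 0x50));
    return 0;
}
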
diff --git a/mm/mmap.c b/mm/mmap.c
index 5ecc2cf3e1d..320dda1778c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1840,7 +1840,7 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len)
static inline void verify_mm_writelocked(struct mm_struct *mm)
{
-#ifdef CONFIG_DEBUG_KERNEL
+#ifdef CONFIG_DEBUG_VM
if (unlikely(down_read_trylock(&mm->mmap_sem))) {
WARN_ON(1);
up_read(&mm->mmap_sem);
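
The assertion works by contradiction: if down_read_trylock() succeeds, no writer holds mmap_sem, so the caller's claim of holding it for writing is false. A rough pthread analogue of the same try-lock trick, assuming a glibc-style rwlock where tryrdlock fails with EBUSY while a writer holds the lock:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/* If a read-trylock succeeds, no writer can be holding the lock, so
 * a caller claiming to hold it for writing is wrong. The check is
 * one-sided: a failed trylock does not prove *this* thread is the
 * writer, which is why the kernel guards it with a debug option. */
static void verify_writelocked(void)
{
    if (pthread_rwlock_tryrdlock(&lock) == 0) {
        pthread_rwlock_unlock(&lock);
        assert(!"write lock was not held");
    }
}

int main(void)
{
    pthread_rwlock_wrlock(&lock);
    verify_writelocked();           /* silent: a writer holds the lock */
    pthread_rwlock_unlock(&lock);
    printf("ok\n");
    return 0;
}
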
diff --git a/mm/pdflush.c b/mm/pdflush.c
index d6781951267..52822c98c48 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -20,6 +20,7 @@
#include <linux/fs.h> // Needed by writeback.h
#include <linux/writeback.h> // Prototypes pdflush_operation()
#include <linux/kthread.h>
+#include <linux/cpuset.h>
/*
@@ -170,12 +171,24 @@ static int __pdflush(struct pdflush_work *my_work)
static int pdflush(void *dummy)
{
struct pdflush_work my_work;
+ cpumask_t cpus_allowed;
/*
* pdflush can spend a lot of time doing encryption via dm-crypt. We
* don't want to do that at keventd's priority.
*/
set_user_nice(current, 0);
+
+ /*
+ * Some configs put our parent kthread in a limited cpuset,
+ * which kthread() overrides, forcing cpus_allowed == CPU_MASK_ALL.
+ * Our needs are more modest - cut back to our cpuset's cpus_allowed.
+ * This is needed because pdflush threads are dynamically created and
+ * destroyed; the boot-time pdflush threads are easily placed without
+ * these two lines.
+ */
+ cpus_allowed = cpuset_cpus_allowed(current);
+ set_cpus_allowed(current, cpus_allowed);
+
return __pdflush(&my_work);
}
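
The pattern is "inherit wide, then narrow": kthread() hands the new thread CPU_MASK_ALL, and pdflush immediately cuts back to what its cpuset permits. A loose userspace analogue using sched_setaffinity(), with CPU 0 standing in for the mask cpuset_cpus_allowed() would return:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    cpu_set_t mask;
    int cpu;

    /* The inherited mask may be "all CPUs", as kthread() forces
     * CPU_MASK_ALL on a freshly created pdflush. */
    sched_getaffinity(0, sizeof(mask), &mask);

    /* Policy allows only CPU 0 here, a stand-in for the mask that
     * cpuset_cpus_allowed(current) would return. */
    CPU_ZERO(&mask);
    CPU_SET(0, &mask);

    /* Narrow ourselves back down, as set_cpus_allowed() does. */
    if (sched_setaffinity(0, sizeof(mask), &mask) != 0)
        perror("sched_setaffinity");

    sched_getaffinity(0, sizeof(mask), &mask);
    for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
        if (CPU_ISSET(cpu, &mask))
            printf("allowed: cpu %d\n", cpu);
    return 0;
}
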
diff --git a/mm/swap.c b/mm/swap.c
index b89512877ec..154ae13d8b7 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -259,6 +259,8 @@ void __pagevec_release(struct pagevec *pvec)
pagevec_reinit(pvec);
}
+EXPORT_SYMBOL(__pagevec_release);
+
/*
* pagevec_release() for pages which are known to not be on the LRU
*
@@ -270,7 +272,6 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
struct pagevec pages_to_free;
pagevec_init(&pages_to_free, pvec->cold);
- pages_to_free.cold = pvec->cold;
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
@@ -388,6 +389,7 @@ unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
return pagevec_count(pvec);
}
+EXPORT_SYMBOL(pagevec_lookup_tag);
#ifdef CONFIG_SMP
/*
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index c13a2161bca..b58abcf44ed 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -31,11 +31,14 @@ static struct vfsmount *shm_mnt;
static int __init init_tmpfs(void)
{
- register_filesystem(&tmpfs_fs_type);
+ BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
+
#ifdef CONFIG_TMPFS
devfs_mk_dir("shm");
#endif
shm_mnt = kern_mount(&tmpfs_fs_type);
+ BUG_ON(IS_ERR(shm_mnt));
+
return 0;
}
module_init(init_tmpfs)
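
kern_mount() reports failure through the kernel's ERR_PTR convention, which is why the new check is IS_ERR() rather than a NULL test. A userspace model of that idiom, with a hypothetical mount_it() standing in for kern_mount():

#include <stdio.h>

#define MAX_ERRNO 4095

/* Userspace model of the kernel's ERR_PTR()/IS_ERR() idiom: an error
 * is encoded as a pointer in the top MAX_ERRNO values of the address
 * space, so one return slot carries either a valid pointer or a
 * negative errno. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in for kern_mount(); fails with -ENOMEM (-12). */
static void *mount_it(int fail)
{
    static int sb;
    return fail ? ERR_PTR(-12) : (void *)&sb;
}

int main(void)
{
    void *mnt = mount_it(1);
    if (IS_ERR(mnt))
        printf("mount failed, errno %ld\n", -PTR_ERR(mnt));
    return 0;
}
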
diff --git a/mm/truncate.c b/mm/truncate.c
index 60c8764bfac..29c18f68dc3 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -13,18 +13,9 @@
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/buffer_head.h> /* grr. try_to_release_page,
- block_invalidatepage */
+ do_invalidatepage */
-static int do_invalidatepage(struct page *page, unsigned long offset)
-{
- int (*invalidatepage)(struct page *, unsigned long);
- invalidatepage = page->mapping->a_ops->invalidatepage;
- if (invalidatepage == NULL)
- invalidatepage = block_invalidatepage;
- return (*invalidatepage)(page, offset);
-}
-
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
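
truncate_partial_page() keeps the first `partial` bytes of the last page and zeroes the tail, where `partial` is the new file size modulo the page size. A userspace sketch of that arithmetic with a hypothetical 10000-byte truncate:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Model of truncate_partial_page(): after truncating a file to a
 * size that is not page-aligned, the last remaining page keeps its
 * first `partial` bytes and has the rest zeroed. */
static void truncate_partial(unsigned char *page, unsigned partial)
{
    memset(page + partial, 0, PAGE_SIZE - partial);
}

int main(void)
{
    static unsigned char page[PAGE_SIZE];
    unsigned long newsize = 10000;                /* hypothetical size */
    unsigned partial = newsize & (PAGE_SIZE - 1); /* 10000 % 4096 = 1808 */

    memset(page, 0xaa, sizeof(page));
    truncate_partial(page, partial);
    printf("kept %u bytes, zeroed %lu\n", partial, PAGE_SIZE - partial);
    return 0;
}
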