Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c |  1 +
-rw-r--r--  mm/nommu.c   | 12 +++++++-----
-rw-r--r--  mm/slab.c    | 12 ++++++------
-rw-r--r--  mm/swap.c    |  4 ++--
 4 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 7b84dc81434..13df01c5047 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1893,6 +1893,7 @@ int should_remove_suid(struct dentry *dentry)
return 0;
}
+EXPORT_SYMBOL(should_remove_suid);
int __remove_suid(struct dentry *dentry, int kill)
{
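The filemap.c change is export-only; should_remove_suid() itself is untouched. As a minimal sketch of why a modular filesystem wants the export (the helper and its ATTR_KILL_* return semantics are real, the caller below is hypothetical), a write path can now ask whether SUID/SGID bits need stripping:

#include <linux/fs.h>

/* Hypothetical caller, illustrative only. */
static int example_prepare_write(struct dentry *dentry)
{
	/* Returns the ATTR_KILL_SUID/ATTR_KILL_SGID mask to apply, or 0. */
	int kill = should_remove_suid(dentry);

	if (kill) {
		/* strip the bits through the filesystem's setattr path */
	}
	return 0;
}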
diff --git a/mm/nommu.c b/mm/nommu.c
index 8bdde9508f3..6a2a8aada40 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -497,15 +497,17 @@ static int validate_mmap_request(struct file *file,
(flags & MAP_TYPE) != MAP_SHARED)
return -EINVAL;
- if (PAGE_ALIGN(len) == 0)
- return addr;
-
- if (len > TASK_SIZE)
+ if (!len)
return -EINVAL;
+ /* Careful about overflows.. */
+ len = PAGE_ALIGN(len);
+ if (!len || len > TASK_SIZE)
+ return -ENOMEM;
+
/* offset overflow? */
if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
- return -EINVAL;
+ return -EOVERFLOW;
if (file) {
/* validate file mapping requests */
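The reordered nommu.c checks matter because PAGE_ALIGN() rounds up: a length within one page of ULONG_MAX aligns to zero, which the old code tested before the range check and then answered with a bogus return addr. A standalone userspace sketch of the wrap, assuming a 4096-byte page:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long len = ~0UL - 100;	/* within one page of ULONG_MAX */

	/* Rounding up overflows past the top of the address space and
	 * wraps to 0, hence the recheck of !len after aligning. */
	printf("PAGE_ALIGN(%lu) = %lu\n", len, PAGE_ALIGN(len));
	return 0;
}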
diff --git a/mm/slab.c b/mm/slab.c
index 3c4a7e34edd..5de81473df3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
int node);
static int enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
+static void cache_reap(struct work_struct *unused);
/*
* This function must be completely optimized away if a constant is passed to
@@ -753,7 +753,7 @@ int slab_is_available(void)
return g_cpucache_up == FULL;
}
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
@@ -916,16 +916,16 @@ static void next_reap_node(void)
*/
static void __devinit start_cpu_timer(int cpu)
{
- struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+ struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
/*
* When this gets called from do_initcalls via cpucache_init(),
* init_workqueues() has already run, so keventd will be setup
* at that time.
*/
- if (keventd_up() && reap_work->func == NULL) {
+ if (keventd_up() && reap_work->work.func == NULL) {
init_reap_node(cpu);
- INIT_WORK(reap_work, cache_reap, NULL);
+ INIT_DELAYED_WORK(reap_work, cache_reap);
schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
}
}
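The slab.c hunks track the workqueue rework that split delayed work into its own type: INIT_WORK() loses its data pointer, and delayed scheduling wants the wrapper struct. A condensed sketch of the new pattern, with illustrative example_* names rather than the patch's own:

#include <linux/workqueue.h>

static struct delayed_work example_work;

static void example_fn(struct work_struct *w)
{
	/* the handler gets the work item itself, not a separate void *data */
}

static void example_start(int cpu)
{
	/* INIT_DELAYED_WORK() takes two arguments now, and the per-cpu
	 * item must be the wrapper type, matching the hunks above. */
	INIT_DELAYED_WORK(&example_work, example_fn);
	schedule_delayed_work_on(cpu, &example_work, HZ);
}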
@@ -3815,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
* If we cannot acquire the cache chain mutex then just give up - we'll try
* again on the next iteration.
*/
-static void cache_reap(void *unused)
+static void cache_reap(struct work_struct *unused)
{
struct kmem_cache *searchp;
struct kmem_list3 *l3;
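cache_reap() simply ignores its new argument, but a handler that used to rely on the void *data can recover its state with container_of(), since delayed_work embeds the work_struct it is called with. A sketch with hypothetical names (struct and fields are not from the patch):

struct example_state {
	unsigned long interval;
	struct delayed_work dwork;	/* embeds the work_struct */
};

static void example_reap(struct work_struct *w)
{
	struct delayed_work *dw = container_of(w, struct delayed_work, work);
	struct example_state *s = container_of(dw, struct example_state, dwork);

	/* ... do the periodic work ..., then rearm, much as cache_reap()
	 * reschedules itself at the end of its body. */
	schedule_delayed_work(dw, s->interval);
}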
diff --git a/mm/swap.c b/mm/swap.c
index 2e0e871f542..d9a3770d8f3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -216,7 +216,7 @@ void lru_add_drain(void)
}
#ifdef CONFIG_NUMA
-static void lru_add_drain_per_cpu(void *dummy)
+static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
lru_add_drain();
}
@@ -226,7 +226,7 @@ static void lru_add_drain_per_cpu(void *dummy)
*/
int lru_add_drain_all(void)
{
- return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+ return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
#else
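The swap.c change is the same rework seen from the caller's side: with the data pointer gone, schedule_on_each_cpu() takes only the function, and the handler's argument is the per-cpu work item it was queued on. A minimal sketch, again with hypothetical names:

#include <linux/workqueue.h>

static void example_drain(struct work_struct *dummy)
{
	/* runs once on each online CPU via keventd */
}

static int example_drain_all(void)
{
	return schedule_on_each_cpu(example_drain);
}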