Diffstat (limited to 'drivers/staging/android/ashmem.c')
 -rw-r--r--   drivers/staging/android/ashmem.c | 342
 1 file changed, 238 insertions(+), 104 deletions(-)
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 99052bfd3a2..713a9722678 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -1,24 +1,27 @@
 /* mm/ashmem.c
-**
-** Anonymous Shared Memory Subsystem, ashmem
-**
-** Copyright (C) 2008 Google, Inc.
-**
-** Robert Love <rlove@google.com>
-**
-** This software is licensed under the terms of the GNU General Public
-** License version 2, as published by the Free Software Foundation, and
-** may be copied, distributed, and modified under those terms.
-**
-** This program is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-** GNU General Public License for more details.
-*/
+ *
+ * Anonymous Shared Memory Subsystem, ashmem
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "ashmem: " fmt
 
 #include <linux/module.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/falloc.h>
 #include <linux/miscdevice.h>
 #include <linux/security.h>
 #include <linux/mm.h>
@@ -34,41 +37,59 @@
 #define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
 #define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
 
-/*
- * ashmem_area - anonymous shared memory area
- * Lifecycle: From our parent file's open() until its release()
- * Locking: Protected by `ashmem_mutex'
- * Big Note: Mappings do NOT pin this structure; it dies on close()
+/**
+ * struct ashmem_area - The anonymous shared memory area
+ * @name:		The optional name in /proc/pid/maps
+ * @unpinned_list:	The list of all ashmem areas
+ * @file:		The shmem-based backing file
+ * @size:		The size of the mapping, in bytes
+ * @prot_masks:		The allowed protection bits, as vm_flags
+ *
+ * The lifecycle of this structure is from our parent file's open() until
+ * its release(). It is also protected by 'ashmem_mutex'
+ *
+ * Warning: Mappings do NOT pin this structure; It dies on close()
  */
 struct ashmem_area {
-	char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
-	struct list_head unpinned_list;	 /* list of all ashmem areas */
-	struct file *file;		 /* the shmem-based backing file */
-	size_t size;			 /* size of the mapping, in bytes */
-	unsigned long prot_mask;	 /* allowed prot bits, as vm_flags */
+	char name[ASHMEM_FULL_NAME_LEN];
+	struct list_head unpinned_list;
+	struct file *file;
+	size_t size;
+	unsigned long prot_mask;
 };
 
-/*
- * ashmem_range - represents an interval of unpinned (evictable) pages
- * Lifecycle: From unpin to pin
- * Locking: Protected by `ashmem_mutex'
+/**
+ * struct ashmem_range - A range of unpinned/evictable pages
+ * @lru:	The entry in the LRU list
+ * @unpinned:	The entry in its area's unpinned list
+ * @asma:	The associated anonymous shared memory area
+ * @pgstart:	The starting page (inclusive)
+ * @pgend:	The ending page (inclusive)
+ * @purged:	The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED)
+ *
+ * The lifecycle of this structure is from unpin to pin.
+ * It is protected by 'ashmem_mutex'
  */
 struct ashmem_range {
-	struct list_head lru;		/* entry in LRU list */
-	struct list_head unpinned;	/* entry in its area's unpinned list */
-	struct ashmem_area *asma;	/* associated area */
-	size_t pgstart;			/* starting page, inclusive */
-	size_t pgend;			/* ending page, inclusive */
-	unsigned int purged;		/* ASHMEM_NOT or ASHMEM_WAS_PURGED */
+	struct list_head lru;
+	struct list_head unpinned;
+	struct ashmem_area *asma;
+	size_t pgstart;
+	size_t pgend;
+	unsigned int purged;
 };
 
 /* LRU list of unpinned pages, protected by ashmem_mutex */
 static LIST_HEAD(ashmem_lru_list);
 
-/* Count of pages on our LRU list, protected by ashmem_mutex */
+/**
+ * long lru_count - The count of pages on our LRU list.
+ *
+ * This is protected by ashmem_mutex.
+ */
 static unsigned long lru_count;
 
-/*
+/**
  * ashmem_mutex - protects the list of and each individual ashmem_area
  *
  * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem
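
For readers new to ashmem, the lifecycle documented in the hunk above ("from our parent file's open() until its release()") corresponds to a short userspace sequence. The sketch below is illustrative only and is not part of this patch; it assumes a /dev/ashmem node and the ASHMEM_* ioctls and struct ashmem_pin declared in the driver's ashmem.h header.

/* Illustrative userspace client, not part of this patch. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ashmem.h>	/* assumed available to userspace */

int main(void)
{
	int fd = open("/dev/ashmem", O_RDWR);	/* ashmem_open() */
	if (fd < 0)
		return 1;

	/* name and size must be set before the first mmap() */
	char name[ASHMEM_NAME_LEN] = "example-region";
	ioctl(fd, ASHMEM_SET_NAME, name);
	ioctl(fd, ASHMEM_SET_SIZE, 4096);

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);	/* ashmem_mmap() */
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");

	/* unpinned pages form the ashmem_range LRU described above */
	struct ashmem_pin pin = { .offset = 0, .len = 4096 };
	ioctl(fd, ASHMEM_UNPIN, &pin);

	munmap(p, 4096);
	close(fd);	/* ashmem_release(): the area dies on close() */
	return 0;
}
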
@@ -102,28 +123,43 @@ static struct kmem_cache *ashmem_range_cachep __read_mostly;
 
 #define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)
 
+/**
+ * lru_add() - Adds a range of memory to the LRU list
+ * @range:	The memory range being added.
+ *
+ * The range is first added to the end (tail) of the LRU list.
+ * After this, the size of the range is added to @lru_count
+ */
 static inline void lru_add(struct ashmem_range *range)
 {
 	list_add_tail(&range->lru, &ashmem_lru_list);
 	lru_count += range_size(range);
 }
 
+/**
+ * lru_del() - Removes a range of memory from the LRU list
+ * @range:	The memory range being removed
+ *
+ * The range is first deleted from the LRU list.
+ * After this, the size of the range is removed from @lru_count
+ */
 static inline void lru_del(struct ashmem_range *range)
 {
	list_del(&range->lru);
 	lru_count -= range_size(range);
 }
 
-/*
- * range_alloc - allocate and initialize a new ashmem_range structure
+/**
+ * range_alloc() - Allocates and initializes a new ashmem_range structure
+ * @asma:	The associated ashmem_area
+ * @prev_range:	The previous ashmem_range in the sorted asma->unpinned list
+ * @purged:	Initial purge status (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ * @start:	The starting page (inclusive)
+ * @end:	The ending page (inclusive)
  *
- * 'asma' - associated ashmem_area
- * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
- * 'purged' - initial purge value (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
- * 'start' - starting page, inclusive
- * 'end' - ending page, inclusive
+ * This function is protected by ashmem_mutex.
  *
- * Caller must hold ashmem_mutex.
+ * Return: 0 if successful, or -ENOMEM if there is an error
  */
 static int range_alloc(struct ashmem_area *asma,
 		       struct ashmem_range *prev_range, unsigned int purged,
@@ -148,6 +184,10 @@ static int range_alloc(struct ashmem_area *asma,
 	return 0;
 }
 
+/**
+ * range_del() - Deletes and dealloctes an ashmem_range structure
+ * @range:	The associated ashmem_range that has previously been allocated
+ */
 static void range_del(struct ashmem_range *range)
 {
 	list_del(&range->unpinned);
@@ -156,10 +196,17 @@ static void range_del(struct ashmem_range *range)
 	kmem_cache_free(ashmem_range_cachep, range);
 }
 
-/*
- * range_shrink - shrinks a range
+/**
+ * range_shrink() - Shrinks an ashmem_range
+ * @range:	The associated ashmem_range being shrunk
+ * @start:	The starting byte of the new range
+ * @end:	The ending byte of the new range
  *
- * Caller must hold ashmem_mutex.
+ * This does not modify the data inside the existing range in any way - It
+ * simply shrinks the boundaries of the range.
+ *
+ * Theoretically, with a little tweaking, this could eventually be changed
+ * to range_resize, and expand the lru_count if the new range is larger.
  */
 static inline void range_shrink(struct ashmem_range *range,
 				size_t start, size_t end)
@@ -173,6 +220,16 @@ static inline void range_shrink(struct ashmem_range *range,
 	lru_count -= pre - range_size(range);
 }
 
+/**
+ * ashmem_open() - Opens an Anonymous Shared Memory structure
+ * @inode:	The backing file's index node(?)
+ * @file:	The backing file
+ *
+ * Please note that the ashmem_area is not returned by this function - It is
+ * instead written to "file->private_data".
+ *
+ * Return: 0 if successful, or another code if unsuccessful.
+ */
 static int ashmem_open(struct inode *inode, struct file *file)
 {
 	struct ashmem_area *asma;
@@ -194,6 +251,14 @@ static int ashmem_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
+/**
+ * ashmem_release() - Releases an Anonymous Shared Memory structure
+ * @ignored:	The backing file's Index Node(?) - It is ignored here.
+ * @file:	The backing file
+ *
+ * Return: 0 if successful. If it is anything else, go have a coffee and
+ * try again.
+ */
 static int ashmem_release(struct inode *ignored, struct file *file)
 {
 	struct ashmem_area *asma = file->private_data;
@@ -211,6 +276,15 @@ static int ashmem_release(struct inode *ignored, struct file *file)
 	return 0;
 }
 
+/**
+ * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
+ * @file:	The associated backing file.
+ * @buf:	The buffer of data being written to
+ * @len:	The number of bytes being read
+ * @pos:	The position of the first byte to read.
+ *
+ * Return: 0 if successful, or another return code if not.
+ */
 static ssize_t ashmem_read(struct file *file, char __user *buf,
 			   size_t len, loff_t *pos)
 {
@@ -221,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
 
 	/* If size is not set, or set to 0, always return EOF. */
 	if (asma->size == 0)
-		goto out;
+		goto out_unlock;
 
 	if (!asma->file) {
 		ret = -EBADF;
-		goto out;
+		goto out_unlock;
 	}
 
-	ret = asma->file->f_op->read(asma->file, buf, len, pos);
-	if (ret < 0)
-		goto out;
+	mutex_unlock(&ashmem_mutex);
 
-	/** Update backing file pos, since f_ops->read() doesn't */
-	asma->file->f_pos = *pos;
+	/*
+	 * asma and asma->file are used outside the lock here.  We assume
+	 * once asma->file is set it will never be changed, and will not
+	 * be destroyed until all references to the file are dropped and
+	 * ashmem_release is called.
+	 */
+	ret = asma->file->f_op->read(asma->file, buf, len, pos);
+	if (ret >= 0) {
+		/** Update backing file pos, since f_ops->read() doesn't */
+		asma->file->f_pos = *pos;
+	}
+	return ret;
 
-out:
+out_unlock:
 	mutex_unlock(&ashmem_mutex);
 	return ret;
 }
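
The rework of ashmem_read() above drops ashmem_mutex before calling the backing file's read method: copying into the user buffer can fault and take mmap_sem, while a concurrent mmap() already holds mmap_sem when ashmem_mmap() tries to take ashmem_mutex. A hedged sketch of the inversion being avoided follows; the function below is hypothetical and is not in the driver.

/* Hypothetical variant that keeps the mutex across the read: */
static ssize_t bad_ashmem_read(struct file *file, char __user *buf,
			       size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	ssize_t ret;

	mutex_lock(&ashmem_mutex);		/* lock A */
	/* a fault on 'buf' takes mmap_sem (lock B) while holding A... */
	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	mutex_unlock(&ashmem_mutex);
	return ret;
}

/*
 * ...while the mmap(2) path enters ashmem_mmap() with mmap_sem (B)
 * already held by the VM core and then takes ashmem_mutex (A): a
 * classic A->B vs B->A deadlock. Hence the early unlock above, which
 * relies on asma->file being immutable once set.
 */
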
@@ -269,7 +351,7 @@ out:
 	return ret;
 }
 
-static inline unsigned long calc_vm_may_flags(unsigned long prot)
+static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
 {
 	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
 	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
@@ -315,7 +397,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 		get_file(asma->file);
 
 	/*
-	 * XXX - Reworked to use shmem_zero_setup() instead of 
+	 * XXX - Reworked to use shmem_zero_setup() instead of
 	 * shmem_set_file while we're in staging. -jstultz
 	 */
 	if (vma->vm_flags & VM_SHARED) {
@@ -329,7 +411,6 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
 		if (vma->vm_file)
 			fput(vma->vm_file);
 		vma->vm_file = asma->file;
-	vma->vm_flags |= VM_CAN_NONLINEAR;
 
 out:
 	mutex_unlock(&ashmem_mutex);
@@ -339,49 +420,64 @@ out:
 /*
  * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
  *
- * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
- * many objects (pages) we have in total.
+ * 'nr_to_scan' is the number of objects to scan for freeing.
  *
  * 'gfp_mask' is the mask of the allocation that got us into this mess.
  *
- * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * Return value is the number of objects freed or -1 if we cannot
  * proceed without risk of deadlock (due to gfp_mask).
  *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
-static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+static unsigned long
+ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct ashmem_range *range, *next;
+	unsigned long freed = 0;
 
 	/* We might recurse into filesystem code, so bail out if necessary */
-	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
-		return -1;
-	if (!sc->nr_to_scan)
-		return lru_count;
+	if (!(sc->gfp_mask & __GFP_FS))
+		return SHRINK_STOP;
 
 	mutex_lock(&ashmem_mutex);
 	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
-		struct inode *inode = range->asma->file->f_dentry->d_inode;
 		loff_t start = range->pgstart * PAGE_SIZE;
-		loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;
+		loff_t end = (range->pgend + 1) * PAGE_SIZE;
 
-		vmtruncate_range(inode, start, end);
+		do_fallocate(range->asma->file,
+				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				start, end - start);
 		range->purged = ASHMEM_WAS_PURGED;
 		lru_del(range);
 
-		sc->nr_to_scan -= range_size(range);
-		if (sc->nr_to_scan <= 0)
+		freed += range_size(range);
+		if (--sc->nr_to_scan <= 0)
 			break;
 	}
 	mutex_unlock(&ashmem_mutex);
+	return freed;
+}
 
+static unsigned long
+ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	/*
	 * note that lru_count is count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return the
	 * number of pages freed, not the number of objects scanned.
+	 */
 	return lru_count;
 }
 
 static struct shrinker ashmem_shrinker = {
-	.shrink = ashmem_shrink,
+	.count_objects = ashmem_shrink_count,
+	.scan_objects = ashmem_shrink_scan,
+	/*
+	 * XXX (dchinner): I wish people would comment on why they need on
+	 * significant changes to the default value here
+	 */
 	.seeks = DEFAULT_SEEKS * 4,
};
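
This hunk tracks the shrinker API rework that split the old .shrink callback into separate .count_objects and .scan_objects callbacks, with SHRINK_STOP replacing the old -1 return for "cannot proceed". For reference, a minimal shrinker in the new style looks roughly like the sketch below; the demo_* names and the counter are invented for illustration.

#include <linux/shrinker.h>

static unsigned long demo_nr_objects;	/* hypothetical cache size */

static unsigned long demo_count(struct shrinker *s,
				struct shrink_control *sc)
{
	/* count_objects() only reports a size; it must not free anything */
	return demo_nr_objects;
}

static unsigned long demo_scan(struct shrinker *s,
			       struct shrink_control *sc)
{
	unsigned long freed = 0;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;	/* may not recurse into FS code */

	while (demo_nr_objects && freed < sc->nr_to_scan) {
		demo_nr_objects--;	/* stand-in for a real eviction */
		freed++;
	}
	return freed;	/* objects actually freed, not merely scanned */
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* register_shrinker(&demo_shrinker) on init,
 * unregister_shrinker(&demo_shrinker) on teardown. */
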
@@ -410,50 +506,68 @@ out:
 
 static int set_name(struct ashmem_area *asma, void __user *name)
 {
+	int len;
 	int ret = 0;
+	char local_name[ASHMEM_NAME_LEN];
 
+	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * an data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, there by leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with lock held.
+	 */
+	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+	if (len < 0)
+		return len;
+	if (len == ASHMEM_NAME_LEN)
+		local_name[ASHMEM_NAME_LEN - 1] = '\0';
 	mutex_lock(&ashmem_mutex);
-
 	/* cannot change an existing mapping's name */
-	if (unlikely(asma->file)) {
+	if (unlikely(asma->file))
 		ret = -EINVAL;
-		goto out;
-	}
-
-	if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
-				    name, ASHMEM_NAME_LEN)))
-		ret = -EFAULT;
-	asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
+	else
+		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
 
-out:
 	mutex_unlock(&ashmem_mutex);
-
 	return ret;
 }
 
 static int get_name(struct ashmem_area *asma, void __user *name)
 {
 	int ret = 0;
+	size_t len;
+	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
+	 */
+	char local_name[ASHMEM_NAME_LEN];
 
 	mutex_lock(&ashmem_mutex);
 	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
-		size_t len;
 
 		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
 		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
-		if (unlikely(copy_to_user(name,
-				asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
-			ret = -EFAULT;
+		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
 	} else {
-		if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
-					  sizeof(ASHMEM_NAME_DEF))))
-			ret = -EFAULT;
+		len = sizeof(ASHMEM_NAME_DEF);
+		memcpy(local_name, ASHMEM_NAME_DEF, len);
 	}
 	mutex_unlock(&ashmem_mutex);
 
+	/*
	 * Now we are just copying from the stack variable to userland
	 * No lock held
+	 */
+	if (unlikely(copy_to_user(name, local_name, len)))
+		ret = -EFAULT;
 	return ret;
 }
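
set_name() and get_name() now share one discipline: never touch userspace memory while holding ashmem_mutex, because the resulting fault takes mmap_sem and can deadlock against ashmem_mmap() exactly as the comment above describes. Reduced to a skeleton, with the demo_obj type and field names below being hypothetical:

/* Hedged skeleton of the bounce-buffer pattern used above. */
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#define DEMO_FIELD_LEN 64

struct demo_obj {
	struct mutex lock;
	char field[DEMO_FIELD_LEN];
};

static int demo_set_field(struct demo_obj *o, const char __user *src)
{
	char local[DEMO_FIELD_LEN];
	long len;

	/* 1. Touch userspace with no locks held; this may fault. */
	len = strncpy_from_user(local, src, DEMO_FIELD_LEN);
	if (len < 0)
		return len;
	if (len == DEMO_FIELD_LEN)
		local[DEMO_FIELD_LEN - 1] = '\0';

	/* 2. Publish under the lock; strcpy() here cannot fault. */
	mutex_lock(&o->lock);
	strcpy(o->field, local);
	mutex_unlock(&o->lock);
	return 0;
}
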
@@ -668,11 +782,11 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		if (capable(CAP_SYS_ADMIN)) {
 			struct shrink_control sc = {
 				.gfp_mask = GFP_KERNEL,
-				.nr_to_scan = 0,
+				.nr_to_scan = LONG_MAX,
 			};
-			ret = ashmem_shrink(&ashmem_shrinker, &sc);
-			sc.nr_to_scan = ret;
-			ashmem_shrink(&ashmem_shrinker, &sc);
+			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
+			nodes_setall(sc.nodes_to_scan);
+			ashmem_shrink_scan(&ashmem_shrinker, &sc);
 		}
 		break;
 	}
@@ -680,7 +794,25 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	return ret;
 }
 
-static struct file_operations ashmem_fops = {
+/* support of 32bit userspace on 64bit platforms */
+#ifdef CONFIG_COMPAT
+static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+
+	switch (cmd) {
+	case COMPAT_ASHMEM_SET_SIZE:
+		cmd = ASHMEM_SET_SIZE;
+		break;
+	case COMPAT_ASHMEM_SET_PROT_MASK:
+		cmd = ASHMEM_SET_PROT_MASK;
+		break;
+	}
+	return ashmem_ioctl(file, cmd, arg);
+}
+#endif
+
+static const struct file_operations ashmem_fops = {
 	.owner = THIS_MODULE,
 	.open = ashmem_open,
 	.release = ashmem_release,
@@ -688,7 +820,9 @@ static struct file_operations ashmem_fops = {
 	.llseek = ashmem_llseek,
 	.mmap = ashmem_mmap,
 	.unlocked_ioctl = ashmem_ioctl,
-	.compat_ioctl = ashmem_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_ashmem_ioctl,
+#endif
 };
 
 static struct miscdevice ashmem_misc = {
@@ -705,7 +839,7 @@ static int __init ashmem_init(void)
 					  sizeof(struct ashmem_area),
 					  0, 0, NULL);
 	if (unlikely(!ashmem_area_cachep)) {
-		printk(KERN_ERR "ashmem: failed to create slab cache\n");
+		pr_err("failed to create slab cache\n");
 		return -ENOMEM;
 	}
 
@@ -713,19 +847,19 @@ static int __init ashmem_init(void)
 					  sizeof(struct ashmem_range),
 					  0, 0, NULL);
 	if (unlikely(!ashmem_range_cachep)) {
-		printk(KERN_ERR "ashmem: failed to create slab cache\n");
+		pr_err("failed to create slab cache\n");
 		return -ENOMEM;
 	}
 
 	ret = misc_register(&ashmem_misc);
 	if (unlikely(ret)) {
-		printk(KERN_ERR "ashmem: failed to register misc device!\n");
+		pr_err("failed to register misc device!\n");
 		return ret;
 	}
 
 	register_shrinker(&ashmem_shrinker);
 
-	printk(KERN_INFO "ashmem: initialized\n");
+	pr_info("initialized\n");
 
 	return 0;
 }
@@ -738,12 +872,12 @@ static void __exit ashmem_exit(void)
 
 	ret = misc_deregister(&ashmem_misc);
 	if (unlikely(ret))
-		printk(KERN_ERR "ashmem: failed to unregister misc device!\n");
+		pr_err("failed to unregister misc device!\n");
 
 	kmem_cache_destroy(ashmem_range_cachep);
 	kmem_cache_destroy(ashmem_area_cachep);
 
-	printk(KERN_INFO "ashmem: unloaded\n");
+	pr_info("unloaded\n");
 }
 
 module_init(ashmem_init);
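
Finally, the compat_ashmem_ioctl() addition exists because some ashmem command numbers encode the size of their argument type: _IOW(__ASHMEMIOC, 3, size_t) evaluates differently for 32-bit and 64-bit size_t, so a 32-bit process on a 64-bit kernel issues a command value the native handler would reject. A sketch of the divergence, assuming the usual _IOW() definitions in the driver's ashmem.h (the exact header contents are not shown in this diff):

#include <linux/ioctl.h>
#include <linux/compat.h>

#define __ASHMEMIOC 0x77	/* as in the driver's ashmem.h */

/* Built by 64-bit userspace: the size field of the number encodes an
 * 8-byte size_t... */
#define ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, size_t)

/* ...while a 32-bit caller encodes a 4-byte size_t, yielding a
 * different command value that compat_ashmem_ioctl() must translate
 * before calling the native handler. */
#define COMPAT_ASHMEM_SET_SIZE	_IOW(__ASHMEMIOC, 3, compat_size_t)

/* ASHMEM_SET_PROT_MASK (unsigned long vs unsigned int) diverges the
 * same way; commands whose encoding matches on both ABIs fall through
 * the switch unchanged and reach ashmem_ioctl() directly. */
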
