Diffstat (limited to 'drivers/staging/android/ashmem.c')
-rw-r--r-- | drivers/staging/android/ashmem.c | 139
1 file changed, 105 insertions, 34 deletions
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 8e76ddca099..23948f16701 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -37,41 +37,59 @@
 #define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
 #define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
 
-/*
- * ashmem_area - anonymous shared memory area
- * Lifecycle: From our parent file's open() until its release()
- * Locking: Protected by `ashmem_mutex'
- * Big Note: Mappings do NOT pin this structure; it dies on close()
+/**
+ * struct ashmem_area - The anonymous shared memory area
+ * @name: The optional name in /proc/pid/maps
+ * @unpinned_list: The list of this area's unpinned (evictable) page ranges
+ * @file: The shmem-based backing file
+ * @size: The size of the mapping, in bytes
+ * @prot_mask: The allowed protection bits, as vm_flags
+ *
+ * The lifecycle of this structure is from our parent file's open() until
+ * its release(). It is also protected by 'ashmem_mutex'.
+ *
+ * Warning: Mappings do NOT pin this structure; it dies on close()
  */
 struct ashmem_area {
-	char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
-	struct list_head unpinned_list;	 /* list of all ashmem areas */
-	struct file *file;		 /* the shmem-based backing file */
-	size_t size;			 /* size of the mapping, in bytes */
-	unsigned long prot_mask;	 /* allowed prot bits, as vm_flags */
+	char name[ASHMEM_FULL_NAME_LEN];
+	struct list_head unpinned_list;
+	struct file *file;
+	size_t size;
+	unsigned long prot_mask;
 };
 
-/*
- * ashmem_range - represents an interval of unpinned (evictable) pages
- * Lifecycle: From unpin to pin
- * Locking: Protected by `ashmem_mutex'
+/**
+ * struct ashmem_range - A range of unpinned/evictable pages
+ * @lru: The entry in the LRU list
+ * @unpinned: The entry in its area's unpinned list
+ * @asma: The associated anonymous shared memory area
+ * @pgstart: The starting page (inclusive)
+ * @pgend: The ending page (inclusive)
+ * @purged: The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ *
+ * The lifecycle of this structure is from unpin to pin.
+ * It is protected by 'ashmem_mutex'.
  */
 struct ashmem_range {
-	struct list_head lru;		/* entry in LRU list */
-	struct list_head unpinned;	/* entry in its area's unpinned list */
-	struct ashmem_area *asma;	/* associated area */
-	size_t pgstart;			/* starting page, inclusive */
-	size_t pgend;			/* ending page, inclusive */
-	unsigned int purged;		/* ASHMEM_NOT or ASHMEM_WAS_PURGED */
+	struct list_head lru;
+	struct list_head unpinned;
+	struct ashmem_area *asma;
+	size_t pgstart;
+	size_t pgend;
+	unsigned int purged;
 };
 
 /* LRU list of unpinned pages, protected by ashmem_mutex */
 static LIST_HEAD(ashmem_lru_list);
 
-/* Count of pages on our LRU list, protected by ashmem_mutex */
+/**
+ * lru_count - The count of pages on our LRU list.
+ *
+ * This is protected by ashmem_mutex.
+ */
 static unsigned long lru_count;
 
-/*
+/**
  * ashmem_mutex - protects the list of and each individual ashmem_area
  *
  * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem
@@ -105,28 +123,43 @@ static struct kmem_cache *ashmem_range_cachep __read_mostly;
 
 #define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
 
+/**
+ * lru_add() - Adds a range of memory to the LRU list
+ * @range: The memory range being added
+ *
+ * The range is first added to the end (tail) of the LRU list.
+ * After this, the size of the range is added to @lru_count.
+ */
 static inline void lru_add(struct ashmem_range *range)
 {
 	list_add_tail(&range->lru, &ashmem_lru_list);
 	lru_count += range_size(range);
 }
 
+/**
+ * lru_del() - Removes a range of memory from the LRU list
+ * @range: The memory range being removed
+ *
+ * The range is first deleted from the LRU list.
+ * After this, the size of the range is subtracted from @lru_count.
+ */
static inline void lru_del(struct ashmem_range *range)
 {
 	list_del(&range->lru);
 	lru_count -= range_size(range);
 }
 
-/*
- * range_alloc - allocate and initialize a new ashmem_range structure
+/**
+ * range_alloc() - Allocates and initializes a new ashmem_range structure
+ * @asma: The associated ashmem_area
+ * @prev_range: The previous ashmem_range in the sorted asma->unpinned list
+ * @purged: The initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ * @start: The starting page (inclusive)
+ * @end: The ending page (inclusive)
  *
- * 'asma' - associated ashmem_area
- * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
- * 'purged' - initial purge value (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
- * 'start' - starting page, inclusive
- * 'end' - ending page, inclusive
+ * The caller must hold ashmem_mutex.
  *
- * Caller must hold ashmem_mutex.
+ * Return: 0 if successful, or -ENOMEM if the allocation fails
  */
 static int range_alloc(struct ashmem_area *asma,
 		       struct ashmem_range *prev_range, unsigned int purged,
@@ -151,6 +184,10 @@ static int range_alloc(struct ashmem_area *asma,
 	return 0;
 }
 
+/**
+ * range_del() - Deletes and deallocates an ashmem_range structure
+ * @range: The associated ashmem_range that has previously been allocated
+ */
 static void range_del(struct ashmem_range *range)
 {
 	list_del(&range->unpinned);
@@ -159,10 +196,17 @@ static void range_del(struct ashmem_range *range)
 	kmem_cache_free(ashmem_range_cachep, range);
 }
 
-/*
- * range_shrink - shrinks a range
+/**
+ * range_shrink() - Shrinks an ashmem_range
+ * @range: The associated ashmem_range being shrunk
+ * @start: The starting byte of the new range
+ * @end: The ending byte of the new range
  *
- * Caller must hold ashmem_mutex.
+ * This does not modify the data inside the existing range in any way; it
+ * simply shrinks the boundaries of the range.
+ *
+ * In theory, with a little tweaking, this could eventually be changed
+ * to range_resize(), growing lru_count if the new range is larger.
  */
 static inline void range_shrink(struct ashmem_range *range,
 				size_t start, size_t end)
@@ -176,6 +220,16 @@ static inline void range_shrink(struct ashmem_range *range,
 	lru_count -= pre - range_size(range);
 }
 
+/**
+ * ashmem_open() - Opens an Anonymous Shared Memory structure
+ * @inode: The backing file's inode
+ * @file: The backing file
+ *
+ * Note that the new ashmem_area is not returned by this function; it is
+ * instead stored in file->private_data.
+ *
+ * Return: 0 if successful, or a negative error code if unsuccessful
+ */
 static int ashmem_open(struct inode *inode, struct file *file)
 {
 	struct ashmem_area *asma;
@@ -197,6 +251,14 @@ static int ashmem_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
+/**
+ * ashmem_release() - Releases an Anonymous Shared Memory structure
+ * @ignored: The backing file's inode, which is ignored here
+ * @file: The backing file
+ *
+ * Return: 0 on success. If it returns anything else, go have a coffee
+ * and try again.
+ */
 static int ashmem_release(struct inode *ignored, struct file *file)
 {
 	struct ashmem_area *asma = file->private_data;
@@ -214,6 +276,15 @@ static int ashmem_release(struct inode *ignored, struct file *file)
 	return 0;
 }
 
+/**
+ * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
+ * @file: The associated backing file
+ * @buf: The user buffer that the data is copied to
+ * @len: The maximum number of bytes to read
+ * @pos: The position of the first byte to read
+ *
+ * Return: The number of bytes read, or a negative error code on failure
+ */
 static ssize_t ashmem_read(struct file *file, char __user *buf,
 			   size_t len, loff_t *pos)
 {
@@ -706,7 +777,7 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			.gfp_mask = GFP_KERNEL,
 			.nr_to_scan = LONG_MAX,
 		};
-
+		ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
 		nodes_setall(sc.nodes_to_scan);
 		ashmem_shrink_scan(&ashmem_shrinker, &sc);
 	}
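For context on what the pin/unpin kernel-doc comments above describe, the sketch below shows the lifecycle from user space: a range is unpinned (which makes the driver allocate an ashmem_range and place it on the LRU list), then re-pinned, and the ASHMEM_PIN ioctl's return value reports the purged status tracked in struct ashmem_range. This is a minimal illustrative sketch, not part of this patch; it assumes the legacy /dev/ashmem device node and the uapi header <linux/ashmem.h> are available (newer Android releases hide ashmem behind ASharedMemory/memfd).

/*
 * Minimal user-space sketch of the ashmem pin/unpin lifecycle.
 * Assumes the legacy /dev/ashmem device node and <linux/ashmem.h>;
 * illustrative only, not part of this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ashmem.h>

int main(void)
{
	const size_t size = 4 * 4096;	/* pin/unpin ranges must be page-aligned */
	char name[ASHMEM_NAME_LEN] = "example-region";
	struct ashmem_pin pin = { .offset = 0, .len = (__u32)size };
	char *p;
	int fd;

	fd = open("/dev/ashmem", O_RDWR);
	if (fd < 0) {
		perror("open /dev/ashmem");
		return 1;
	}

	/* The name (visible in /proc/pid/maps) and the size must be set
	 * before the region is mapped. */
	ioctl(fd, ASHMEM_SET_NAME, name);
	ioctl(fd, ASHMEM_SET_SIZE, size);

	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	strcpy(p, "hello");

	/* Unpin the whole region: in the driver this allocates an
	 * ashmem_range and places it on the LRU list, making the pages
	 * evictable under memory pressure. */
	ioctl(fd, ASHMEM_UNPIN, &pin);

	/* Pin it again before reuse. The driver deletes the range and
	 * reports whether the pages were purged while unpinned. */
	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
		printf("purged while unpinned; regenerate contents\n");
	else
		printf("contents survived: %s\n", p);

	munmap(p, size);
	close(fd);
	return 0;
}

Note that the offset and length passed to ASHMEM_PIN/ASHMEM_UNPIN must be page-aligned (the driver rejects unaligned values with -EINVAL), which is why the example works in whole pages.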