Diffstat (limited to 'drivers/staging/android')
42 files changed, 5964 insertions, 727 deletions
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index c0c95be0f96..99e484f845f 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -2,7 +2,6 @@ menu "Android"  config ANDROID  	bool "Android Drivers" -	default N  	---help---  	  Enable support for various drivers needed on the Android platform @@ -10,6 +9,7 @@ if ANDROID  config ANDROID_BINDER_IPC  	bool "Android Binder IPC Driver" +	depends on MMU  	default n  	---help---  	  Binder is used in Android for both communication between processes, @@ -19,6 +19,19 @@ config ANDROID_BINDER_IPC  	  Android process, using Binder to identify, invoke and pass arguments  	  between said processes. +config ANDROID_BINDER_IPC_32BIT +	bool +	depends on !64BIT && ANDROID_BINDER_IPC +	default y +	---help--- +	  The Binder API has been changed to support both 32 and 64bit +	  applications in a mixed environment. + +	  Enable this to support an old 32-bit Android user-space (v4.4 and +	  earlier). + +	  Note that enabling this will break newer Android user-space. +  config ASHMEM  	bool "Enable the Anonymous Shared Memory Subsystem"  	default n @@ -59,7 +72,6 @@ config ANDROID_TIMED_GPIO  config ANDROID_LOW_MEMORY_KILLER  	bool "Android Low Memory Killer" -	default N  	---help---  	  Registers processes to be killed when memory is low @@ -76,7 +88,7 @@ config SYNC  	bool "Synchronization framework"  	default n  	select ANON_INODES -	help +	---help---  	  This option enables the framework for synchronization between multiple  	  drivers.  Sync implementations can take advantage of hardware  	  synchronization built into devices like GPUs. @@ -85,7 +97,7 @@ config SW_SYNC  	bool "Software synchronization objects"  	default n  	depends on SYNC -	help +	---help---  	  A sync object driver that uses a 32bit counter to coordinate  	  syncrhronization.  Useful when there is no hardware primitive backing  	  the synchronization. @@ -94,11 +106,13 @@ config SW_SYNC_USER  	bool "Userspace API for SW_SYNC"  	default n  	depends on SW_SYNC -	help +	---help---  	  Provides a user space API to the sw sync object.  	  *WARNING* improper use of this can result in deadlocking kernel  	  drivers from userspace. 
+source "drivers/staging/android/ion/Kconfig" +  endif # if ANDROID  endmenu diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index c136299e05a..0a01e191490 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -1,5 +1,7 @@  ccflags-y += -I$(src)			# needed for trace events +obj-y					+= ion/ +  obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o  obj-$(CONFIG_ASHMEM)			+= ashmem.o  obj-$(CONFIG_ANDROID_LOGGER)		+= logger.o diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c index 6dc27dac679..f200e8a8432 100644 --- a/drivers/staging/android/alarm-dev.c +++ b/drivers/staging/android/alarm-dev.c @@ -60,14 +60,18 @@ struct devalarm {  static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT]; - +/** + * is_wakeup() - Checks to see if this alarm can wake the device + * @type:	 The type of alarm being checked + * + * Return: 1 if this is a wakeup alarm, otherwise 0 + */  static int is_wakeup(enum android_alarm_type type)  { -	return (type == ANDROID_ALARM_RTC_WAKEUP || -		type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP); +	return type == ANDROID_ALARM_RTC_WAKEUP || +		type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP;  } -  static void devalarm_start(struct devalarm *alrm, ktime_t exp)  {  	if (is_wakeup(alrm->type)) @@ -76,7 +80,6 @@ static void devalarm_start(struct devalarm *alrm, ktime_t exp)  		hrtimer_start(&alrm->u.hrt, exp, HRTIMER_MODE_ABS);  } -  static int devalarm_try_to_cancel(struct devalarm *alrm)  {  	if (is_wakeup(alrm->type)) @@ -107,7 +110,6 @@ static void alarm_clear(enum android_alarm_type alarm_type)  	}  	alarm_enabled &= ~alarm_type_mask;  	spin_unlock_irqrestore(&alarm_slock, flags); -  }  static void alarm_set(enum android_alarm_type alarm_type, @@ -276,6 +278,7 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)  	return 0;  } +  #ifdef CONFIG_COMPAT  static long alarm_compat_ioctl(struct file *file, unsigned int cmd,  							unsigned long arg) @@ -326,6 +329,7 @@ static int alarm_release(struct inode *inode, struct file *file)  	if (file->private_data) {  		for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {  			uint32_t alarm_type_mask = 1U << i; +  			if (alarm_enabled & alarm_type_mask) {  				alarm_dbg(INFO,  					  "%s: clear alarm, pending %d\n", @@ -367,7 +371,6 @@ static void devalarm_triggered(struct devalarm *alarm)  	spin_unlock_irqrestore(&alarm_slock, flags);  } -  static enum hrtimer_restart devalarm_hrthandler(struct hrtimer *hrt)  {  	struct devalarm *devalrm = container_of(hrt, struct devalarm, u.hrt); diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h index 4fd32f337f9..495b20cf3bf 100644 --- a/drivers/staging/android/android_alarm.h +++ b/drivers/staging/android/android_alarm.h @@ -16,50 +16,10 @@  #ifndef _LINUX_ANDROID_ALARM_H  #define _LINUX_ANDROID_ALARM_H -#include <linux/ioctl.h> -#include <linux/time.h>  #include <linux/compat.h> +#include <linux/ioctl.h> -enum android_alarm_type { -	/* return code bit numbers or set alarm arg */ -	ANDROID_ALARM_RTC_WAKEUP, -	ANDROID_ALARM_RTC, -	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, -	ANDROID_ALARM_ELAPSED_REALTIME, -	ANDROID_ALARM_SYSTEMTIME, - -	ANDROID_ALARM_TYPE_COUNT, - -	/* return code bit numbers */ -	/* ANDROID_ALARM_TIME_CHANGE = 16 */ -}; - -enum android_alarm_return_flags { -	ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP, -	ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC, -	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK = -			
	1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, -	ANDROID_ALARM_ELAPSED_REALTIME_MASK = -				1U << ANDROID_ALARM_ELAPSED_REALTIME, -	ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME, -	ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16 -}; - -/* Disable alarm */ -#define ANDROID_ALARM_CLEAR(type)           _IO('a', 0 | ((type) << 4)) - -/* Ack last alarm and wait for next */ -#define ANDROID_ALARM_WAIT                  _IO('a', 1) - -#define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size) -/* Set alarm */ -#define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec) -#define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec) -#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec) -#define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec) -#define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0))) -#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4) - +#include "uapi/android_alarm.h"  #ifdef CONFIG_COMPAT  #define ANDROID_ALARM_SET_COMPAT(type)		ALARM_IOW(2, type, \ diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 8e76ddca099..713a9722678 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -37,41 +37,59 @@  #define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)  #define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN) -/* - * ashmem_area - anonymous shared memory area - * Lifecycle: From our parent file's open() until its release() - * Locking: Protected by `ashmem_mutex' - * Big Note: Mappings do NOT pin this structure; it dies on close() +/** + * struct ashmem_area - The anonymous shared memory area + * @name:		The optional name in /proc/pid/maps + * @unpinned_list:	The list of all ashmem areas + * @file:		The shmem-based backing file + * @size:		The size of the mapping, in bytes + * @prot_masks:		The allowed protection bits, as vm_flags + * + * The lifecycle of this structure is from our parent file's open() until + * its release(). It is also protected by 'ashmem_mutex' + * + * Warning: Mappings do NOT pin this structure; It dies on close()   */  struct ashmem_area { -	char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */ -	struct list_head unpinned_list;	 /* list of all ashmem areas */ -	struct file *file;		 /* the shmem-based backing file */ -	size_t size;			 /* size of the mapping, in bytes */ -	unsigned long prot_mask;	 /* allowed prot bits, as vm_flags */ +	char name[ASHMEM_FULL_NAME_LEN]; +	struct list_head unpinned_list; +	struct file *file; +	size_t size; +	unsigned long prot_mask;  }; -/* - * ashmem_range - represents an interval of unpinned (evictable) pages - * Lifecycle: From unpin to pin - * Locking: Protected by `ashmem_mutex' +/** + * struct ashmem_range - A range of unpinned/evictable pages + * @lru:	         The entry in the LRU list + * @unpinned:	         The entry in its area's unpinned list + * @asma:	         The associated anonymous shared memory area. + * @pgstart:	         The starting page (inclusive) + * @pgend:	         The ending page (inclusive) + * @purged:	         The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED) + * + * The lifecycle of this structure is from unpin to pin. 
+ * It is protected by 'ashmem_mutex'   */  struct ashmem_range { -	struct list_head lru;		/* entry in LRU list */ -	struct list_head unpinned;	/* entry in its area's unpinned list */ -	struct ashmem_area *asma;	/* associated area */ -	size_t pgstart;			/* starting page, inclusive */ -	size_t pgend;			/* ending page, inclusive */ -	unsigned int purged;		/* ASHMEM_NOT or ASHMEM_WAS_PURGED */ +	struct list_head lru; +	struct list_head unpinned; +	struct ashmem_area *asma; +	size_t pgstart; +	size_t pgend; +	unsigned int purged;  };  /* LRU list of unpinned pages, protected by ashmem_mutex */  static LIST_HEAD(ashmem_lru_list); -/* Count of pages on our LRU list, protected by ashmem_mutex */ +/** + * long lru_count - The count of pages on our LRU list. + * + * This is protected by ashmem_mutex. + */  static unsigned long lru_count; -/* +/**   * ashmem_mutex - protects the list of and each individual ashmem_area   *   * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem @@ -105,28 +123,43 @@ static struct kmem_cache *ashmem_range_cachep __read_mostly;  #define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE) +/** + * lru_add() - Adds a range of memory to the LRU list + * @range:     The memory range being added. + * + * The range is first added to the end (tail) of the LRU list. + * After this, the size of the range is added to @lru_count + */  static inline void lru_add(struct ashmem_range *range)  {  	list_add_tail(&range->lru, &ashmem_lru_list);  	lru_count += range_size(range);  } +/** + * lru_del() - Removes a range of memory from the LRU list + * @range:     The memory range being removed + * + * The range is first deleted from the LRU list. + * After this, the size of the range is removed from @lru_count + */  static inline void lru_del(struct ashmem_range *range)  {  	list_del(&range->lru);  	lru_count -= range_size(range);  } -/* - * range_alloc - allocate and initialize a new ashmem_range structure +/** + * range_alloc() - Allocates and initializes a new ashmem_range structure + * @asma:	   The associated ashmem_area + * @prev_range:	   The previous ashmem_range in the sorted asma->unpinned list + * @purged:	   Initial purge status (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED) + * @start:	   The starting page (inclusive) + * @end:	   The ending page (inclusive)   * - * 'asma' - associated ashmem_area - * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list - * 'purged' - initial purge value (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED) - * 'start' - starting page, inclusive - * 'end' - ending page, inclusive + * This function is protected by ashmem_mutex.   * - * Caller must hold ashmem_mutex. 
+ * Return: 0 if successful, or -ENOMEM if there is an error   */  static int range_alloc(struct ashmem_area *asma,  		       struct ashmem_range *prev_range, unsigned int purged, @@ -151,6 +184,10 @@ static int range_alloc(struct ashmem_area *asma,  	return 0;  } +/** + * range_del() - Deletes and dealloctes an ashmem_range structure + * @range:	 The associated ashmem_range that has previously been allocated + */  static void range_del(struct ashmem_range *range)  {  	list_del(&range->unpinned); @@ -159,10 +196,17 @@ static void range_del(struct ashmem_range *range)  	kmem_cache_free(ashmem_range_cachep, range);  } -/* - * range_shrink - shrinks a range +/** + * range_shrink() - Shrinks an ashmem_range + * @range:	    The associated ashmem_range being shrunk + * @start:	    The starting byte of the new range + * @end:	    The ending byte of the new range   * - * Caller must hold ashmem_mutex. + * This does not modify the data inside the existing range in any way - It + * simply shrinks the boundaries of the range. + * + * Theoretically, with a little tweaking, this could eventually be changed + * to range_resize, and expand the lru_count if the new range is larger.   */  static inline void range_shrink(struct ashmem_range *range,  				size_t start, size_t end) @@ -176,6 +220,16 @@ static inline void range_shrink(struct ashmem_range *range,  		lru_count -= pre - range_size(range);  } +/** + * ashmem_open() - Opens an Anonymous Shared Memory structure + * @inode:	   The backing file's index node(?) + * @file:	   The backing file + * + * Please note that the ashmem_area is not returned by this function - It is + * instead written to "file->private_data". + * + * Return: 0 if successful, or another code if unsuccessful. + */  static int ashmem_open(struct inode *inode, struct file *file)  {  	struct ashmem_area *asma; @@ -197,6 +251,14 @@ static int ashmem_open(struct inode *inode, struct file *file)  	return 0;  } +/** + * ashmem_release() - Releases an Anonymous Shared Memory structure + * @ignored:	      The backing file's Index Node(?) - It is ignored here. + * @file:	      The backing file + * + * Return: 0 if successful. If it is anything else, go have a coffee and + * try again. + */  static int ashmem_release(struct inode *ignored, struct file *file)  {  	struct ashmem_area *asma = file->private_data; @@ -214,6 +276,15 @@ static int ashmem_release(struct inode *ignored, struct file *file)  	return 0;  } +/** + * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file + * @file:	   The associated backing file. + * @buf:	   The buffer of data being written to + * @len:	   The number of bytes being read + * @pos:	   The position of the first byte to read. + * + * Return: 0 if successful, or another return code if not. + */  static ssize_t ashmem_read(struct file *file, char __user *buf,  			   size_t len, loff_t *pos)  { @@ -224,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,  	/* If size is not set, or set to 0, always return EOF. */  	if (asma->size == 0) -		goto out; +		goto out_unlock;  	if (!asma->file) {  		ret = -EBADF; -		goto out; +		goto out_unlock;  	} -	ret = asma->file->f_op->read(asma->file, buf, len, pos); -	if (ret < 0) -		goto out; +	mutex_unlock(&ashmem_mutex); -	/** Update backing file pos, since f_ops->read() doesn't */ -	asma->file->f_pos = *pos; +	/* +	 * asma and asma->file are used outside the lock here.  
We assume +	 * once asma->file is set it will never be changed, and will not +	 * be destroyed until all references to the file are dropped and +	 * ashmem_release is called. +	 */ +	ret = asma->file->f_op->read(asma->file, buf, len, pos); +	if (ret >= 0) { +		/** Update backing file pos, since f_ops->read() doesn't */ +		asma->file->f_pos = *pos; +	} +	return ret; -out: +out_unlock:  	mutex_unlock(&ashmem_mutex);  	return ret;  } @@ -427,6 +506,7 @@ out:  static int set_name(struct ashmem_area *asma, void __user *name)  { +	int len;  	int ret = 0;  	char local_name[ASHMEM_NAME_LEN]; @@ -439,21 +519,19 @@ static int set_name(struct ashmem_area *asma, void __user *name)  	 * variable that does not need protection and later copy the local  	 * variable to the structure member with lock held.  	 */ -	if (copy_from_user(local_name, name, ASHMEM_NAME_LEN)) -		return -EFAULT; - +	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN); +	if (len < 0) +		return len; +	if (len == ASHMEM_NAME_LEN) +		local_name[ASHMEM_NAME_LEN - 1] = '\0';  	mutex_lock(&ashmem_mutex);  	/* cannot change an existing mapping's name */ -	if (unlikely(asma->file)) { +	if (unlikely(asma->file))  		ret = -EINVAL; -		goto out; -	} -	memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, -		local_name, ASHMEM_NAME_LEN); -	asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0'; -out: -	mutex_unlock(&ashmem_mutex); +	else +		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name); +	mutex_unlock(&ashmem_mutex);  	return ret;  } @@ -706,7 +784,7 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)  				.gfp_mask = GFP_KERNEL,  				.nr_to_scan = LONG_MAX,  			}; - +			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);  			nodes_setall(sc.nodes_to_scan);  			ashmem_shrink_scan(&ashmem_shrinker, &sc);  		} diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h index 8dc0f0d3adf..5abcfd7aa70 100644 --- a/drivers/staging/android/ashmem.h +++ b/drivers/staging/android/ashmem.h @@ -16,35 +16,7 @@  #include <linux/ioctl.h>  #include <linux/compat.h> -#define ASHMEM_NAME_LEN		256 - -#define ASHMEM_NAME_DEF		"dev/ashmem" - -/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */ -#define ASHMEM_NOT_PURGED	0 -#define ASHMEM_WAS_PURGED	1 - -/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? 
*/ -#define ASHMEM_IS_UNPINNED	0 -#define ASHMEM_IS_PINNED	1 - -struct ashmem_pin { -	__u32 offset;	/* offset into region, in bytes, page-aligned */ -	__u32 len;	/* length forward from offset, in bytes, page-aligned */ -}; - -#define __ASHMEMIOC		0x77 - -#define ASHMEM_SET_NAME		_IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN]) -#define ASHMEM_GET_NAME		_IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN]) -#define ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, size_t) -#define ASHMEM_GET_SIZE		_IO(__ASHMEMIOC, 4) -#define ASHMEM_SET_PROT_MASK	_IOW(__ASHMEMIOC, 5, unsigned long) -#define ASHMEM_GET_PROT_MASK	_IO(__ASHMEMIOC, 6) -#define ASHMEM_PIN		_IOW(__ASHMEMIOC, 7, struct ashmem_pin) -#define ASHMEM_UNPIN		_IOW(__ASHMEMIOC, 8, struct ashmem_pin) -#define ASHMEM_GET_PIN_STATUS	_IO(__ASHMEMIOC, 9) -#define ASHMEM_PURGE_ALL_CACHES	_IO(__ASHMEMIOC, 10) +#include "uapi/ashmem.h"  /* support of 32bit userspace on 64bit platforms */  #ifdef CONFIG_COMPAT diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c index 98ac020bf91..a741da77828 100644 --- a/drivers/staging/android/binder.c +++ b/drivers/staging/android/binder.c @@ -118,6 +118,7 @@ static int binder_set_stop_on_user_error(const char *val,  					 struct kernel_param *kp)  {  	int ret; +  	ret = param_set_int(val, kp);  	if (binder_stop_on_user_error < 2)  		wake_up(&binder_user_error_wait); @@ -194,6 +195,7 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(  	struct binder_transaction_log *log)  {  	struct binder_transaction_log_entry *e; +  	e = &log->entry[log->next];  	memset(e, 0, sizeof(*e));  	log->next++; @@ -228,8 +230,8 @@ struct binder_node {  	int internal_strong_refs;  	int local_weak_refs;  	int local_strong_refs; -	void __user *ptr; -	void __user *cookie; +	binder_uintptr_t ptr; +	binder_uintptr_t cookie;  	unsigned has_strong_ref:1;  	unsigned pending_strong_ref:1;  	unsigned has_weak_ref:1; @@ -242,7 +244,7 @@ struct binder_node {  struct binder_ref_death {  	struct binder_work work; -	void __user *cookie; +	binder_uintptr_t cookie;  };  struct binder_ref { @@ -432,16 +434,17 @@ static inline void binder_unlock(const char *tag)  static void binder_set_nice(long nice)  {  	long min_nice; +  	if (can_nice(current, nice)) {  		set_user_nice(current, nice);  		return;  	} -	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur; +	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);  	binder_debug(BINDER_DEBUG_PRIORITY_CAP,  		     "%d: nice value %ld not allowed use %ld instead\n",  		      current->pid, nice, min_nice);  	set_user_nice(current, min_nice); -	if (min_nice < 20) +	if (min_nice <= MAX_NICE)  		return;  	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);  } @@ -515,14 +518,14 @@ static void binder_insert_allocated_buffer(struct binder_proc *proc,  }  static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, -						  void __user *user_ptr) +						  uintptr_t user_ptr)  {  	struct rb_node *n = proc->allocated_buffers.rb_node;  	struct binder_buffer *buffer;  	struct binder_buffer *kern_ptr; -	kern_ptr = user_ptr - proc->user_buffer_offset -		- offsetof(struct binder_buffer, data); +	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset +		- offsetof(struct binder_buffer, data));  	while (n) {  		buffer = rb_entry(n, struct binder_buffer, rb_node); @@ -584,6 +587,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,  	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {  		int 
ret;  		struct page **page_array_ptr; +  		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];  		BUG_ON(*page); @@ -726,6 +730,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,  	binder_insert_allocated_buffer(proc, buffer);  	if (buffer_size != size) {  		struct binder_buffer *new_buffer = (void *)buffer->data + size; +  		list_add(&new_buffer->entry, &buffer->entry);  		new_buffer->free = 1;  		binder_insert_free_buffer(proc, new_buffer); @@ -838,6 +843,7 @@ static void binder_free_buf(struct binder_proc *proc,  	if (!list_is_last(&buffer->entry, &proc->buffers)) {  		struct binder_buffer *next = list_entry(buffer->entry.next,  						struct binder_buffer, entry); +  		if (next->free) {  			rb_erase(&next->rb_node, &proc->free_buffers);  			binder_delete_free_buffer(proc, next); @@ -846,6 +852,7 @@ static void binder_free_buf(struct binder_proc *proc,  	if (proc->buffers.next != &buffer->entry) {  		struct binder_buffer *prev = list_entry(buffer->entry.prev,  						struct binder_buffer, entry); +  		if (prev->free) {  			binder_delete_free_buffer(proc, buffer);  			rb_erase(&prev->rb_node, &proc->free_buffers); @@ -856,7 +863,7 @@ static void binder_free_buf(struct binder_proc *proc,  }  static struct binder_node *binder_get_node(struct binder_proc *proc, -					   void __user *ptr) +					   binder_uintptr_t ptr)  {  	struct rb_node *n = proc->nodes.rb_node;  	struct binder_node *node; @@ -875,8 +882,8 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,  }  static struct binder_node *binder_new_node(struct binder_proc *proc, -					   void __user *ptr, -					   void __user *cookie) +					   binder_uintptr_t ptr, +					   binder_uintptr_t cookie)  {  	struct rb_node **p = &proc->nodes.rb_node;  	struct rb_node *parent = NULL; @@ -908,9 +915,9 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,  	INIT_LIST_HEAD(&node->work.entry);  	INIT_LIST_HEAD(&node->async_todo);  	binder_debug(BINDER_DEBUG_INTERNAL_REFS, -		     "%d:%d node %d u%p c%p created\n", +		     "%d:%d node %d u%016llx c%016llx created\n",  		     proc->pid, current->pid, node->debug_id, -		     node->ptr, node->cookie); +		     (u64)node->ptr, (u64)node->cookie);  	return node;  } @@ -1107,6 +1114,7 @@ static int binder_inc_ref(struct binder_ref *ref, int strong,  			  struct list_head *target_list)  {  	int ret; +  	if (strong) {  		if (ref->strong == 0) {  			ret = binder_inc_node(ref->node, 1, 1, target_list); @@ -1138,6 +1146,7 @@ static int binder_dec_ref(struct binder_ref *ref, int strong)  		ref->strong--;  		if (ref->strong == 0) {  			int ret; +  			ret = binder_dec_node(ref->node, strong, 1);  			if (ret)  				return ret; @@ -1177,6 +1186,7 @@ static void binder_send_failed_reply(struct binder_transaction *t,  				     uint32_t error_code)  {  	struct binder_thread *target_thread; +  	BUG_ON(t->flags & TF_ONE_WAY);  	while (1) {  		target_thread = t->from; @@ -1226,9 +1236,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,  static void binder_transaction_buffer_release(struct binder_proc *proc,  					      struct binder_buffer *buffer, -					      size_t *failed_at) +					      binder_size_t *failed_at)  { -	size_t *offp, *off_end; +	binder_size_t *offp, *off_end;  	int debug_id = buffer->debug_id;  	binder_debug(BINDER_DEBUG_TRANSACTION, @@ -1239,18 +1249,20 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,  	if (buffer->target_node)  		binder_dec_node(buffer->target_node, 1, 0); -	offp = (size_t 
*)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); +	offp = (binder_size_t *)(buffer->data + +				 ALIGN(buffer->data_size, sizeof(void *)));  	if (failed_at)  		off_end = failed_at;  	else  		off_end = (void *)offp + buffer->offsets_size;  	for (; offp < off_end; offp++) {  		struct flat_binder_object *fp; +  		if (*offp > buffer->data_size - sizeof(*fp) ||  		    buffer->data_size < sizeof(*fp) ||  		    !IS_ALIGNED(*offp, sizeof(u32))) { -			pr_err("transaction release %d bad offset %zd, size %zd\n", -			 debug_id, *offp, buffer->data_size); +			pr_err("transaction release %d bad offset %lld, size %zd\n", +			       debug_id, (u64)*offp, buffer->data_size);  			continue;  		}  		fp = (struct flat_binder_object *)(buffer->data + *offp); @@ -1258,19 +1270,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,  		case BINDER_TYPE_BINDER:  		case BINDER_TYPE_WEAK_BINDER: {  			struct binder_node *node = binder_get_node(proc, fp->binder); +  			if (node == NULL) { -				pr_err("transaction release %d bad node %p\n", -					debug_id, fp->binder); +				pr_err("transaction release %d bad node %016llx\n", +				       debug_id, (u64)fp->binder);  				break;  			}  			binder_debug(BINDER_DEBUG_TRANSACTION, -				     "        node %d u%p\n", -				     node->debug_id, node->ptr); +				     "        node %d u%016llx\n", +				     node->debug_id, (u64)node->ptr);  			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);  		} break;  		case BINDER_TYPE_HANDLE:  		case BINDER_TYPE_WEAK_HANDLE: {  			struct binder_ref *ref = binder_get_ref(proc, fp->handle); +  			if (ref == NULL) {  				pr_err("transaction release %d bad handle %d\n",  				 debug_id, fp->handle); @@ -1303,7 +1317,7 @@ static void binder_transaction(struct binder_proc *proc,  {  	struct binder_transaction *t;  	struct binder_work *tcomplete; -	size_t *offp, *off_end; +	binder_size_t *offp, *off_end;  	struct binder_proc *target_proc;  	struct binder_thread *target_thread = NULL;  	struct binder_node *target_node = NULL; @@ -1362,6 +1376,7 @@ static void binder_transaction(struct binder_proc *proc,  	} else {  		if (tr->target.handle) {  			struct binder_ref *ref; +  			ref = binder_get_ref(proc, tr->target.handle);  			if (ref == NULL) {  				binder_user_error("%d:%d got transaction to invalid handle\n", @@ -1385,6 +1400,7 @@ static void binder_transaction(struct binder_proc *proc,  		}  		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {  			struct binder_transaction *tmp; +  			tmp = thread->transaction_stack;  			if (tmp->to_thread != thread) {  				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", @@ -1432,24 +1448,26 @@ static void binder_transaction(struct binder_proc *proc,  	if (reply)  		binder_debug(BINDER_DEBUG_TRANSACTION, -			     "%d:%d BC_REPLY %d -> %d:%d, data %p-%p size %zd-%zd\n", +			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",  			     proc->pid, thread->pid, t->debug_id,  			     target_proc->pid, target_thread->pid, -			     tr->data.ptr.buffer, tr->data.ptr.offsets, -			     tr->data_size, tr->offsets_size); +			     (u64)tr->data.ptr.buffer, +			     (u64)tr->data.ptr.offsets, +			     (u64)tr->data_size, (u64)tr->offsets_size);  	else  		binder_debug(BINDER_DEBUG_TRANSACTION, -			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %p-%p size %zd-%zd\n", +			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",  			     proc->pid, thread->pid, 
t->debug_id,  			     target_proc->pid, target_node->debug_id, -			     tr->data.ptr.buffer, tr->data.ptr.offsets, -			     tr->data_size, tr->offsets_size); +			     (u64)tr->data.ptr.buffer, +			     (u64)tr->data.ptr.offsets, +			     (u64)tr->data_size, (u64)tr->offsets_size);  	if (!reply && !(tr->flags & TF_ONE_WAY))  		t->from = thread;  	else  		t->from = NULL; -	t->sender_euid = proc->tsk->cred->euid; +	t->sender_euid = task_euid(proc->tsk);  	t->to_proc = target_proc;  	t->to_thread = target_thread;  	t->code = tr->code; @@ -1472,34 +1490,38 @@ static void binder_transaction(struct binder_proc *proc,  	if (target_node)  		binder_inc_node(target_node, 1, 0, NULL); -	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); +	offp = (binder_size_t *)(t->buffer->data + +				 ALIGN(tr->data_size, sizeof(void *))); -	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) { +	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) +			   tr->data.ptr.buffer, tr->data_size)) {  		binder_user_error("%d:%d got transaction with invalid data ptr\n",  				proc->pid, thread->pid);  		return_error = BR_FAILED_REPLY;  		goto err_copy_data_failed;  	} -	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) { +	if (copy_from_user(offp, (const void __user *)(uintptr_t) +			   tr->data.ptr.offsets, tr->offsets_size)) {  		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",  				proc->pid, thread->pid);  		return_error = BR_FAILED_REPLY;  		goto err_copy_data_failed;  	} -	if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { -		binder_user_error("%d:%d got transaction with invalid offsets size, %zd\n", -				proc->pid, thread->pid, tr->offsets_size); +	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { +		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", +				proc->pid, thread->pid, (u64)tr->offsets_size);  		return_error = BR_FAILED_REPLY;  		goto err_bad_offset;  	}  	off_end = (void *)offp + tr->offsets_size;  	for (; offp < off_end; offp++) {  		struct flat_binder_object *fp; +  		if (*offp > t->buffer->data_size - sizeof(*fp) ||  		    t->buffer->data_size < sizeof(*fp) ||  		    !IS_ALIGNED(*offp, sizeof(u32))) { -			binder_user_error("%d:%d got transaction with invalid offset, %zd\n", -					proc->pid, thread->pid, *offp); +			binder_user_error("%d:%d got transaction with invalid offset, %lld\n", +					  proc->pid, thread->pid, (u64)*offp);  			return_error = BR_FAILED_REPLY;  			goto err_bad_offset;  		} @@ -1509,6 +1531,7 @@ static void binder_transaction(struct binder_proc *proc,  		case BINDER_TYPE_WEAK_BINDER: {  			struct binder_ref *ref;  			struct binder_node *node = binder_get_node(proc, fp->binder); +  			if (node == NULL) {  				node = binder_new_node(proc, fp->binder, fp->cookie);  				if (node == NULL) { @@ -1519,10 +1542,11 @@ static void binder_transaction(struct binder_proc *proc,  				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);  			}  			if (fp->cookie != node->cookie) { -				binder_user_error("%d:%d sending u%p node %d, cookie mismatch %p != %p\n", +				binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",  					proc->pid, thread->pid, -					fp->binder, node->debug_id, -					fp->cookie, node->cookie); +					(u64)fp->binder, node->debug_id, +					(u64)fp->cookie, (u64)node->cookie); +				return_error = BR_FAILED_REPLY;  				goto err_binder_get_ref_for_node_failed;  			}  			ref = 
binder_get_ref_for_node(target_proc, node); @@ -1540,13 +1564,14 @@ static void binder_transaction(struct binder_proc *proc,  			trace_binder_transaction_node_to_ref(t, node, ref);  			binder_debug(BINDER_DEBUG_TRANSACTION, -				     "        node %d u%p -> ref %d desc %d\n", -				     node->debug_id, node->ptr, ref->debug_id, -				     ref->desc); +				     "        node %d u%016llx -> ref %d desc %d\n", +				     node->debug_id, (u64)node->ptr, +				     ref->debug_id, ref->desc);  		} break;  		case BINDER_TYPE_HANDLE:  		case BINDER_TYPE_WEAK_HANDLE: {  			struct binder_ref *ref = binder_get_ref(proc, fp->handle); +  			if (ref == NULL) {  				binder_user_error("%d:%d got transaction with invalid handle, %d\n",  						proc->pid, @@ -1564,11 +1589,12 @@ static void binder_transaction(struct binder_proc *proc,  				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);  				trace_binder_transaction_ref_to_node(t, ref);  				binder_debug(BINDER_DEBUG_TRANSACTION, -					     "        ref %d desc %d -> node %d u%p\n", +					     "        ref %d desc %d -> node %d u%016llx\n",  					     ref->debug_id, ref->desc, ref->node->debug_id, -					     ref->node->ptr); +					     (u64)ref->node->ptr);  			} else {  				struct binder_ref *new_ref; +  				new_ref = binder_get_ref_for_node(target_proc, ref->node);  				if (new_ref == NULL) {  					return_error = BR_FAILED_REPLY; @@ -1682,12 +1708,13 @@ err_dead_binder:  err_invalid_target_handle:  err_no_context_mgr_node:  	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, -		     "%d:%d transaction failed %d, size %zd-%zd\n", +		     "%d:%d transaction failed %d, size %lld-%lld\n",  		     proc->pid, thread->pid, return_error, -		     tr->data_size, tr->offsets_size); +		     (u64)tr->data_size, (u64)tr->offsets_size);  	{  		struct binder_transaction_log_entry *fe; +  		fe = binder_transaction_log_add(&binder_transaction_log_failed);  		*fe = *e;  	} @@ -1700,10 +1727,13 @@ err_no_context_mgr_node:  		thread->return_error = return_error;  } -int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, -			void __user *buffer, size_t size, size_t *consumed) +static int binder_thread_write(struct binder_proc *proc, +			struct binder_thread *thread, +			binder_uintptr_t binder_buffer, size_t size, +			binder_size_t *consumed)  {  	uint32_t cmd; +	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;  	void __user *ptr = buffer + *consumed;  	void __user *end = buffer + size; @@ -1772,33 +1802,33 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,  		}  		case BC_INCREFS_DONE:  		case BC_ACQUIRE_DONE: { -			void __user *node_ptr; -			void *cookie; +			binder_uintptr_t node_ptr; +			binder_uintptr_t cookie;  			struct binder_node *node; -			if (get_user(node_ptr, (void * __user *)ptr)) +			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))  				return -EFAULT; -			ptr += sizeof(void *); -			if (get_user(cookie, (void * __user *)ptr)) +			ptr += sizeof(binder_uintptr_t); +			if (get_user(cookie, (binder_uintptr_t __user *)ptr))  				return -EFAULT; -			ptr += sizeof(void *); +			ptr += sizeof(binder_uintptr_t);  			node = binder_get_node(proc, node_ptr);  			if (node == NULL) { -				binder_user_error("%d:%d %s u%p no match\n", +				binder_user_error("%d:%d %s u%016llx no match\n",  					proc->pid, thread->pid,  					cmd == BC_INCREFS_DONE ?  					
"BC_INCREFS_DONE" :  					"BC_ACQUIRE_DONE", -					node_ptr); +					(u64)node_ptr);  				break;  			}  			if (cookie != node->cookie) { -				binder_user_error("%d:%d %s u%p node %d cookie mismatch %p != %p\n", +				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",  					proc->pid, thread->pid,  					cmd == BC_INCREFS_DONE ?  					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", -					node_ptr, node->debug_id, -					cookie, node->cookie); +					(u64)node_ptr, node->debug_id, +					(u64)cookie, (u64)node->cookie);  				break;  			}  			if (cmd == BC_ACQUIRE_DONE) { @@ -1834,27 +1864,28 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,  			return -EINVAL;  		case BC_FREE_BUFFER: { -			void __user *data_ptr; +			binder_uintptr_t data_ptr;  			struct binder_buffer *buffer; -			if (get_user(data_ptr, (void * __user *)ptr)) +			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))  				return -EFAULT; -			ptr += sizeof(void *); +			ptr += sizeof(binder_uintptr_t);  			buffer = binder_buffer_lookup(proc, data_ptr);  			if (buffer == NULL) { -				binder_user_error("%d:%d BC_FREE_BUFFER u%p no match\n", -					proc->pid, thread->pid, data_ptr); +				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", +					proc->pid, thread->pid, (u64)data_ptr);  				break;  			}  			if (!buffer->allow_user_free) { -				binder_user_error("%d:%d BC_FREE_BUFFER u%p matched unreturned buffer\n", -					proc->pid, thread->pid, data_ptr); +				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", +					proc->pid, thread->pid, (u64)data_ptr);  				break;  			}  			binder_debug(BINDER_DEBUG_FREE_BUFFER, -				     "%d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n", -				     proc->pid, thread->pid, data_ptr, buffer->debug_id, +				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", +				     proc->pid, thread->pid, (u64)data_ptr, +				     buffer->debug_id,  				     buffer->transaction ? "active" : "finished");  			if (buffer->transaction) { @@ -1924,16 +1955,16 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,  		case BC_REQUEST_DEATH_NOTIFICATION:  		case BC_CLEAR_DEATH_NOTIFICATION: {  			uint32_t target; -			void __user *cookie; +			binder_uintptr_t cookie;  			struct binder_ref *ref;  			struct binder_ref_death *death;  			if (get_user(target, (uint32_t __user *)ptr))  				return -EFAULT;  			ptr += sizeof(uint32_t); -			if (get_user(cookie, (void __user * __user *)ptr)) +			if (get_user(cookie, (binder_uintptr_t __user *)ptr))  				return -EFAULT; -			ptr += sizeof(void *); +			ptr += sizeof(binder_uintptr_t);  			ref = binder_get_ref(proc, target);  			if (ref == NULL) {  				binder_user_error("%d:%d %s invalid ref %d\n", @@ -1946,12 +1977,12 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,  			}  			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, -				     "%d:%d %s %p ref %d desc %d s %d w %d for node %d\n", +				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",  				     proc->pid, thread->pid,  				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?  				     
"BC_REQUEST_DEATH_NOTIFICATION" :  				     "BC_CLEAR_DEATH_NOTIFICATION", -				     cookie, ref->debug_id, ref->desc, +				     (u64)cookie, ref->debug_id, ref->desc,  				     ref->strong, ref->weak, ref->node->debug_id);  			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { @@ -1989,9 +2020,10 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,  				}  				death = ref->death;  				if (death->cookie != cookie) { -					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n", +					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",  						proc->pid, thread->pid, -						death->cookie, cookie); +						(u64)death->cookie, +						(u64)cookie);  					break;  				}  				ref->death = NULL; @@ -2011,25 +2043,28 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,  		} break;  		case BC_DEAD_BINDER_DONE: {  			struct binder_work *w; -			void __user *cookie; +			binder_uintptr_t cookie;  			struct binder_ref_death *death = NULL; -			if (get_user(cookie, (void __user * __user *)ptr)) + +			if (get_user(cookie, (binder_uintptr_t __user *)ptr))  				return -EFAULT;  			ptr += sizeof(void *);  			list_for_each_entry(w, &proc->delivered_death, entry) {  				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); +  				if (tmp_death->cookie == cookie) {  					death = tmp_death;  					break;  				}  			}  			binder_debug(BINDER_DEBUG_DEAD_BINDER, -				     "%d:%d BC_DEAD_BINDER_DONE %p found %p\n", -				     proc->pid, thread->pid, cookie, death); +				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", +				     proc->pid, thread->pid, (u64)cookie, +				     death);  			if (death == NULL) { -				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %p not found\n", -					proc->pid, thread->pid, cookie); +				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", +					proc->pid, thread->pid, (u64)cookie);  				break;  			} @@ -2055,8 +2090,8 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,  	return 0;  } -void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, -		    uint32_t cmd) +static void binder_stat_br(struct binder_proc *proc, +			   struct binder_thread *thread, uint32_t cmd)  {  	trace_binder_return(cmd);  	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { @@ -2081,9 +2116,10 @@ static int binder_has_thread_work(struct binder_thread *thread)  static int binder_thread_read(struct binder_proc *proc,  			      struct binder_thread *thread, -			      void  __user *buffer, size_t size, -			      size_t *consumed, int non_block) +			      binder_uintptr_t binder_buffer, size_t size, +			      binder_size_t *consumed, int non_block)  { +	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;  	void __user *ptr = buffer + *consumed;  	void __user *end = buffer + size; @@ -2203,6 +2239,7 @@ retry:  			const char *cmd_name;  			int strong = node->internal_strong_refs || node->local_strong_refs;  			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; +  			if (weak && !node->has_weak_ref) {  				cmd = BR_INCREFS;  				cmd_name = "BR_INCREFS"; @@ -2228,32 +2265,40 @@ retry:  				if (put_user(cmd, (uint32_t __user *)ptr))  					return -EFAULT;  				ptr += sizeof(uint32_t); -				if (put_user(node->ptr, (void * __user *)ptr)) +				if (put_user(node->ptr, +					     (binder_uintptr_t __user *)ptr))  					return -EFAULT; -				ptr += 
sizeof(void *); -				if (put_user(node->cookie, (void * __user *)ptr)) +				ptr += sizeof(binder_uintptr_t); +				if (put_user(node->cookie, +					     (binder_uintptr_t __user *)ptr))  					return -EFAULT; -				ptr += sizeof(void *); +				ptr += sizeof(binder_uintptr_t);  				binder_stat_br(proc, thread, cmd);  				binder_debug(BINDER_DEBUG_USER_REFS, -					     "%d:%d %s %d u%p c%p\n", -					     proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie); +					     "%d:%d %s %d u%016llx c%016llx\n", +					     proc->pid, thread->pid, cmd_name, +					     node->debug_id, +					     (u64)node->ptr, (u64)node->cookie);  			} else {  				list_del_init(&w->entry);  				if (!weak && !strong) {  					binder_debug(BINDER_DEBUG_INTERNAL_REFS, -						     "%d:%d node %d u%p c%p deleted\n", -						     proc->pid, thread->pid, node->debug_id, -						     node->ptr, node->cookie); +						     "%d:%d node %d u%016llx c%016llx deleted\n", +						     proc->pid, thread->pid, +						     node->debug_id, +						     (u64)node->ptr, +						     (u64)node->cookie);  					rb_erase(&node->rb_node, &proc->nodes);  					kfree(node);  					binder_stats_deleted(BINDER_STAT_NODE);  				} else {  					binder_debug(BINDER_DEBUG_INTERNAL_REFS, -						     "%d:%d node %d u%p c%p state unchanged\n", -						     proc->pid, thread->pid, node->debug_id, node->ptr, -						     node->cookie); +						     "%d:%d node %d u%016llx c%016llx state unchanged\n", +						     proc->pid, thread->pid, +						     node->debug_id, +						     (u64)node->ptr, +						     (u64)node->cookie);  				}  			}  		} break; @@ -2271,17 +2316,18 @@ retry:  			if (put_user(cmd, (uint32_t __user *)ptr))  				return -EFAULT;  			ptr += sizeof(uint32_t); -			if (put_user(death->cookie, (void * __user *)ptr)) +			if (put_user(death->cookie, +				     (binder_uintptr_t __user *)ptr))  				return -EFAULT; -			ptr += sizeof(void *); +			ptr += sizeof(binder_uintptr_t);  			binder_stat_br(proc, thread, cmd);  			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, -				     "%d:%d %s %p\n", +				     "%d:%d %s %016llx\n",  				      proc->pid, thread->pid,  				      cmd == BR_DEAD_BINDER ?  				      
"BR_DEAD_BINDER" :  				      "BR_CLEAR_DEATH_NOTIFICATION_DONE", -				      death->cookie); +				      (u64)death->cookie);  			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {  				list_del(&w->entry); @@ -2300,6 +2346,7 @@ retry:  		BUG_ON(t->buffer == NULL);  		if (t->buffer->target_node) {  			struct binder_node *target_node = t->buffer->target_node; +  			tr.target.ptr = target_node->ptr;  			tr.cookie =  target_node->cookie;  			t->saved_priority = task_nice(current); @@ -2311,8 +2358,8 @@ retry:  				binder_set_nice(target_node->min_priority);  			cmd = BR_TRANSACTION;  		} else { -			tr.target.ptr = NULL; -			tr.cookie = NULL; +			tr.target.ptr = 0; +			tr.cookie = 0;  			cmd = BR_REPLY;  		}  		tr.code = t->code; @@ -2321,6 +2368,7 @@ retry:  		if (t->from) {  			struct task_struct *sender = t->from->proc->tsk; +  			tr.sender_pid = task_tgid_nr_ns(sender,  							task_active_pid_ns(current));  		} else { @@ -2329,8 +2377,9 @@ retry:  		tr.data_size = t->buffer->data_size;  		tr.offsets_size = t->buffer->offsets_size; -		tr.data.ptr.buffer = (void *)t->buffer->data + -					proc->user_buffer_offset; +		tr.data.ptr.buffer = (binder_uintptr_t)( +					(uintptr_t)t->buffer->data + +					proc->user_buffer_offset);  		tr.data.ptr.offsets = tr.data.ptr.buffer +  					ALIGN(t->buffer->data_size,  					    sizeof(void *)); @@ -2345,14 +2394,14 @@ retry:  		trace_binder_transaction_received(t);  		binder_stat_br(proc, thread, cmd);  		binder_debug(BINDER_DEBUG_TRANSACTION, -			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n", +			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",  			     proc->pid, thread->pid,  			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :  			     "BR_REPLY",  			     t->debug_id, t->from ? t->from->proc->pid : 0,  			     t->from ? 
t->from->pid : 0, cmd,  			     t->buffer->data_size, t->buffer->offsets_size, -			     tr.data.ptr.buffer, tr.data.ptr.offsets); +			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);  		list_del(&t->work.entry);  		t->buffer->allow_user_free = 1; @@ -2390,6 +2439,7 @@ done:  static void binder_release_work(struct list_head *list)  {  	struct binder_work *w; +  	while (!list_empty(list)) {  		w = list_first_entry(list, struct binder_work, entry);  		list_del_init(&w->entry); @@ -2422,8 +2472,8 @@ static void binder_release_work(struct list_head *list)  			death = container_of(w, struct binder_ref_death, work);  			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, -				"undelivered death notification, %p\n", -				death->cookie); +				"undelivered death notification, %016llx\n", +				(u64)death->cookie);  			kfree(death);  			binder_stats_deleted(BINDER_STAT_DEATH);  		} break; @@ -2551,6 +2601,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)  	struct binder_thread *thread;  	unsigned int size = _IOC_SIZE(cmd);  	void __user *ubuf = (void __user *)arg; +	kuid_t curr_euid = current_euid();  	/*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ @@ -2570,6 +2621,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)  	switch (cmd) {  	case BINDER_WRITE_READ: {  		struct binder_write_read bwr; +  		if (size != sizeof(struct binder_write_read)) {  			ret = -EINVAL;  			goto err; @@ -2579,12 +2631,16 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)  			goto err;  		}  		binder_debug(BINDER_DEBUG_READ_WRITE, -			     "%d:%d write %zd at %016lx, read %zd at %016lx\n", -			     proc->pid, thread->pid, bwr.write_size, -			     bwr.write_buffer, bwr.read_size, bwr.read_buffer); +			     "%d:%d write %lld at %016llx, read %lld at %016llx\n", +			     proc->pid, thread->pid, +			     (u64)bwr.write_size, (u64)bwr.write_buffer, +			     (u64)bwr.read_size, (u64)bwr.read_buffer);  		if (bwr.write_size > 0) { -			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); +			ret = binder_thread_write(proc, thread, +						  bwr.write_buffer, +						  bwr.write_size, +						  &bwr.write_consumed);  			trace_binder_write_done(ret);  			if (ret < 0) {  				bwr.read_consumed = 0; @@ -2594,7 +2650,10 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)  			}  		}  		if (bwr.read_size > 0) { -			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); +			ret = binder_thread_read(proc, thread, bwr.read_buffer, +						 bwr.read_size, +						 &bwr.read_consumed, +						 filp->f_flags & O_NONBLOCK);  			trace_binder_read_done(ret);  			if (!list_empty(&proc->todo))  				wake_up_interruptible(&proc->wait); @@ -2605,9 +2664,10 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)  			}  		}  		binder_debug(BINDER_DEBUG_READ_WRITE, -			     "%d:%d wrote %zd of %zd, read return %zd of %zd\n", -			     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, -			     bwr.read_consumed, bwr.read_size); +			     "%d:%d wrote %lld of %lld, read return %lld of %lld\n", +			     proc->pid, thread->pid, +			     (u64)bwr.write_consumed, (u64)bwr.write_size, +			     (u64)bwr.read_consumed, (u64)bwr.read_size);  		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {  			ret = -EFAULT;  			goto err; @@ -2627,16 
+2687,17 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)  			goto err;  		}  		if (uid_valid(binder_context_mgr_uid)) { -			if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) { +			if (!uid_eq(binder_context_mgr_uid, curr_euid)) {  				pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", -				       from_kuid(&init_user_ns, current->cred->euid), +				       from_kuid(&init_user_ns, curr_euid),  				       from_kuid(&init_user_ns, binder_context_mgr_uid));  				ret = -EPERM;  				goto err;  			} -		} else -			binder_context_mgr_uid = current->cred->euid; -		binder_context_mgr_node = binder_new_node(proc, NULL, NULL); +		} else { +			binder_context_mgr_uid = curr_euid; +		} +		binder_context_mgr_node = binder_new_node(proc, 0, 0);  		if (binder_context_mgr_node == NULL) {  			ret = -ENOMEM;  			goto err; @@ -2652,16 +2713,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)  		binder_free_thread(proc, thread);  		thread = NULL;  		break; -	case BINDER_VERSION: +	case BINDER_VERSION: { +		struct binder_version __user *ver = ubuf; +  		if (size != sizeof(struct binder_version)) {  			ret = -EINVAL;  			goto err;  		} -		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) { +		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, +			     &ver->protocol_version)) {  			ret = -EINVAL;  			goto err;  		}  		break; +	}  	default:  		ret = -EINVAL;  		goto err; @@ -2682,6 +2747,7 @@ err_unlocked:  static void binder_vma_open(struct vm_area_struct *vma)  {  	struct binder_proc *proc = vma->vm_private_data; +  	binder_debug(BINDER_DEBUG_OPEN_CLOSE,  		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",  		     proc->pid, vma->vm_start, vma->vm_end, @@ -2692,6 +2758,7 @@ static void binder_vma_open(struct vm_area_struct *vma)  static void binder_vma_close(struct vm_area_struct *vma)  {  	struct binder_proc *proc = vma->vm_private_data; +  	binder_debug(BINDER_DEBUG_OPEN_CLOSE,  		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",  		     proc->pid, vma->vm_start, vma->vm_end, @@ -2834,6 +2901,7 @@ static int binder_open(struct inode *nodp, struct file *filp)  	if (binder_debugfs_dir_entry_proc) {  		char strbuf[11]; +  		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);  		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,  			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); @@ -2855,8 +2923,10 @@ static void binder_deferred_flush(struct binder_proc *proc)  {  	struct rb_node *n;  	int wake_count = 0; +  	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {  		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); +  		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;  		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {  			wake_up_interruptible(&thread->wait); @@ -2873,6 +2943,7 @@ static void binder_deferred_flush(struct binder_proc *proc)  static int binder_release(struct inode *nodp, struct file *filp)  {  	struct binder_proc *proc = filp->private_data; +  	debugfs_remove(proc->debugfs_entry);  	binder_defer_work(proc, BINDER_DEFERRED_RELEASE); @@ -2903,7 +2974,7 @@ static int binder_node_release(struct binder_node *node, int refs)  		refs++;  		if (!ref->death) -			goto out; +			continue;  		death++; @@ -2916,7 +2987,6 @@ static int binder_node_release(struct binder_node *node, int refs)  			BUG();  	} -out:  	binder_debug(BINDER_DEBUG_DEAD_BINDER,  		     "node %d now dead, refs %d, death %d\n",  		     
node->debug_id, refs, death); @@ -3035,6 +3105,7 @@ static void binder_deferred_func(struct work_struct *work)  	struct files_struct *files;  	int defer; +  	do {  		binder_lock(__func__);  		mutex_lock(&binder_deferred_lock); @@ -3132,8 +3203,9 @@ static void print_binder_work(struct seq_file *m, const char *prefix,  		break;  	case BINDER_WORK_NODE:  		node = container_of(w, struct binder_node, work); -		seq_printf(m, "%snode work %d: u%p c%p\n", -			   prefix, node->debug_id, node->ptr, node->cookie); +		seq_printf(m, "%snode work %d: u%016llx c%016llx\n", +			   prefix, node->debug_id, +			   (u64)node->ptr, (u64)node->cookie);  		break;  	case BINDER_WORK_DEAD_BINDER:  		seq_printf(m, "%shas dead binder\n", prefix); @@ -3193,8 +3265,8 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)  	hlist_for_each_entry(ref, &node->refs, node_entry)  		count++; -	seq_printf(m, "  node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d", -		   node->debug_id, node->ptr, node->cookie, +	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d", +		   node->debug_id, (u64)node->ptr, (u64)node->cookie,  		   node->has_strong_ref, node->has_weak_ref,  		   node->local_strong_refs, node->local_weak_refs,  		   node->internal_strong_refs, count); @@ -3496,6 +3568,7 @@ static const struct file_operations binder_fops = {  	.owner = THIS_MODULE,  	.poll = binder_poll,  	.unlocked_ioctl = binder_ioctl, +	.compat_ioctl = binder_ioctl,  	.mmap = binder_mmap,  	.open = binder_open,  	.flush = binder_flush, diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h index cbe34516806..eb0834656df 100644 --- a/drivers/staging/android/binder.h +++ b/drivers/staging/android/binder.h @@ -20,311 +20,11 @@  #ifndef _LINUX_BINDER_H  #define _LINUX_BINDER_H -#include <linux/ioctl.h> +#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT +#define BINDER_IPC_32BIT 1 +#endif -#define B_PACK_CHARS(c1, c2, c3, c4) \ -	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) -#define B_TYPE_LARGE 0x85 - -enum { -	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), -	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), -	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), -	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), -	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), -}; - -enum { -	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, -	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, -}; - -/* - * This is the flattened representation of a Binder object for transfer - * between processes.  The 'offsets' supplied as part of a binder transaction - * contains offsets into the data where these structures occur.  The Binder - * driver takes care of re-writing the structure type and data as it moves - * between processes. - */ -struct flat_binder_object { -	/* 8 bytes for large_flat_header. */ -	__u32		type; -	__u32		flags; - -	/* 8 bytes of data. */ -	union { -		void __user	*binder;	/* local object */ -		__u32	    handle;		/* remote object */ -	}; - -	/* extra data associated with local object */ -	void __user		*cookie; -}; - -/* - * On 64-bit platforms where user code may run in 32-bits the driver must - * translate the buffer (and local binder) addresses appropriately. 
- */ - -struct binder_write_read { -	size_t write_size;	/* bytes to write */ -	size_t write_consumed;	/* bytes consumed by driver */ -	unsigned long	write_buffer; -	size_t read_size;	/* bytes to read */ -	size_t read_consumed;	/* bytes consumed by driver */ -	unsigned long	read_buffer; -}; - -/* Use with BINDER_VERSION, driver fills in fields. */ -struct binder_version { -	/* driver protocol version -- increment with incompatible change */ -	__s32       protocol_version; -}; - -/* This is the current protocol version. */ -#define BINDER_CURRENT_PROTOCOL_VERSION 7 - -#define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read) -#define	BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64) -#define	BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32) -#define	BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32) -#define	BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32) -#define	BINDER_THREAD_EXIT		_IOW('b', 8, __s32) -#define BINDER_VERSION			_IOWR('b', 9, struct binder_version) - -/* - * NOTE: Two special error codes you should check for when calling - * in to the driver are: - * - * EINTR -- The operation has been interupted.  This should be - * handled by retrying the ioctl() until a different error code - * is returned. - * - * ECONNREFUSED -- The driver is no longer accepting operations - * from your process.  That is, the process is being destroyed. - * You should handle this by exiting from your process.  Note - * that once this error code is returned, all further calls to - * the driver from any thread will return this same code. - */ - -enum transaction_flags { -	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */ -	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */ -	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */ -	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */ -}; - -struct binder_transaction_data { -	/* The first two are only used for bcTRANSACTION and brTRANSACTION, -	 * identifying the target and contents of the transaction. -	 */ -	union { -		__u32	handle;	/* target descriptor of command transaction */ -		void	*ptr;	/* target descriptor of return transaction */ -	} target; -	void		*cookie;	/* target object cookie */ -	__u32		code;		/* transaction command */ - -	/* General information about the transaction. */ -	__u32	        flags; -	pid_t		sender_pid; -	uid_t		sender_euid; -	size_t		data_size;	/* number of bytes of data */ -	size_t		offsets_size;	/* number of bytes of offsets */ - -	/* If this transaction is inline, the data immediately -	 * follows here; otherwise, it ends with a pointer to -	 * the data buffer. -	 */ -	union { -		struct { -			/* transaction data */ -			const void __user	*buffer; -			/* offsets from buffer to flat_binder_object structs */ -			const void __user	*offsets; -		} ptr; -		__u8	buf[8]; -	} data; -}; - -struct binder_ptr_cookie { -	void *ptr; -	void *cookie; -}; - -struct binder_pri_desc { -	__s32 priority; -	__u32 desc; -}; - -struct binder_pri_ptr_cookie { -	__s32 priority; -	void *ptr; -	void *cookie; -}; - -enum binder_driver_return_protocol { -	BR_ERROR = _IOR('r', 0, __s32), -	/* -	 * int: error code -	 */ - -	BR_OK = _IO('r', 1), -	/* No parameters! */ - -	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), -	BR_REPLY = _IOR('r', 3, struct binder_transaction_data), -	/* -	 * binder_transaction_data: the received command. -	 */ - -	BR_ACQUIRE_RESULT = _IOR('r', 4, __s32), -	/* -	 * not currently supported -	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. 
-	 * Else the remote object has acquired a primary reference. -	 */ - -	BR_DEAD_REPLY = _IO('r', 5), -	/* -	 * The target of the last transaction (either a bcTRANSACTION or -	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters. -	 */ - -	BR_TRANSACTION_COMPLETE = _IO('r', 6), -	/* -	 * No parameters... always refers to the last transaction requested -	 * (including replies).  Note that this will be sent even for -	 * asynchronous transactions. -	 */ - -	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), -	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), -	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), -	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), -	/* -	 * void *:	ptr to binder -	 * void *: cookie for binder -	 */ - -	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), -	/* -	 * not currently supported -	 * int:	priority -	 * void *: ptr to binder -	 * void *: cookie for binder -	 */ - -	BR_NOOP = _IO('r', 12), -	/* -	 * No parameters.  Do nothing and examine the next command.  It exists -	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command. -	 */ - -	BR_SPAWN_LOOPER = _IO('r', 13), -	/* -	 * No parameters.  The driver has determined that a process has no -	 * threads waiting to service incoming transactions.  When a process -	 * receives this command, it must spawn a new service thread and -	 * register it via bcENTER_LOOPER. -	 */ - -	BR_FINISHED = _IO('r', 14), -	/* -	 * not currently supported -	 * stop threadpool thread -	 */ - -	BR_DEAD_BINDER = _IOR('r', 15, void *), -	/* -	 * void *: cookie -	 */ -	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *), -	/* -	 * void *: cookie -	 */ - -	BR_FAILED_REPLY = _IO('r', 17), -	/* -	 * The the last transaction (either a bcTRANSACTION or -	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters. -	 */ -}; - -enum binder_driver_command_protocol { -	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), -	BC_REPLY = _IOW('c', 1, struct binder_transaction_data), -	/* -	 * binder_transaction_data: the sent command. -	 */ - -	BC_ACQUIRE_RESULT = _IOW('c', 2, __s32), -	/* -	 * not currently supported -	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful. -	 * Else you have acquired a primary reference on the object. -	 */ - -	BC_FREE_BUFFER = _IOW('c', 3, void *), -	/* -	 * void *: ptr to transaction data received on a read -	 */ - -	BC_INCREFS = _IOW('c', 4, __u32), -	BC_ACQUIRE = _IOW('c', 5, __u32), -	BC_RELEASE = _IOW('c', 6, __u32), -	BC_DECREFS = _IOW('c', 7, __u32), -	/* -	 * int:	descriptor -	 */ - -	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), -	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), -	/* -	 * void *: ptr to binder -	 * void *: cookie for binder -	 */ - -	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), -	/* -	 * not currently supported -	 * int: priority -	 * int: descriptor -	 */ - -	BC_REGISTER_LOOPER = _IO('c', 11), -	/* -	 * No parameters. -	 * Register a spawned looper thread with the device. -	 */ - -	BC_ENTER_LOOPER = _IO('c', 12), -	BC_EXIT_LOOPER = _IO('c', 13), -	/* -	 * No parameters. -	 * These two commands are sent as an application-level thread -	 * enters and exits the binder loop, respectively.  They are -	 * used so the binder can have an accurate count of the number -	 * of looping threads it has available. 
-	 */ - -	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie), -	/* -	 * void *: ptr to binder -	 * void *: cookie -	 */ - -	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie), -	/* -	 * void *: ptr to binder -	 * void *: cookie -	 */ - -	BC_DEAD_BINDER_DONE = _IOW('c', 16, void *), -	/* -	 * void *: cookie -	 */ -}; +#include "uapi/binder.h"  #endif /* _LINUX_BINDER_H */ diff --git a/drivers/staging/android/binder_trace.h b/drivers/staging/android/binder_trace.h index 82a567c2af6..7f20f3dc836 100644 --- a/drivers/staging/android/binder_trace.h +++ b/drivers/staging/android/binder_trace.h @@ -152,7 +152,7 @@ TRACE_EVENT(binder_transaction_node_to_ref,  	TP_STRUCT__entry(  		__field(int, debug_id)  		__field(int, node_debug_id) -		__field(void __user *, node_ptr) +		__field(binder_uintptr_t, node_ptr)  		__field(int, ref_debug_id)  		__field(uint32_t, ref_desc)  	), @@ -163,8 +163,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,  		__entry->ref_debug_id = ref->debug_id;  		__entry->ref_desc = ref->desc;  	), -	TP_printk("transaction=%d node=%d src_ptr=0x%p ==> dest_ref=%d dest_desc=%d", -		  __entry->debug_id, __entry->node_debug_id, __entry->node_ptr, +	TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d", +		  __entry->debug_id, __entry->node_debug_id, +		  (u64)__entry->node_ptr,  		  __entry->ref_debug_id, __entry->ref_desc)  ); @@ -177,7 +178,7 @@ TRACE_EVENT(binder_transaction_ref_to_node,  		__field(int, ref_debug_id)  		__field(uint32_t, ref_desc)  		__field(int, node_debug_id) -		__field(void __user *, node_ptr) +		__field(binder_uintptr_t, node_ptr)  	),  	TP_fast_assign(  		__entry->debug_id = t->debug_id; @@ -186,9 +187,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,  		__entry->node_debug_id = ref->node->debug_id;  		__entry->node_ptr = ref->node->ptr;  	), -	TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%p", +	TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",  		  __entry->debug_id, __entry->node_debug_id, -		  __entry->ref_debug_id, __entry->ref_desc, __entry->node_ptr) +		  __entry->ref_debug_id, __entry->ref_desc, +		  (u64)__entry->node_ptr)  );  TRACE_EVENT(binder_transaction_ref_to_ref, diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig new file mode 100644 index 00000000000..0f8fec1f84e --- /dev/null +++ b/drivers/staging/android/ion/Kconfig @@ -0,0 +1,35 @@ +menuconfig ION +	bool "Ion Memory Manager" +	depends on HAVE_MEMBLOCK +	select GENERIC_ALLOCATOR +	select DMA_SHARED_BUFFER +	---help--- +	  Chose this option to enable the ION Memory Manager, +	  used by Android to efficiently allocate buffers +	  from userspace that can be shared between drivers. +	  If you're not using Android its probably safe to +	  say N here. + +config ION_TEST +	tristate "Ion Test Device" +	depends on ION +	help +	  Choose this option to create a device that can be used to test the +	  kernel and device side ION functions. + +config ION_DUMMY +	bool "Dummy Ion driver" +	depends on ION +	help +	  Provides a dummy ION driver that registers the +	  /dev/ion device and some basic heaps. This can +	  be used for testing the ION infrastructure if +	  one doesn't have access to hardware drivers that +	  use ION. + +config ION_TEGRA +	tristate "Ion for Tegra" +	depends on ARCH_TEGRA && ION +	help +	  Choose this option if you wish to use ion on an nVidia Tegra. 
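The Kconfig help above describes the basic flow ION enables: a process opens /dev/ion, allocates a buffer from one of the registered heaps, and hands it to drivers or other processes as a dma-buf file descriptor. A minimal user-space sketch of that flow follows; the ION_IOC_* numbers and the ion_*_data layouts come from the staging uapi header referenced in this series (drivers/staging/android/uapi/ion.h), and the include path, heap id and buffer size are illustrative assumptions only.

/* sketch: allocate one page from heap id 0, export it as a dma-buf, map it */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "uapi/ion.h"	/* assumed path to a copy of the staging uapi header */

int ion_alloc_and_share_example(void)
{
	struct ion_allocation_data alloc = {
		.len = 4096,
		.align = 4096,
		.heap_id_mask = 1u << 0,	/* assumes the platform registered heap id 0 */
		.flags = 0,
	};
	struct ion_fd_data share;
	struct ion_handle_data done;
	void *ptr;
	int fd, ret;

	fd = open("/dev/ion", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, ION_IOC_ALLOC, &alloc);		/* driver fills alloc.handle */
	if (ret)
		goto out;

	share.handle = alloc.handle;
	ret = ioctl(fd, ION_IOC_SHARE, &share);		/* handle -> dma-buf fd in share.fd */
	if (ret)
		goto free_handle;

	ptr = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
	if (ptr != MAP_FAILED)
		munmap(ptr, alloc.len);	/* or pass share.fd to another process/driver */
	close(share.fd);

free_handle:
	done.handle = alloc.handle;
	ioctl(fd, ION_IOC_FREE, &done);
out:
	close(fd);
	return ret;
}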
+ diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile new file mode 100644 index 00000000000..b56fd2bf2b4 --- /dev/null +++ b/drivers/staging/android/ion/Makefile @@ -0,0 +1,10 @@ +obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \ +			ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o +obj-$(CONFIG_ION_TEST) += ion_test.o +ifdef CONFIG_COMPAT +obj-$(CONFIG_ION) += compat_ion.o +endif + +obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o +obj-$(CONFIG_ION_TEGRA) += tegra/ + diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c new file mode 100644 index 00000000000..ee3a7380e53 --- /dev/null +++ b/drivers/staging/android/ion/compat_ion.c @@ -0,0 +1,195 @@ +/* + * drivers/staging/android/ion/compat_ion.c + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/compat.h> +#include <linux/fs.h> +#include <linux/uaccess.h> + +#include "ion.h" +#include "compat_ion.h" + +/* See drivers/staging/android/uapi/ion.h for the definition of these structs */ +struct compat_ion_allocation_data { +	compat_size_t len; +	compat_size_t align; +	compat_uint_t heap_id_mask; +	compat_uint_t flags; +	compat_int_t handle; +}; + +struct compat_ion_custom_data { +	compat_uint_t cmd; +	compat_ulong_t arg; +}; + +struct compat_ion_handle_data { +	compat_int_t handle; +}; + +#define COMPAT_ION_IOC_ALLOC	_IOWR(ION_IOC_MAGIC, 0, \ +				      struct compat_ion_allocation_data) +#define COMPAT_ION_IOC_FREE	_IOWR(ION_IOC_MAGIC, 1, \ +				      struct compat_ion_handle_data) +#define COMPAT_ION_IOC_CUSTOM	_IOWR(ION_IOC_MAGIC, 6, \ +				      struct compat_ion_custom_data) + +static int compat_get_ion_allocation_data( +			struct compat_ion_allocation_data __user *data32, +			struct ion_allocation_data __user *data) +{ +	compat_size_t s; +	compat_uint_t u; +	compat_int_t i; +	int err; + +	err = get_user(s, &data32->len); +	err |= put_user(s, &data->len); +	err |= get_user(s, &data32->align); +	err |= put_user(s, &data->align); +	err |= get_user(u, &data32->heap_id_mask); +	err |= put_user(u, &data->heap_id_mask); +	err |= get_user(u, &data32->flags); +	err |= put_user(u, &data->flags); +	err |= get_user(i, &data32->handle); +	err |= put_user(i, &data->handle); + +	return err; +} + +static int compat_get_ion_handle_data( +			struct compat_ion_handle_data __user *data32, +			struct ion_handle_data __user *data) +{ +	compat_int_t i; +	int err; + +	err = get_user(i, &data32->handle); +	err |= put_user(i, &data->handle); + +	return err; +} + +static int compat_put_ion_allocation_data( +			struct compat_ion_allocation_data __user *data32, +			struct ion_allocation_data __user *data) +{ +	compat_size_t s; +	compat_uint_t u; +	compat_int_t i; +	int err; + +	err = get_user(s, &data->len); +	err |= put_user(s, &data32->len); +	err |= get_user(s, &data->align); +	err |= put_user(s, &data32->align); +	err |= get_user(u, &data->heap_id_mask); +	err |= put_user(u, &data32->heap_id_mask); +	err |= get_user(u, &data->flags); +	err |= put_user(u, &data32->flags); +	err |= 
get_user(i, &data->handle); +	err |= put_user(i, &data32->handle); + +	return err; +} + +static int compat_get_ion_custom_data( +			struct compat_ion_custom_data __user *data32, +			struct ion_custom_data __user *data) +{ +	compat_uint_t cmd; +	compat_ulong_t arg; +	int err; + +	err = get_user(cmd, &data32->cmd); +	err |= put_user(cmd, &data->cmd); +	err |= get_user(arg, &data32->arg); +	err |= put_user(arg, &data->arg); + +	return err; +}; + +long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ +	long ret; + +	if (!filp->f_op || !filp->f_op->unlocked_ioctl) +		return -ENOTTY; + +	switch (cmd) { +	case COMPAT_ION_IOC_ALLOC: +	{ +		struct compat_ion_allocation_data __user *data32; +		struct ion_allocation_data __user *data; +		int err; + +		data32 = compat_ptr(arg); +		data = compat_alloc_user_space(sizeof(*data)); +		if (data == NULL) +			return -EFAULT; + +		err = compat_get_ion_allocation_data(data32, data); +		if (err) +			return err; +		ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC, +							(unsigned long)data); +		err = compat_put_ion_allocation_data(data32, data); +		return ret ? ret : err; +	} +	case COMPAT_ION_IOC_FREE: +	{ +		struct compat_ion_handle_data __user *data32; +		struct ion_handle_data __user *data; +		int err; + +		data32 = compat_ptr(arg); +		data = compat_alloc_user_space(sizeof(*data)); +		if (data == NULL) +			return -EFAULT; + +		err = compat_get_ion_handle_data(data32, data); +		if (err) +			return err; + +		return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE, +							(unsigned long)data); +	} +	case COMPAT_ION_IOC_CUSTOM: { +		struct compat_ion_custom_data __user *data32; +		struct ion_custom_data __user *data; +		int err; + +		data32 = compat_ptr(arg); +		data = compat_alloc_user_space(sizeof(*data)); +		if (data == NULL) +			return -EFAULT; + +		err = compat_get_ion_custom_data(data32, data); +		if (err) +			return err; + +		return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM, +							(unsigned long)data); +	} +	case ION_IOC_SHARE: +	case ION_IOC_MAP: +	case ION_IOC_IMPORT: +	case ION_IOC_SYNC: +		return filp->f_op->unlocked_ioctl(filp, cmd, +						(unsigned long)compat_ptr(arg)); +	default: +		return -ENOIOCTLCMD; +	} +} diff --git a/drivers/staging/android/ram_console.h b/drivers/staging/android/ion/compat_ion.h index 9f1125c1106..c2ad5893dfd 100644 --- a/drivers/staging/android/ram_console.h +++ b/drivers/staging/android/ion/compat_ion.h @@ -1,5 +1,8 @@  /* - * Copyright (C) 2010 Google, Inc. + + * drivers/staging/android/ion/compat_ion.h + * + * Copyright (C) 2013 Google, Inc.   
*   * This software is licensed under the terms of the GNU General Public   * License version 2, as published by the Free Software Foundation, and @@ -12,11 +15,16 @@   *   */ -#ifndef _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_ -#define _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_ +#ifndef _LINUX_COMPAT_ION_H +#define _LINUX_COMPAT_ION_H + +#if IS_ENABLED(CONFIG_COMPAT) + +long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); + +#else -struct ram_console_platform_data { -	const char *bootinfo; -}; +#define compat_ion_ioctl  NULL -#endif /* _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_ */ +#endif /* CONFIG_COMPAT */ +#endif /* _LINUX_COMPAT_ION_H */ diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c new file mode 100644 index 00000000000..389b8f67a2e --- /dev/null +++ b/drivers/staging/android/ion/ion.c @@ -0,0 +1,1645 @@ +/* + + * drivers/staging/android/ion/ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/file.h> +#include <linux/freezer.h> +#include <linux/fs.h> +#include <linux/anon_inodes.h> +#include <linux/kthread.h> +#include <linux/list.h> +#include <linux/memblock.h> +#include <linux/miscdevice.h> +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/mm_types.h> +#include <linux/rbtree.h> +#include <linux/slab.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> +#include <linux/debugfs.h> +#include <linux/dma-buf.h> +#include <linux/idr.h> + +#include "ion.h" +#include "ion_priv.h" +#include "compat_ion.h" + +/** + * struct ion_device - the metadata of the ion device node + * @dev:		the actual misc device + * @buffers:		an rb tree of all the existing buffers + * @buffer_lock:	lock protecting the tree of buffers + * @lock:		rwsem protecting the tree of heaps and clients + * @heaps:		list of all the heaps in the system + * @user_clients:	list of all the clients created from userspace + */ +struct ion_device { +	struct miscdevice dev; +	struct rb_root buffers; +	struct mutex buffer_lock; +	struct rw_semaphore lock; +	struct plist_head heaps; +	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd, +			     unsigned long arg); +	struct rb_root clients; +	struct dentry *debug_root; +	struct dentry *heaps_debug_root; +	struct dentry *clients_debug_root; +}; + +/** + * struct ion_client - a process/hw block local address space + * @node:		node in the tree of all clients + * @dev:		backpointer to ion device + * @handles:		an rb tree of all the handles in this client + * @idr:		an idr space for allocating handle ids + * @lock:		lock protecting the tree of handles + * @name:		used for debugging + * @display_name:	used for debugging (unique version of @name) + * @display_serial:	used for debugging (to make display_name unique) + * @task:		used for debugging + * + * A client represents a list of buffers this client may access. 
+ * The mutex stored here is used to protect both handles tree + * as well as the handles themselves, and should be held while modifying either. + */ +struct ion_client { +	struct rb_node node; +	struct ion_device *dev; +	struct rb_root handles; +	struct idr idr; +	struct mutex lock; +	const char *name; +	char *display_name; +	int display_serial; +	struct task_struct *task; +	pid_t pid; +	struct dentry *debug_root; +}; + +/** + * ion_handle - a client local reference to a buffer + * @ref:		reference count + * @client:		back pointer to the client the buffer resides in + * @buffer:		pointer to the buffer + * @node:		node in the client's handle rbtree + * @kmap_cnt:		count of times this client has mapped to kernel + * @id:			client-unique id allocated by client->idr + * + * Modifications to node, map_cnt or mapping should be protected by the + * lock in the client.  Other fields are never changed after initialization. + */ +struct ion_handle { +	struct kref ref; +	struct ion_client *client; +	struct ion_buffer *buffer; +	struct rb_node node; +	unsigned int kmap_cnt; +	int id; +}; + +bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer) +{ +	return (buffer->flags & ION_FLAG_CACHED) && +		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC); +} + +bool ion_buffer_cached(struct ion_buffer *buffer) +{ +	return !!(buffer->flags & ION_FLAG_CACHED); +} + +static inline struct page *ion_buffer_page(struct page *page) +{ +	return (struct page *)((unsigned long)page & ~(1UL)); +} + +static inline bool ion_buffer_page_is_dirty(struct page *page) +{ +	return !!((unsigned long)page & 1UL); +} + +static inline void ion_buffer_page_dirty(struct page **page) +{ +	*page = (struct page *)((unsigned long)(*page) | 1UL); +} + +static inline void ion_buffer_page_clean(struct page **page) +{ +	*page = (struct page *)((unsigned long)(*page) & ~(1UL)); +} + +/* this function should only be called while dev->lock is held */ +static void ion_buffer_add(struct ion_device *dev, +			   struct ion_buffer *buffer) +{ +	struct rb_node **p = &dev->buffers.rb_node; +	struct rb_node *parent = NULL; +	struct ion_buffer *entry; + +	while (*p) { +		parent = *p; +		entry = rb_entry(parent, struct ion_buffer, node); + +		if (buffer < entry) { +			p = &(*p)->rb_left; +		} else if (buffer > entry) { +			p = &(*p)->rb_right; +		} else { +			pr_err("%s: buffer already found.", __func__); +			BUG(); +		} +	} + +	rb_link_node(&buffer->node, parent, p); +	rb_insert_color(&buffer->node, &dev->buffers); +} + +/* this function should only be called while dev->lock is held */ +static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, +				     struct ion_device *dev, +				     unsigned long len, +				     unsigned long align, +				     unsigned long flags) +{ +	struct ion_buffer *buffer; +	struct sg_table *table; +	struct scatterlist *sg; +	int i, ret; + +	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); +	if (!buffer) +		return ERR_PTR(-ENOMEM); + +	buffer->heap = heap; +	buffer->flags = flags; +	kref_init(&buffer->ref); + +	ret = heap->ops->allocate(heap, buffer, len, align, flags); + +	if (ret) { +		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) +			goto err2; + +		ion_heap_freelist_drain(heap, 0); +		ret = heap->ops->allocate(heap, buffer, len, align, +					  flags); +		if (ret) +			goto err2; +	} + +	buffer->dev = dev; +	buffer->size = len; + +	table = heap->ops->map_dma(heap, buffer); +	if (WARN_ONCE(table == NULL, +			"heap->ops->map_dma should return ERR_PTR on error")) +		table = ERR_PTR(-EINVAL); +	if 
(IS_ERR(table)) { +		heap->ops->free(buffer); +		kfree(buffer); +		return ERR_CAST(table); +	} +	buffer->sg_table = table; +	if (ion_buffer_fault_user_mappings(buffer)) { +		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; +		struct scatterlist *sg; +		int i, j, k = 0; + +		buffer->pages = vmalloc(sizeof(struct page *) * num_pages); +		if (!buffer->pages) { +			ret = -ENOMEM; +			goto err1; +		} + +		for_each_sg(table->sgl, sg, table->nents, i) { +			struct page *page = sg_page(sg); + +			for (j = 0; j < sg->length / PAGE_SIZE; j++) +				buffer->pages[k++] = page++; +		} + +		if (ret) +			goto err; +	} + +	buffer->dev = dev; +	buffer->size = len; +	INIT_LIST_HEAD(&buffer->vmas); +	mutex_init(&buffer->lock); +	/* this will set up dma addresses for the sglist -- it is not +	   technically correct as per the dma api -- a specific +	   device isn't really taking ownership here.  However, in practice on +	   our systems the only dma_address space is physical addresses. +	   Additionally, we can't afford the overhead of invalidating every +	   allocation via dma_map_sg. The implicit contract here is that +	   memory comming from the heaps is ready for dma, ie if it has a +	   cached mapping that mapping has been invalidated */ +	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) +		sg_dma_address(sg) = sg_phys(sg); +	mutex_lock(&dev->buffer_lock); +	ion_buffer_add(dev, buffer); +	mutex_unlock(&dev->buffer_lock); +	return buffer; + +err: +	heap->ops->unmap_dma(heap, buffer); +	heap->ops->free(buffer); +err1: +	if (buffer->pages) +		vfree(buffer->pages); +err2: +	kfree(buffer); +	return ERR_PTR(ret); +} + +void ion_buffer_destroy(struct ion_buffer *buffer) +{ +	if (WARN_ON(buffer->kmap_cnt > 0)) +		buffer->heap->ops->unmap_kernel(buffer->heap, buffer); +	buffer->heap->ops->unmap_dma(buffer->heap, buffer); +	buffer->heap->ops->free(buffer); +	if (buffer->pages) +		vfree(buffer->pages); +	kfree(buffer); +} + +static void _ion_buffer_destroy(struct kref *kref) +{ +	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); +	struct ion_heap *heap = buffer->heap; +	struct ion_device *dev = buffer->dev; + +	mutex_lock(&dev->buffer_lock); +	rb_erase(&buffer->node, &dev->buffers); +	mutex_unlock(&dev->buffer_lock); + +	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) +		ion_heap_freelist_add(heap, buffer); +	else +		ion_buffer_destroy(buffer); +} + +static void ion_buffer_get(struct ion_buffer *buffer) +{ +	kref_get(&buffer->ref); +} + +static int ion_buffer_put(struct ion_buffer *buffer) +{ +	return kref_put(&buffer->ref, _ion_buffer_destroy); +} + +static void ion_buffer_add_to_handle(struct ion_buffer *buffer) +{ +	mutex_lock(&buffer->lock); +	buffer->handle_count++; +	mutex_unlock(&buffer->lock); +} + +static void ion_buffer_remove_from_handle(struct ion_buffer *buffer) +{ +	/* +	 * when a buffer is removed from a handle, if it is not in +	 * any other handles, copy the taskcomm and the pid of the +	 * process it's being removed from into the buffer.  At this +	 * point there will be no way to track what processes this buffer is +	 * being used by, it only exists as a dma_buf file descriptor. 
+	 * The taskcomm and pid can provide a debug hint as to where this fd +	 * is in the system +	 */ +	mutex_lock(&buffer->lock); +	buffer->handle_count--; +	BUG_ON(buffer->handle_count < 0); +	if (!buffer->handle_count) { +		struct task_struct *task; + +		task = current->group_leader; +		get_task_comm(buffer->task_comm, task); +		buffer->pid = task_pid_nr(task); +	} +	mutex_unlock(&buffer->lock); +} + +static struct ion_handle *ion_handle_create(struct ion_client *client, +				     struct ion_buffer *buffer) +{ +	struct ion_handle *handle; + +	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); +	if (!handle) +		return ERR_PTR(-ENOMEM); +	kref_init(&handle->ref); +	RB_CLEAR_NODE(&handle->node); +	handle->client = client; +	ion_buffer_get(buffer); +	ion_buffer_add_to_handle(buffer); +	handle->buffer = buffer; + +	return handle; +} + +static void ion_handle_kmap_put(struct ion_handle *); + +static void ion_handle_destroy(struct kref *kref) +{ +	struct ion_handle *handle = container_of(kref, struct ion_handle, ref); +	struct ion_client *client = handle->client; +	struct ion_buffer *buffer = handle->buffer; + +	mutex_lock(&buffer->lock); +	while (handle->kmap_cnt) +		ion_handle_kmap_put(handle); +	mutex_unlock(&buffer->lock); + +	idr_remove(&client->idr, handle->id); +	if (!RB_EMPTY_NODE(&handle->node)) +		rb_erase(&handle->node, &client->handles); + +	ion_buffer_remove_from_handle(buffer); +	ion_buffer_put(buffer); + +	kfree(handle); +} + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) +{ +	return handle->buffer; +} + +static void ion_handle_get(struct ion_handle *handle) +{ +	kref_get(&handle->ref); +} + +static int ion_handle_put(struct ion_handle *handle) +{ +	struct ion_client *client = handle->client; +	int ret; + +	mutex_lock(&client->lock); +	ret = kref_put(&handle->ref, ion_handle_destroy); +	mutex_unlock(&client->lock); + +	return ret; +} + +static struct ion_handle *ion_handle_lookup(struct ion_client *client, +					    struct ion_buffer *buffer) +{ +	struct rb_node *n = client->handles.rb_node; + +	while (n) { +		struct ion_handle *entry = rb_entry(n, struct ion_handle, node); + +		if (buffer < entry->buffer) +			n = n->rb_left; +		else if (buffer > entry->buffer) +			n = n->rb_right; +		else +			return entry; +	} +	return ERR_PTR(-EINVAL); +} + +static struct ion_handle *ion_handle_get_by_id(struct ion_client *client, +						int id) +{ +	struct ion_handle *handle; + +	mutex_lock(&client->lock); +	handle = idr_find(&client->idr, id); +	if (handle) +		ion_handle_get(handle); +	mutex_unlock(&client->lock); + +	return handle ? 
handle : ERR_PTR(-EINVAL); +} + +static bool ion_handle_validate(struct ion_client *client, +				struct ion_handle *handle) +{ +	WARN_ON(!mutex_is_locked(&client->lock)); +	return idr_find(&client->idr, handle->id) == handle; +} + +static int ion_handle_add(struct ion_client *client, struct ion_handle *handle) +{ +	int id; +	struct rb_node **p = &client->handles.rb_node; +	struct rb_node *parent = NULL; +	struct ion_handle *entry; + +	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL); +	if (id < 0) +		return id; + +	handle->id = id; + +	while (*p) { +		parent = *p; +		entry = rb_entry(parent, struct ion_handle, node); + +		if (handle->buffer < entry->buffer) +			p = &(*p)->rb_left; +		else if (handle->buffer > entry->buffer) +			p = &(*p)->rb_right; +		else +			WARN(1, "%s: buffer already found.", __func__); +	} + +	rb_link_node(&handle->node, parent, p); +	rb_insert_color(&handle->node, &client->handles); + +	return 0; +} + +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, +			     size_t align, unsigned int heap_id_mask, +			     unsigned int flags) +{ +	struct ion_handle *handle; +	struct ion_device *dev = client->dev; +	struct ion_buffer *buffer = NULL; +	struct ion_heap *heap; +	int ret; + +	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__, +		 len, align, heap_id_mask, flags); +	/* +	 * traverse the list of heaps available in this system in priority +	 * order.  If the heap type is supported by the client, and matches the +	 * request of the caller allocate from it.  Repeat until allocate has +	 * succeeded or all heaps have been tried +	 */ +	len = PAGE_ALIGN(len); + +	if (!len) +		return ERR_PTR(-EINVAL); + +	down_read(&dev->lock); +	plist_for_each_entry(heap, &dev->heaps, node) { +		/* if the caller didn't specify this heap id */ +		if (!((1 << heap->id) & heap_id_mask)) +			continue; +		buffer = ion_buffer_create(heap, dev, len, align, flags); +		if (!IS_ERR(buffer)) +			break; +	} +	up_read(&dev->lock); + +	if (buffer == NULL) +		return ERR_PTR(-ENODEV); + +	if (IS_ERR(buffer)) +		return ERR_CAST(buffer); + +	handle = ion_handle_create(client, buffer); + +	/* +	 * ion_buffer_create will create a buffer with a ref_cnt of 1, +	 * and ion_handle_create will take a second reference, drop one here +	 */ +	ion_buffer_put(buffer); + +	if (IS_ERR(handle)) +		return handle; + +	mutex_lock(&client->lock); +	ret = ion_handle_add(client, handle); +	mutex_unlock(&client->lock); +	if (ret) { +		ion_handle_put(handle); +		handle = ERR_PTR(ret); +	} + +	return handle; +} +EXPORT_SYMBOL(ion_alloc); + +void ion_free(struct ion_client *client, struct ion_handle *handle) +{ +	bool valid_handle; + +	BUG_ON(client != handle->client); + +	mutex_lock(&client->lock); +	valid_handle = ion_handle_validate(client, handle); + +	if (!valid_handle) { +		WARN(1, "%s: invalid handle passed to free.\n", __func__); +		mutex_unlock(&client->lock); +		return; +	} +	mutex_unlock(&client->lock); +	ion_handle_put(handle); +} +EXPORT_SYMBOL(ion_free); + +int ion_phys(struct ion_client *client, struct ion_handle *handle, +	     ion_phys_addr_t *addr, size_t *len) +{ +	struct ion_buffer *buffer; +	int ret; + +	mutex_lock(&client->lock); +	if (!ion_handle_validate(client, handle)) { +		mutex_unlock(&client->lock); +		return -EINVAL; +	} + +	buffer = handle->buffer; + +	if (!buffer->heap->ops->phys) { +		pr_err("%s: ion_phys is not implemented by this heap.\n", +		       __func__); +		mutex_unlock(&client->lock); +		return -ENODEV; +	} +	mutex_unlock(&client->lock); +	ret = 
buffer->heap->ops->phys(buffer->heap, buffer, addr, len); +	return ret; +} +EXPORT_SYMBOL(ion_phys); + +static void *ion_buffer_kmap_get(struct ion_buffer *buffer) +{ +	void *vaddr; + +	if (buffer->kmap_cnt) { +		buffer->kmap_cnt++; +		return buffer->vaddr; +	} +	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); +	if (WARN_ONCE(vaddr == NULL, +			"heap->ops->map_kernel should return ERR_PTR on error")) +		return ERR_PTR(-EINVAL); +	if (IS_ERR(vaddr)) +		return vaddr; +	buffer->vaddr = vaddr; +	buffer->kmap_cnt++; +	return vaddr; +} + +static void *ion_handle_kmap_get(struct ion_handle *handle) +{ +	struct ion_buffer *buffer = handle->buffer; +	void *vaddr; + +	if (handle->kmap_cnt) { +		handle->kmap_cnt++; +		return buffer->vaddr; +	} +	vaddr = ion_buffer_kmap_get(buffer); +	if (IS_ERR(vaddr)) +		return vaddr; +	handle->kmap_cnt++; +	return vaddr; +} + +static void ion_buffer_kmap_put(struct ion_buffer *buffer) +{ +	buffer->kmap_cnt--; +	if (!buffer->kmap_cnt) { +		buffer->heap->ops->unmap_kernel(buffer->heap, buffer); +		buffer->vaddr = NULL; +	} +} + +static void ion_handle_kmap_put(struct ion_handle *handle) +{ +	struct ion_buffer *buffer = handle->buffer; + +	if (!handle->kmap_cnt) { +		WARN(1, "%s: Double unmap detected! bailing...\n", __func__); +		return; +	} +	handle->kmap_cnt--; +	if (!handle->kmap_cnt) +		ion_buffer_kmap_put(buffer); +} + +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) +{ +	struct ion_buffer *buffer; +	void *vaddr; + +	mutex_lock(&client->lock); +	if (!ion_handle_validate(client, handle)) { +		pr_err("%s: invalid handle passed to map_kernel.\n", +		       __func__); +		mutex_unlock(&client->lock); +		return ERR_PTR(-EINVAL); +	} + +	buffer = handle->buffer; + +	if (!handle->buffer->heap->ops->map_kernel) { +		pr_err("%s: map_kernel is not implemented by this heap.\n", +		       __func__); +		mutex_unlock(&client->lock); +		return ERR_PTR(-ENODEV); +	} + +	mutex_lock(&buffer->lock); +	vaddr = ion_handle_kmap_get(handle); +	mutex_unlock(&buffer->lock); +	mutex_unlock(&client->lock); +	return vaddr; +} +EXPORT_SYMBOL(ion_map_kernel); + +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) +{ +	struct ion_buffer *buffer; + +	mutex_lock(&client->lock); +	buffer = handle->buffer; +	mutex_lock(&buffer->lock); +	ion_handle_kmap_put(handle); +	mutex_unlock(&buffer->lock); +	mutex_unlock(&client->lock); +} +EXPORT_SYMBOL(ion_unmap_kernel); + +static int ion_debug_client_show(struct seq_file *s, void *unused) +{ +	struct ion_client *client = s->private; +	struct rb_node *n; +	size_t sizes[ION_NUM_HEAP_IDS] = {0}; +	const char *names[ION_NUM_HEAP_IDS] = {NULL}; +	int i; + +	mutex_lock(&client->lock); +	for (n = rb_first(&client->handles); n; n = rb_next(n)) { +		struct ion_handle *handle = rb_entry(n, struct ion_handle, +						     node); +		unsigned int id = handle->buffer->heap->id; + +		if (!names[id]) +			names[id] = handle->buffer->heap->name; +		sizes[id] += handle->buffer->size; +	} +	mutex_unlock(&client->lock); + +	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); +	for (i = 0; i < ION_NUM_HEAP_IDS; i++) { +		if (!names[i]) +			continue; +		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]); +	} +	return 0; +} + +static int ion_debug_client_open(struct inode *inode, struct file *file) +{ +	return single_open(file, ion_debug_client_show, inode->i_private); +} + +static const struct file_operations debug_client_fops = { +	.open = ion_debug_client_open, +	.read = seq_read, +	.llseek = 
seq_lseek, +	.release = single_release, +}; + +static int ion_get_client_serial(const struct rb_root *root, +					const unsigned char *name) +{ +	int serial = -1; +	struct rb_node *node; + +	for (node = rb_first(root); node; node = rb_next(node)) { +		struct ion_client *client = rb_entry(node, struct ion_client, +						node); + +		if (strcmp(client->name, name)) +			continue; +		serial = max(serial, client->display_serial); +	} +	return serial + 1; +} + +struct ion_client *ion_client_create(struct ion_device *dev, +				     const char *name) +{ +	struct ion_client *client; +	struct task_struct *task; +	struct rb_node **p; +	struct rb_node *parent = NULL; +	struct ion_client *entry; +	pid_t pid; + +	if (!name) { +		pr_err("%s: Name cannot be null\n", __func__); +		return ERR_PTR(-EINVAL); +	} + +	get_task_struct(current->group_leader); +	task_lock(current->group_leader); +	pid = task_pid_nr(current->group_leader); +	/* don't bother to store task struct for kernel threads, +	   they can't be killed anyway */ +	if (current->group_leader->flags & PF_KTHREAD) { +		put_task_struct(current->group_leader); +		task = NULL; +	} else { +		task = current->group_leader; +	} +	task_unlock(current->group_leader); + +	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); +	if (!client) +		goto err_put_task_struct; + +	client->dev = dev; +	client->handles = RB_ROOT; +	idr_init(&client->idr); +	mutex_init(&client->lock); +	client->task = task; +	client->pid = pid; +	client->name = kstrdup(name, GFP_KERNEL); +	if (!client->name) +		goto err_free_client; + +	down_write(&dev->lock); +	client->display_serial = ion_get_client_serial(&dev->clients, name); +	client->display_name = kasprintf( +		GFP_KERNEL, "%s-%d", name, client->display_serial); +	if (!client->display_name) { +		up_write(&dev->lock); +		goto err_free_client_name; +	} +	p = &dev->clients.rb_node; +	while (*p) { +		parent = *p; +		entry = rb_entry(parent, struct ion_client, node); + +		if (client < entry) +			p = &(*p)->rb_left; +		else if (client > entry) +			p = &(*p)->rb_right; +	} +	rb_link_node(&client->node, parent, p); +	rb_insert_color(&client->node, &dev->clients); + +	client->debug_root = debugfs_create_file(client->display_name, 0664, +						dev->clients_debug_root, +						client, &debug_client_fops); +	if (!client->debug_root) { +		char buf[256], *path; +		path = dentry_path(dev->clients_debug_root, buf, 256); +		pr_err("Failed to create client debugfs at %s/%s\n", +			path, client->display_name); +	} + +	up_write(&dev->lock); + +	return client; + +err_free_client_name: +	kfree(client->name); +err_free_client: +	kfree(client); +err_put_task_struct: +	if (task) +		put_task_struct(current->group_leader); +	return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL(ion_client_create); + +void ion_client_destroy(struct ion_client *client) +{ +	struct ion_device *dev = client->dev; +	struct rb_node *n; + +	pr_debug("%s: %d\n", __func__, __LINE__); +	while ((n = rb_first(&client->handles))) { +		struct ion_handle *handle = rb_entry(n, struct ion_handle, +						     node); +		ion_handle_destroy(&handle->ref); +	} + +	idr_destroy(&client->idr); + +	down_write(&dev->lock); +	if (client->task) +		put_task_struct(client->task); +	rb_erase(&client->node, &dev->clients); +	debugfs_remove_recursive(client->debug_root); +	up_write(&dev->lock); + +	kfree(client->display_name); +	kfree(client->name); +	kfree(client); +} +EXPORT_SYMBOL(ion_client_destroy); + +struct sg_table *ion_sg_table(struct ion_client *client, +			      struct ion_handle *handle) +{ +	struct 
ion_buffer *buffer; +	struct sg_table *table; + +	mutex_lock(&client->lock); +	if (!ion_handle_validate(client, handle)) { +		pr_err("%s: invalid handle passed to map_dma.\n", +		       __func__); +		mutex_unlock(&client->lock); +		return ERR_PTR(-EINVAL); +	} +	buffer = handle->buffer; +	table = buffer->sg_table; +	mutex_unlock(&client->lock); +	return table; +} +EXPORT_SYMBOL(ion_sg_table); + +static void ion_buffer_sync_for_device(struct ion_buffer *buffer, +				       struct device *dev, +				       enum dma_data_direction direction); + +static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, +					enum dma_data_direction direction) +{ +	struct dma_buf *dmabuf = attachment->dmabuf; +	struct ion_buffer *buffer = dmabuf->priv; + +	ion_buffer_sync_for_device(buffer, attachment->dev, direction); +	return buffer->sg_table; +} + +static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, +			      struct sg_table *table, +			      enum dma_data_direction direction) +{ +} + +void ion_pages_sync_for_device(struct device *dev, struct page *page, +		size_t size, enum dma_data_direction dir) +{ +	struct scatterlist sg; + +	sg_init_table(&sg, 1); +	sg_set_page(&sg, page, size, 0); +	/* +	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid +	 * for the the targeted device, but this works on the currently targeted +	 * hardware. +	 */ +	sg_dma_address(&sg) = page_to_phys(page); +	dma_sync_sg_for_device(dev, &sg, 1, dir); +} + +struct ion_vma_list { +	struct list_head list; +	struct vm_area_struct *vma; +}; + +static void ion_buffer_sync_for_device(struct ion_buffer *buffer, +				       struct device *dev, +				       enum dma_data_direction dir) +{ +	struct ion_vma_list *vma_list; +	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; +	int i; + +	pr_debug("%s: syncing for device %s\n", __func__, +		 dev ? 
dev_name(dev) : "null"); + +	if (!ion_buffer_fault_user_mappings(buffer)) +		return; + +	mutex_lock(&buffer->lock); +	for (i = 0; i < pages; i++) { +		struct page *page = buffer->pages[i]; + +		if (ion_buffer_page_is_dirty(page)) +			ion_pages_sync_for_device(dev, ion_buffer_page(page), +							PAGE_SIZE, dir); + +		ion_buffer_page_clean(buffer->pages + i); +	} +	list_for_each_entry(vma_list, &buffer->vmas, list) { +		struct vm_area_struct *vma = vma_list->vma; + +		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, +			       NULL); +	} +	mutex_unlock(&buffer->lock); +} + +static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ +	struct ion_buffer *buffer = vma->vm_private_data; +	unsigned long pfn; +	int ret; + +	mutex_lock(&buffer->lock); +	ion_buffer_page_dirty(buffer->pages + vmf->pgoff); +	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); + +	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); +	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); +	mutex_unlock(&buffer->lock); +	if (ret) +		return VM_FAULT_ERROR; + +	return VM_FAULT_NOPAGE; +} + +static void ion_vm_open(struct vm_area_struct *vma) +{ +	struct ion_buffer *buffer = vma->vm_private_data; +	struct ion_vma_list *vma_list; + +	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL); +	if (!vma_list) +		return; +	vma_list->vma = vma; +	mutex_lock(&buffer->lock); +	list_add(&vma_list->list, &buffer->vmas); +	mutex_unlock(&buffer->lock); +	pr_debug("%s: adding %p\n", __func__, vma); +} + +static void ion_vm_close(struct vm_area_struct *vma) +{ +	struct ion_buffer *buffer = vma->vm_private_data; +	struct ion_vma_list *vma_list, *tmp; + +	pr_debug("%s\n", __func__); +	mutex_lock(&buffer->lock); +	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) { +		if (vma_list->vma != vma) +			continue; +		list_del(&vma_list->list); +		kfree(vma_list); +		pr_debug("%s: deleting %p\n", __func__, vma); +		break; +	} +	mutex_unlock(&buffer->lock); +} + +static struct vm_operations_struct ion_vma_ops = { +	.open = ion_vm_open, +	.close = ion_vm_close, +	.fault = ion_vm_fault, +}; + +static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ +	struct ion_buffer *buffer = dmabuf->priv; +	int ret = 0; + +	if (!buffer->heap->ops->map_user) { +		pr_err("%s: this heap does not define a method for mapping to userspace\n", +			__func__); +		return -EINVAL; +	} + +	if (ion_buffer_fault_user_mappings(buffer)) { +		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | +							VM_DONTDUMP; +		vma->vm_private_data = buffer; +		vma->vm_ops = &ion_vma_ops; +		ion_vm_open(vma); +		return 0; +	} + +	if (!(buffer->flags & ION_FLAG_CACHED)) +		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + +	mutex_lock(&buffer->lock); +	/* now map it to userspace */ +	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); +	mutex_unlock(&buffer->lock); + +	if (ret) +		pr_err("%s: failure mapping buffer to userspace\n", +		       __func__); + +	return ret; +} + +static void ion_dma_buf_release(struct dma_buf *dmabuf) +{ +	struct ion_buffer *buffer = dmabuf->priv; + +	ion_buffer_put(buffer); +} + +static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) +{ +	struct ion_buffer *buffer = dmabuf->priv; + +	return buffer->vaddr + offset * PAGE_SIZE; +} + +static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, +			       void *ptr) +{ +	return; +} + +static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, +					
size_t len, +					enum dma_data_direction direction) +{ +	struct ion_buffer *buffer = dmabuf->priv; +	void *vaddr; + +	if (!buffer->heap->ops->map_kernel) { +		pr_err("%s: map kernel is not implemented by this heap.\n", +		       __func__); +		return -ENODEV; +	} + +	mutex_lock(&buffer->lock); +	vaddr = ion_buffer_kmap_get(buffer); +	mutex_unlock(&buffer->lock); +	return PTR_ERR_OR_ZERO(vaddr); +} + +static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, +				       size_t len, +				       enum dma_data_direction direction) +{ +	struct ion_buffer *buffer = dmabuf->priv; + +	mutex_lock(&buffer->lock); +	ion_buffer_kmap_put(buffer); +	mutex_unlock(&buffer->lock); +} + +static struct dma_buf_ops dma_buf_ops = { +	.map_dma_buf = ion_map_dma_buf, +	.unmap_dma_buf = ion_unmap_dma_buf, +	.mmap = ion_mmap, +	.release = ion_dma_buf_release, +	.begin_cpu_access = ion_dma_buf_begin_cpu_access, +	.end_cpu_access = ion_dma_buf_end_cpu_access, +	.kmap_atomic = ion_dma_buf_kmap, +	.kunmap_atomic = ion_dma_buf_kunmap, +	.kmap = ion_dma_buf_kmap, +	.kunmap = ion_dma_buf_kunmap, +}; + +struct dma_buf *ion_share_dma_buf(struct ion_client *client, +						struct ion_handle *handle) +{ +	struct ion_buffer *buffer; +	struct dma_buf *dmabuf; +	bool valid_handle; + +	mutex_lock(&client->lock); +	valid_handle = ion_handle_validate(client, handle); +	if (!valid_handle) { +		WARN(1, "%s: invalid handle passed to share.\n", __func__); +		mutex_unlock(&client->lock); +		return ERR_PTR(-EINVAL); +	} +	buffer = handle->buffer; +	ion_buffer_get(buffer); +	mutex_unlock(&client->lock); + +	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR); +	if (IS_ERR(dmabuf)) { +		ion_buffer_put(buffer); +		return dmabuf; +	} + +	return dmabuf; +} +EXPORT_SYMBOL(ion_share_dma_buf); + +int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) +{ +	struct dma_buf *dmabuf; +	int fd; + +	dmabuf = ion_share_dma_buf(client, handle); +	if (IS_ERR(dmabuf)) +		return PTR_ERR(dmabuf); + +	fd = dma_buf_fd(dmabuf, O_CLOEXEC); +	if (fd < 0) +		dma_buf_put(dmabuf); + +	return fd; +} +EXPORT_SYMBOL(ion_share_dma_buf_fd); + +struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) +{ +	struct dma_buf *dmabuf; +	struct ion_buffer *buffer; +	struct ion_handle *handle; +	int ret; + +	dmabuf = dma_buf_get(fd); +	if (IS_ERR(dmabuf)) +		return ERR_CAST(dmabuf); +	/* if this memory came from ion */ + +	if (dmabuf->ops != &dma_buf_ops) { +		pr_err("%s: can not import dmabuf from another exporter\n", +		       __func__); +		dma_buf_put(dmabuf); +		return ERR_PTR(-EINVAL); +	} +	buffer = dmabuf->priv; + +	mutex_lock(&client->lock); +	/* if a handle exists for this buffer just take a reference to it */ +	handle = ion_handle_lookup(client, buffer); +	if (!IS_ERR(handle)) { +		ion_handle_get(handle); +		mutex_unlock(&client->lock); +		goto end; +	} +	mutex_unlock(&client->lock); + +	handle = ion_handle_create(client, buffer); +	if (IS_ERR(handle)) +		goto end; + +	mutex_lock(&client->lock); +	ret = ion_handle_add(client, handle); +	mutex_unlock(&client->lock); +	if (ret) { +		ion_handle_put(handle); +		handle = ERR_PTR(ret); +	} + +end: +	dma_buf_put(dmabuf); +	return handle; +} +EXPORT_SYMBOL(ion_import_dma_buf); + +static int ion_sync_for_device(struct ion_client *client, int fd) +{ +	struct dma_buf *dmabuf; +	struct ion_buffer *buffer; + +	dmabuf = dma_buf_get(fd); +	if (IS_ERR(dmabuf)) +		return PTR_ERR(dmabuf); + +	/* if this memory came from ion */ +	if (dmabuf->ops != &dma_buf_ops) { 
+		pr_err("%s: can not sync dmabuf from another exporter\n", +		       __func__); +		dma_buf_put(dmabuf); +		return -EINVAL; +	} +	buffer = dmabuf->priv; + +	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, +			       buffer->sg_table->nents, DMA_BIDIRECTIONAL); +	dma_buf_put(dmabuf); +	return 0; +} + +/* fix up the cases where the ioctl direction bits are incorrect */ +static unsigned int ion_ioctl_dir(unsigned int cmd) +{ +	switch (cmd) { +	case ION_IOC_SYNC: +	case ION_IOC_FREE: +	case ION_IOC_CUSTOM: +		return _IOC_WRITE; +	default: +		return _IOC_DIR(cmd); +	} +} + +static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ +	struct ion_client *client = filp->private_data; +	struct ion_device *dev = client->dev; +	struct ion_handle *cleanup_handle = NULL; +	int ret = 0; +	unsigned int dir; + +	union { +		struct ion_fd_data fd; +		struct ion_allocation_data allocation; +		struct ion_handle_data handle; +		struct ion_custom_data custom; +	} data; + +	dir = ion_ioctl_dir(cmd); + +	if (_IOC_SIZE(cmd) > sizeof(data)) +		return -EINVAL; + +	if (dir & _IOC_WRITE) +		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) +			return -EFAULT; + +	switch (cmd) { +	case ION_IOC_ALLOC: +	{ +		struct ion_handle *handle; + +		handle = ion_alloc(client, data.allocation.len, +						data.allocation.align, +						data.allocation.heap_id_mask, +						data.allocation.flags); +		if (IS_ERR(handle)) +			return PTR_ERR(handle); + +		data.allocation.handle = handle->id; + +		cleanup_handle = handle; +		break; +	} +	case ION_IOC_FREE: +	{ +		struct ion_handle *handle; + +		handle = ion_handle_get_by_id(client, data.handle.handle); +		if (IS_ERR(handle)) +			return PTR_ERR(handle); +		ion_free(client, handle); +		ion_handle_put(handle); +		break; +	} +	case ION_IOC_SHARE: +	case ION_IOC_MAP: +	{ +		struct ion_handle *handle; + +		handle = ion_handle_get_by_id(client, data.handle.handle); +		if (IS_ERR(handle)) +			return PTR_ERR(handle); +		data.fd.fd = ion_share_dma_buf_fd(client, handle); +		ion_handle_put(handle); +		if (data.fd.fd < 0) +			ret = data.fd.fd; +		break; +	} +	case ION_IOC_IMPORT: +	{ +		struct ion_handle *handle; + +		handle = ion_import_dma_buf(client, data.fd.fd); +		if (IS_ERR(handle)) +			ret = PTR_ERR(handle); +		else +			data.handle.handle = handle->id; +		break; +	} +	case ION_IOC_SYNC: +	{ +		ret = ion_sync_for_device(client, data.fd.fd); +		break; +	} +	case ION_IOC_CUSTOM: +	{ +		if (!dev->custom_ioctl) +			return -ENOTTY; +		ret = dev->custom_ioctl(client, data.custom.cmd, +						data.custom.arg); +		break; +	} +	default: +		return -ENOTTY; +	} + +	if (dir & _IOC_READ) { +		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) { +			if (cleanup_handle) +				ion_free(client, cleanup_handle); +			return -EFAULT; +		} +	} +	return ret; +} + +static int ion_release(struct inode *inode, struct file *file) +{ +	struct ion_client *client = file->private_data; + +	pr_debug("%s: %d\n", __func__, __LINE__); +	ion_client_destroy(client); +	return 0; +} + +static int ion_open(struct inode *inode, struct file *file) +{ +	struct miscdevice *miscdev = file->private_data; +	struct ion_device *dev = container_of(miscdev, struct ion_device, dev); +	struct ion_client *client; +	char debug_name[64]; + +	pr_debug("%s: %d\n", __func__, __LINE__); +	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader)); +	client = ion_client_create(dev, debug_name); +	if (IS_ERR(client)) +		return PTR_ERR(client); +	file->private_data = client; + +	return 0; +} + 
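/*
 * Illustrative sketch (not part of the driver): a minimal in-kernel consumer
 * of the client API implemented above -- ion_client_create(), ion_alloc(),
 * ion_sg_table(), ion_map_kernel() and ion_free().  The ion_device pointer
 * and the heap id are assumptions; a real caller gets the device from
 * whoever called ion_device_create() and picks a heap id from its platform
 * data.
 */
static int example_kernel_client(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	struct sg_table *table;
	void *vaddr;
	int ret = 0;

	client = ion_client_create(idev, "example");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* one cached page from heap id 0 (assumed to exist) */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, 1 << 0, ION_FLAG_CACHED);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_client;
	}

	table = ion_sg_table(client, handle);	/* scatterlist for DMA setup */
	if (IS_ERR(table))
		pr_debug("example: no sg table for handle\n");

	vaddr = ion_map_kernel(client, handle);	/* CPU access from the kernel */
	if (!IS_ERR(vaddr)) {
		memset(vaddr, 0, PAGE_SIZE);
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);
out_client:
	ion_client_destroy(client);
	return ret;
}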
+static const struct file_operations ion_fops = { +	.owner          = THIS_MODULE, +	.open           = ion_open, +	.release        = ion_release, +	.unlocked_ioctl = ion_ioctl, +	.compat_ioctl   = compat_ion_ioctl, +}; + +static size_t ion_debug_heap_total(struct ion_client *client, +				   unsigned int id) +{ +	size_t size = 0; +	struct rb_node *n; + +	mutex_lock(&client->lock); +	for (n = rb_first(&client->handles); n; n = rb_next(n)) { +		struct ion_handle *handle = rb_entry(n, +						     struct ion_handle, +						     node); +		if (handle->buffer->heap->id == id) +			size += handle->buffer->size; +	} +	mutex_unlock(&client->lock); +	return size; +} + +static int ion_debug_heap_show(struct seq_file *s, void *unused) +{ +	struct ion_heap *heap = s->private; +	struct ion_device *dev = heap->dev; +	struct rb_node *n; +	size_t total_size = 0; +	size_t total_orphaned_size = 0; + +	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size"); +	seq_puts(s, "----------------------------------------------------\n"); + +	for (n = rb_first(&dev->clients); n; n = rb_next(n)) { +		struct ion_client *client = rb_entry(n, struct ion_client, +						     node); +		size_t size = ion_debug_heap_total(client, heap->id); + +		if (!size) +			continue; +		if (client->task) { +			char task_comm[TASK_COMM_LEN]; + +			get_task_comm(task_comm, client->task); +			seq_printf(s, "%16.s %16u %16zu\n", task_comm, +				   client->pid, size); +		} else { +			seq_printf(s, "%16.s %16u %16zu\n", client->name, +				   client->pid, size); +		} +	} +	seq_puts(s, "----------------------------------------------------\n"); +	seq_puts(s, "orphaned allocations (info is from last known client):\n"); +	mutex_lock(&dev->buffer_lock); +	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { +		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, +						     node); +		if (buffer->heap->id != heap->id) +			continue; +		total_size += buffer->size; +		if (!buffer->handle_count) { +			seq_printf(s, "%16.s %16u %16zu %d %d\n", +				   buffer->task_comm, buffer->pid, +				   buffer->size, buffer->kmap_cnt, +				   atomic_read(&buffer->ref.refcount)); +			total_orphaned_size += buffer->size; +		} +	} +	mutex_unlock(&dev->buffer_lock); +	seq_puts(s, "----------------------------------------------------\n"); +	seq_printf(s, "%16.s %16zu\n", "total orphaned", +		   total_orphaned_size); +	seq_printf(s, "%16.s %16zu\n", "total ", total_size); +	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) +		seq_printf(s, "%16.s %16zu\n", "deferred free", +				heap->free_list_size); +	seq_puts(s, "----------------------------------------------------\n"); + +	if (heap->debug_show) +		heap->debug_show(heap, s, unused); + +	return 0; +} + +static int ion_debug_heap_open(struct inode *inode, struct file *file) +{ +	return single_open(file, ion_debug_heap_show, inode->i_private); +} + +static const struct file_operations debug_heap_fops = { +	.open = ion_debug_heap_open, +	.read = seq_read, +	.llseek = seq_lseek, +	.release = single_release, +}; + +#ifdef DEBUG_HEAP_SHRINKER +static int debug_shrink_set(void *data, u64 val) +{ +	struct ion_heap *heap = data; +	struct shrink_control sc; +	int objs; + +	sc.gfp_mask = -1; +	sc.nr_to_scan = 0; + +	if (!val) +		return 0; + +	objs = heap->shrinker.shrink(&heap->shrinker, &sc); +	sc.nr_to_scan = objs; + +	heap->shrinker.shrink(&heap->shrinker, &sc); +	return 0; +} + +static int debug_shrink_get(void *data, u64 *val) +{ +	struct ion_heap *heap = data; +	struct shrink_control sc; +	int objs; + +	sc.gfp_mask = -1; +	
sc.nr_to_scan = 0; + +	objs = heap->shrinker.shrink(&heap->shrinker, &sc); +	*val = objs; +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, +			debug_shrink_set, "%llu\n"); +#endif + +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) +{ +	struct dentry *debug_file; + +	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma || +	    !heap->ops->unmap_dma) +		pr_err("%s: can not add heap with invalid ops struct.\n", +		       __func__); + +	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) +		ion_heap_init_deferred_free(heap); + +	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) +		ion_heap_init_shrinker(heap); + +	heap->dev = dev; +	down_write(&dev->lock); +	/* use negative heap->id to reverse the priority -- when traversing +	   the list later attempt higher id numbers first */ +	plist_node_init(&heap->node, -heap->id); +	plist_add(&heap->node, &dev->heaps); +	debug_file = debugfs_create_file(heap->name, 0664, +					dev->heaps_debug_root, heap, +					&debug_heap_fops); + +	if (!debug_file) { +		char buf[256], *path; + +		path = dentry_path(dev->heaps_debug_root, buf, 256); +		pr_err("Failed to create heap debugfs at %s/%s\n", +			path, heap->name); +	} + +#ifdef DEBUG_HEAP_SHRINKER +	if (heap->shrinker.shrink) { +		char debug_name[64]; + +		snprintf(debug_name, 64, "%s_shrink", heap->name); +		debug_file = debugfs_create_file( +			debug_name, 0644, dev->heaps_debug_root, heap, +			&debug_shrink_fops); +		if (!debug_file) { +			char buf[256], *path; + +			path = dentry_path(dev->heaps_debug_root, buf, 256); +			pr_err("Failed to create heap shrinker debugfs at %s/%s\n", +				path, debug_name); +		} +	} +#endif +	up_write(&dev->lock); +} + +struct ion_device *ion_device_create(long (*custom_ioctl) +				     (struct ion_client *client, +				      unsigned int cmd, +				      unsigned long arg)) +{ +	struct ion_device *idev; +	int ret; + +	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); +	if (!idev) +		return ERR_PTR(-ENOMEM); + +	idev->dev.minor = MISC_DYNAMIC_MINOR; +	idev->dev.name = "ion"; +	idev->dev.fops = &ion_fops; +	idev->dev.parent = NULL; +	ret = misc_register(&idev->dev); +	if (ret) { +		pr_err("ion: failed to register misc device.\n"); +		return ERR_PTR(ret); +	} + +	idev->debug_root = debugfs_create_dir("ion", NULL); +	if (!idev->debug_root) { +		pr_err("ion: failed to create debugfs root directory.\n"); +		goto debugfs_done; +	} +	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root); +	if (!idev->heaps_debug_root) { +		pr_err("ion: failed to create debugfs heaps directory.\n"); +		goto debugfs_done; +	} +	idev->clients_debug_root = debugfs_create_dir("clients", +						idev->debug_root); +	if (!idev->clients_debug_root) +		pr_err("ion: failed to create debugfs clients directory.\n"); + +debugfs_done: + +	idev->custom_ioctl = custom_ioctl; +	idev->buffers = RB_ROOT; +	mutex_init(&idev->buffer_lock); +	init_rwsem(&idev->lock); +	plist_head_init(&idev->heaps); +	idev->clients = RB_ROOT; +	return idev; +} + +void ion_device_destroy(struct ion_device *dev) +{ +	misc_deregister(&dev->dev); +	debugfs_remove_recursive(dev->debug_root); +	/* XXX need to free the heaps and clients ? 
*/ +	kfree(dev); +} + +void __init ion_reserve(struct ion_platform_data *data) +{ +	int i; + +	for (i = 0; i < data->nr; i++) { +		if (data->heaps[i].size == 0) +			continue; + +		if (data->heaps[i].base == 0) { +			phys_addr_t paddr; + +			paddr = memblock_alloc_base(data->heaps[i].size, +						    data->heaps[i].align, +						    MEMBLOCK_ALLOC_ANYWHERE); +			if (!paddr) { +				pr_err("%s: error allocating memblock for heap %d\n", +					__func__, i); +				continue; +			} +			data->heaps[i].base = paddr; +		} else { +			int ret = memblock_reserve(data->heaps[i].base, +					       data->heaps[i].size); +			if (ret) +				pr_err("memblock reserve of %zx@%lx failed\n", +				       data->heaps[i].size, +				       data->heaps[i].base); +		} +		pr_info("%s: %s reserved base %lx size %zu\n", __func__, +			data->heaps[i].name, +			data->heaps[i].base, +			data->heaps[i].size); +	} +} diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h new file mode 100644 index 00000000000..dcd2a0cdb19 --- /dev/null +++ b/drivers/staging/android/ion/ion.h @@ -0,0 +1,204 @@ +/* + * drivers/staging/android/ion/ion.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_ION_H +#define _LINUX_ION_H + +#include <linux/types.h> + +#include "../uapi/ion.h" + +struct ion_handle; +struct ion_device; +struct ion_heap; +struct ion_mapper; +struct ion_client; +struct ion_buffer; + +/* This should be removed some day when phys_addr_t's are fully +   plumbed in the kernel, and all instances of ion_phys_addr_t should +   be converted to phys_addr_t.  For the time being many kernel interfaces +   do not accept phys_addr_t's that would have to */ +#define ion_phys_addr_t unsigned long + +/** + * struct ion_platform_heap - defines a heap in the given platform + * @type:	type of the heap from ion_heap_type enum + * @id:		unique identifier for heap.  When allocating higher numbers + *		will be allocated from first.  At allocation these are passed + *		as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS. + * @name:	used for debug purposes + * @base:	base address of heap in physical memory if applicable + * @size:	size of the heap in bytes if applicable + * @align:	required alignment in physical memory if applicable + * @priv:	private info passed from the board file + * + * Provided by the board file. + */ +struct ion_platform_heap { +	enum ion_heap_type type; +	unsigned int id; +	const char *name; +	ion_phys_addr_t base; +	size_t size; +	ion_phys_addr_t align; +	void *priv; +}; + +/** + * struct ion_platform_data - array of platform heaps passed from board file + * @nr:		number of structures in the array + * @heaps:	array of platform_heap structions + * + * Provided by the board file in the form of platform data to a platform device. 
+ */ +struct ion_platform_data { +	int nr; +	struct ion_platform_heap *heaps; +}; + +/** + * ion_reserve() - reserve memory for ion heaps if applicable + * @data:	platform data specifying starting physical address and + *		size + * + * Calls memblock reserve to set aside memory for heaps that are + * located at specific memory addresses or of specfic sizes not + * managed by the kernel + */ +void ion_reserve(struct ion_platform_data *data); + +/** + * ion_client_create() -  allocate a client and returns it + * @dev:		the global ion device + * @heap_type_mask:	mask of heaps this client can allocate from + * @name:		used for debugging + */ +struct ion_client *ion_client_create(struct ion_device *dev, +				     const char *name); + +/** + * ion_client_destroy() -  free's a client and all it's handles + * @client:	the client + * + * Free the provided client and all it's resources including + * any handles it is holding. + */ +void ion_client_destroy(struct ion_client *client); + +/** + * ion_alloc - allocate ion memory + * @client:		the client + * @len:		size of the allocation + * @align:		requested allocation alignment, lots of hardware blocks + *			have alignment requirements of some kind + * @heap_id_mask:	mask of heaps to allocate from, if multiple bits are set + *			heaps will be tried in order from highest to lowest + *			id + * @flags:		heap flags, the low 16 bits are consumed by ion, the + *			high 16 bits are passed on to the respective heap and + *			can be heap custom + * + * Allocate memory in one of the heaps provided in heap mask and return + * an opaque handle to it. + */ +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, +			     size_t align, unsigned int heap_id_mask, +			     unsigned int flags); + +/** + * ion_free - free a handle + * @client:	the client + * @handle:	the handle to free + * + * Free the provided handle. + */ +void ion_free(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_phys - returns the physical address and len of a handle + * @client:	the client + * @handle:	the handle + * @addr:	a pointer to put the address in + * @len:	a pointer to put the length in + * + * This function queries the heap for a particular handle to get the + * handle's physical address.  It't output is only correct if + * a heap returns physically contiguous memory -- in other cases + * this api should not be implemented -- ion_sg_table should be used + * instead.  Returns -EINVAL if the handle is invalid.  This has + * no implications on the reference counting of the handle -- + * the returned value may not be valid if the caller is not + * holding a reference. + */ +int ion_phys(struct ion_client *client, struct ion_handle *handle, +	     ion_phys_addr_t *addr, size_t *len); + +/** + * ion_map_dma - return an sg_table describing a handle + * @client:	the client + * @handle:	the handle + * + * This function returns the sg_table describing + * a particular ion handle. + */ +struct sg_table *ion_sg_table(struct ion_client *client, +			      struct ion_handle *handle); + +/** + * ion_map_kernel - create mapping for the given handle + * @client:	the client + * @handle:	handle to map + * + * Map the given handle into the kernel and return a kernel address that + * can be used to access this address. 
+ */ +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_unmap_kernel() - destroy a kernel mapping for a handle + * @client:	the client + * @handle:	handle to unmap + */ +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_share_dma_buf() - share buffer as dma-buf + * @client:	the client + * @handle:	the handle + */ +struct dma_buf *ion_share_dma_buf(struct ion_client *client, +						struct ion_handle *handle); + +/** + * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd + * @client:	the client + * @handle:	the handle + */ +int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle); + +/** + * ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle + * @client:	the client + * @fd:		the dma-buf fd + * + * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf, + * import that fd and return a handle representing it.  If a dma-buf from + * another exporter is passed in this function will return ERR_PTR(-EINVAL) + */ +struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd); + +#endif /* _LINUX_ION_H */ diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c new file mode 100644 index 00000000000..dcb6f2196c8 --- /dev/null +++ b/drivers/staging/android/ion/ion_carveout_heap.c @@ -0,0 +1,194 @@ +/* + * drivers/staging/android/ion/ion_carveout_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/spinlock.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +struct ion_carveout_heap { +	struct ion_heap heap; +	struct gen_pool *pool; +	ion_phys_addr_t base; +}; + +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, +				      unsigned long size, +				      unsigned long align) +{ +	struct ion_carveout_heap *carveout_heap = +		container_of(heap, struct ion_carveout_heap, heap); +	unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); + +	if (!offset) +		return ION_CARVEOUT_ALLOCATE_FAIL; + +	return offset; +} + +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, +		       unsigned long size) +{ +	struct ion_carveout_heap *carveout_heap = +		container_of(heap, struct ion_carveout_heap, heap); + +	if (addr == ION_CARVEOUT_ALLOCATE_FAIL) +		return; +	gen_pool_free(carveout_heap->pool, addr, size); +} + +static int ion_carveout_heap_phys(struct ion_heap *heap, +				  struct ion_buffer *buffer, +				  ion_phys_addr_t *addr, size_t *len) +{ +	struct sg_table *table = buffer->priv_virt; +	struct page *page = sg_page(table->sgl); +	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + +	*addr = paddr; +	*len = buffer->size; +	return 0; +} + +static int ion_carveout_heap_allocate(struct ion_heap *heap, +				      struct ion_buffer *buffer, +				      unsigned long size, unsigned long align, +				      unsigned long flags) +{ +	struct sg_table *table; +	ion_phys_addr_t paddr; +	int ret; + +	if (align > PAGE_SIZE) +		return -EINVAL; + +	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); +	if (!table) +		return -ENOMEM; +	ret = sg_alloc_table(table, 1, GFP_KERNEL); +	if (ret) +		goto err_free; + +	paddr = ion_carveout_allocate(heap, size, align); +	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) { +		ret = -ENOMEM; +		goto err_free_table; +	} + +	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0); +	buffer->priv_virt = table; + +	return 0; + +err_free_table: +	sg_free_table(table); +err_free: +	kfree(table); +	return ret; +} + +static void ion_carveout_heap_free(struct ion_buffer *buffer) +{ +	struct ion_heap *heap = buffer->heap; +	struct sg_table *table = buffer->priv_virt; +	struct page *page = sg_page(table->sgl); +	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + +	ion_heap_buffer_zero(buffer); + +	if (ion_buffer_cached(buffer)) +		dma_sync_sg_for_device(NULL, table->sgl, table->nents, +							DMA_BIDIRECTIONAL); + +	ion_carveout_free(heap, paddr, buffer->size); +	sg_free_table(table); +	kfree(table); +} + +static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap, +						  struct ion_buffer *buffer) +{ +	return buffer->priv_virt; +} + +static void ion_carveout_heap_unmap_dma(struct ion_heap *heap, +					struct ion_buffer *buffer) +{ +	return; +} + +static struct ion_heap_ops carveout_heap_ops = { +	.allocate = ion_carveout_heap_allocate, +	.free = ion_carveout_heap_free, +	.phys = ion_carveout_heap_phys, +	.map_dma = ion_carveout_heap_map_dma, +	.unmap_dma = ion_carveout_heap_unmap_dma, +	.map_user = ion_heap_map_user, +	.map_kernel = ion_heap_map_kernel, +	.unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) +{ +	struct ion_carveout_heap *carveout_heap; +	int ret; + +	struct page *page; +	size_t size; + +	page = 
pfn_to_page(PFN_DOWN(heap_data->base)); +	size = heap_data->size; + +	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + +	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); +	if (ret) +		return ERR_PTR(ret); + +	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); +	if (!carveout_heap) +		return ERR_PTR(-ENOMEM); + +	carveout_heap->pool = gen_pool_create(12, -1); +	if (!carveout_heap->pool) { +		kfree(carveout_heap); +		return ERR_PTR(-ENOMEM); +	} +	carveout_heap->base = heap_data->base; +	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, +		     -1); +	carveout_heap->heap.ops = &carveout_heap_ops; +	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; +	carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + +	return &carveout_heap->heap; +} + +void ion_carveout_heap_destroy(struct ion_heap *heap) +{ +	struct ion_carveout_heap *carveout_heap = +	     container_of(heap, struct  ion_carveout_heap, heap); + +	gen_pool_destroy(carveout_heap->pool); +	kfree(carveout_heap); +	carveout_heap = NULL; +} diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c new file mode 100644 index 00000000000..3f2c12ba4d1 --- /dev/null +++ b/drivers/staging/android/ion/ion_chunk_heap.c @@ -0,0 +1,195 @@ +/* + * drivers/staging/android/ion/ion_chunk_heap.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +struct ion_chunk_heap { +	struct ion_heap heap; +	struct gen_pool *pool; +	ion_phys_addr_t base; +	unsigned long chunk_size; +	unsigned long size; +	unsigned long allocated; +}; + +static int ion_chunk_heap_allocate(struct ion_heap *heap, +				      struct ion_buffer *buffer, +				      unsigned long size, unsigned long align, +				      unsigned long flags) +{ +	struct ion_chunk_heap *chunk_heap = +		container_of(heap, struct ion_chunk_heap, heap); +	struct sg_table *table; +	struct scatterlist *sg; +	int ret, i; +	unsigned long num_chunks; +	unsigned long allocated_size; + +	if (align > chunk_heap->chunk_size) +		return -EINVAL; + +	allocated_size = ALIGN(size, chunk_heap->chunk_size); +	num_chunks = allocated_size / chunk_heap->chunk_size; + +	if (allocated_size > chunk_heap->size - chunk_heap->allocated) +		return -ENOMEM; + +	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); +	if (!table) +		return -ENOMEM; +	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL); +	if (ret) { +		kfree(table); +		return ret; +	} + +	sg = table->sgl; +	for (i = 0; i < num_chunks; i++) { +		unsigned long paddr = gen_pool_alloc(chunk_heap->pool, +						     chunk_heap->chunk_size); +		if (!paddr) +			goto err; +		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)), +				chunk_heap->chunk_size, 0); +		sg = sg_next(sg); +	} + +	buffer->priv_virt = table; +	chunk_heap->allocated += allocated_size; +	return 0; +err: +	sg = table->sgl; +	for (i -= 1; i >= 0; i--) { +		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), +			      sg->length); +		sg = sg_next(sg); +	} +	sg_free_table(table); +	kfree(table); +	return -ENOMEM; +} + +static void ion_chunk_heap_free(struct ion_buffer *buffer) +{ +	struct ion_heap *heap = buffer->heap; +	struct ion_chunk_heap *chunk_heap = +		container_of(heap, struct ion_chunk_heap, heap); +	struct sg_table *table = buffer->priv_virt; +	struct scatterlist *sg; +	int i; +	unsigned long allocated_size; + +	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size); + +	ion_heap_buffer_zero(buffer); + +	if (ion_buffer_cached(buffer)) +		dma_sync_sg_for_device(NULL, table->sgl, table->nents, +								DMA_BIDIRECTIONAL); + +	for_each_sg(table->sgl, sg, table->nents, i) { +		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), +			      sg->length); +	} +	chunk_heap->allocated -= allocated_size; +	sg_free_table(table); +	kfree(table); +} + +static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap, +					       struct ion_buffer *buffer) +{ +	return buffer->priv_virt; +} + +static void ion_chunk_heap_unmap_dma(struct ion_heap *heap, +				     struct ion_buffer *buffer) +{ +	return; +} + +static struct ion_heap_ops chunk_heap_ops = { +	.allocate = ion_chunk_heap_allocate, +	.free = ion_chunk_heap_free, +	.map_dma = ion_chunk_heap_map_dma, +	.unmap_dma = ion_chunk_heap_unmap_dma, +	.map_user = ion_heap_map_user, +	.map_kernel = ion_heap_map_kernel, +	.unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) +{ +	struct ion_chunk_heap *chunk_heap; +	int ret; +	struct page *page; +	size_t size; + +	page = pfn_to_page(PFN_DOWN(heap_data->base)); +	size = heap_data->size; + +	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + +	ret 
= ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); +	if (ret) +		return ERR_PTR(ret); + +	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL); +	if (!chunk_heap) +		return ERR_PTR(-ENOMEM); + +	chunk_heap->chunk_size = (unsigned long)heap_data->priv; +	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) + +					   PAGE_SHIFT, -1); +	if (!chunk_heap->pool) { +		ret = -ENOMEM; +		goto error_gen_pool_create; +	} +	chunk_heap->base = heap_data->base; +	chunk_heap->size = heap_data->size; +	chunk_heap->allocated = 0; + +	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1); +	chunk_heap->heap.ops = &chunk_heap_ops; +	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK; +	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; +	pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base, +		heap_data->size, heap_data->align); + +	return &chunk_heap->heap; + +error_gen_pool_create: +	kfree(chunk_heap); +	return ERR_PTR(ret); +} + +void ion_chunk_heap_destroy(struct ion_heap *heap) +{ +	struct ion_chunk_heap *chunk_heap = +	     container_of(heap, struct  ion_chunk_heap, heap); + +	gen_pool_destroy(chunk_heap->pool); +	kfree(chunk_heap); +	chunk_heap = NULL; +} diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c new file mode 100644 index 00000000000..ce68ecfed31 --- /dev/null +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -0,0 +1,218 @@ +/* + * drivers/staging/android/ion/ion_cma_heap.c + * + * Copyright (C) Linaro 2012 + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/dma-mapping.h> + +#include "ion.h" +#include "ion_priv.h" + +#define ION_CMA_ALLOCATE_FAILED -1 + +struct ion_cma_heap { +	struct ion_heap heap; +	struct device *dev; +}; + +#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap) + +struct ion_cma_buffer_info { +	void *cpu_addr; +	dma_addr_t handle; +	struct sg_table *table; +}; + +/* + * Create scatter-list for the already allocated DMA buffer. + * This function could be replaced by dma_common_get_sgtable + * as soon as it will avalaible. 
+ */ +static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt, +			       void *cpu_addr, dma_addr_t handle, size_t size) +{ +	struct page *page = virt_to_page(cpu_addr); +	int ret; + +	ret = sg_alloc_table(sgt, 1, GFP_KERNEL); +	if (unlikely(ret)) +		return ret; + +	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); +	return 0; +} + +/* ION CMA heap operations functions */ +static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, +			    unsigned long len, unsigned long align, +			    unsigned long flags) +{ +	struct ion_cma_heap *cma_heap = to_cma_heap(heap); +	struct device *dev = cma_heap->dev; +	struct ion_cma_buffer_info *info; + +	dev_dbg(dev, "Request buffer allocation len %ld\n", len); + +	if (buffer->flags & ION_FLAG_CACHED) +		return -EINVAL; + +	if (align > PAGE_SIZE) +		return -EINVAL; + +	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL); +	if (!info) { +		dev_err(dev, "Can't allocate buffer info\n"); +		return ION_CMA_ALLOCATE_FAILED; +	} + +	info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), +						GFP_HIGHUSER | __GFP_ZERO); + +	if (!info->cpu_addr) { +		dev_err(dev, "Fail to allocate buffer\n"); +		goto err; +	} + +	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); +	if (!info->table) { +		dev_err(dev, "Fail to allocate sg table\n"); +		goto free_mem; +	} + +	if (ion_cma_get_sgtable +	    (dev, info->table, info->cpu_addr, info->handle, len)) +		goto free_table; +	/* keep this for memory release */ +	buffer->priv_virt = info; +	dev_dbg(dev, "Allocate buffer %p\n", buffer); +	return 0; + +free_table: +	kfree(info->table); +free_mem: +	dma_free_coherent(dev, len, info->cpu_addr, info->handle); +err: +	kfree(info); +	return ION_CMA_ALLOCATE_FAILED; +} + +static void ion_cma_free(struct ion_buffer *buffer) +{ +	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); +	struct device *dev = cma_heap->dev; +	struct ion_cma_buffer_info *info = buffer->priv_virt; + +	dev_dbg(dev, "Release buffer %p\n", buffer); +	/* release memory */ +	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle); +	/* release sg table */ +	sg_free_table(info->table); +	kfree(info->table); +	kfree(info); +} + +/* return physical address in addr */ +static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer, +			ion_phys_addr_t *addr, size_t *len) +{ +	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); +	struct device *dev = cma_heap->dev; +	struct ion_cma_buffer_info *info = buffer->priv_virt; + +	dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer, +		&info->handle); + +	*addr = info->handle; +	*len = buffer->size; + +	return 0; +} + +static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap, +					     struct ion_buffer *buffer) +{ +	struct ion_cma_buffer_info *info = buffer->priv_virt; + +	return info->table; +} + +static void ion_cma_heap_unmap_dma(struct ion_heap *heap, +				   struct ion_buffer *buffer) +{ +	return; +} + +static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer, +			struct vm_area_struct *vma) +{ +	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); +	struct device *dev = cma_heap->dev; +	struct ion_cma_buffer_info *info = buffer->priv_virt; + +	return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle, +				 buffer->size); +} + +static void *ion_cma_map_kernel(struct ion_heap *heap, +				struct ion_buffer *buffer) +{ +	struct ion_cma_buffer_info *info = buffer->priv_virt; +	/* kernel memory mapping has been done at 
allocation time */ +	return info->cpu_addr; +} + +static void ion_cma_unmap_kernel(struct ion_heap *heap, +					struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops ion_cma_ops = { +	.allocate = ion_cma_allocate, +	.free = ion_cma_free, +	.map_dma = ion_cma_heap_map_dma, +	.unmap_dma = ion_cma_heap_unmap_dma, +	.phys = ion_cma_phys, +	.map_user = ion_cma_mmap, +	.map_kernel = ion_cma_map_kernel, +	.unmap_kernel = ion_cma_unmap_kernel, +}; + +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data) +{ +	struct ion_cma_heap *cma_heap; + +	cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL); + +	if (!cma_heap) +		return ERR_PTR(-ENOMEM); + +	cma_heap->heap.ops = &ion_cma_ops; +	/* get device from private heaps data, later it will be +	 * used to make the link with reserved CMA memory */ +	cma_heap->dev = data->priv; +	cma_heap->heap.type = ION_HEAP_TYPE_DMA; +	return &cma_heap->heap; +} + +void ion_cma_heap_destroy(struct ion_heap *heap) +{ +	struct ion_cma_heap *cma_heap = to_cma_heap(heap); + +	kfree(cma_heap); +} diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c new file mode 100644 index 00000000000..3a45e79fe44 --- /dev/null +++ b/drivers/staging/android/ion/ion_dummy_driver.c @@ -0,0 +1,158 @@ +/* + * drivers/gpu/ion/ion_dummy_driver.c + * + * Copyright (C) 2013 Linaro, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/memblock.h> +#include <linux/sizes.h> +#include <linux/io.h> +#include "ion.h" +#include "ion_priv.h" + +static struct ion_device *idev; +static struct ion_heap **heaps; + +static void *carveout_ptr; +static void *chunk_ptr; + +static struct ion_platform_heap dummy_heaps[] = { +		{ +			.id	= ION_HEAP_TYPE_SYSTEM, +			.type	= ION_HEAP_TYPE_SYSTEM, +			.name	= "system", +		}, +		{ +			.id	= ION_HEAP_TYPE_SYSTEM_CONTIG, +			.type	= ION_HEAP_TYPE_SYSTEM_CONTIG, +			.name	= "system contig", +		}, +		{ +			.id	= ION_HEAP_TYPE_CARVEOUT, +			.type	= ION_HEAP_TYPE_CARVEOUT, +			.name	= "carveout", +			.size	= SZ_4M, +		}, +		{ +			.id	= ION_HEAP_TYPE_CHUNK, +			.type	= ION_HEAP_TYPE_CHUNK, +			.name	= "chunk", +			.size	= SZ_4M, +			.align	= SZ_16K, +			.priv	= (void *)(SZ_16K), +		}, +}; + +static struct ion_platform_data dummy_ion_pdata = { +	.nr = ARRAY_SIZE(dummy_heaps), +	.heaps = dummy_heaps, +}; + +static int __init ion_dummy_init(void) +{ +	int i, err; + +	idev = ion_device_create(NULL); +	heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr, +			GFP_KERNEL); +	if (!heaps) +		return -ENOMEM; + + +	/* Allocate a dummy carveout heap */ +	carveout_ptr = alloc_pages_exact( +				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size, +				GFP_KERNEL); +	if (carveout_ptr) +		dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base = +						virt_to_phys(carveout_ptr); +	else +		pr_err("ion_dummy: Could not allocate carveout\n"); + +	/* Allocate a dummy chunk heap */ +	chunk_ptr = alloc_pages_exact( +				dummy_heaps[ION_HEAP_TYPE_CHUNK].size, +				GFP_KERNEL); +	if (chunk_ptr) +		dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr); +	else +		pr_err("ion_dummy: Could not allocate chunk\n"); + +	for (i = 0; i < dummy_ion_pdata.nr; i++) { +		struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i]; + +		if (heap_data->type == ION_HEAP_TYPE_CARVEOUT && +							!heap_data->base) +			continue; + +		if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base) +			continue; + +		heaps[i] = ion_heap_create(heap_data); +		if (IS_ERR_OR_NULL(heaps[i])) { +			err = PTR_ERR(heaps[i]); +			goto err; +		} +		ion_device_add_heap(idev, heaps[i]); +	} +	return 0; +err: +	for (i = 0; i < dummy_ion_pdata.nr; i++) { +		if (heaps[i]) +			ion_heap_destroy(heaps[i]); +	} +	kfree(heaps); + +	if (carveout_ptr) { +		free_pages_exact(carveout_ptr, +				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); +		carveout_ptr = NULL; +	} +	if (chunk_ptr) { +		free_pages_exact(chunk_ptr, +				dummy_heaps[ION_HEAP_TYPE_CHUNK].size); +		chunk_ptr = NULL; +	} +	return err; +} +device_initcall(ion_dummy_init); + +static void __exit ion_dummy_exit(void) +{ +	int i; + +	ion_device_destroy(idev); + +	for (i = 0; i < dummy_ion_pdata.nr; i++) +		ion_heap_destroy(heaps[i]); +	kfree(heaps); + +	if (carveout_ptr) { +		free_pages_exact(carveout_ptr, +				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); +		carveout_ptr = NULL; +	} +	if (chunk_ptr) { +		free_pages_exact(chunk_ptr, +				dummy_heaps[ION_HEAP_TYPE_CHUNK].size); +		chunk_ptr = NULL; +	} + +	return; +} +__exitcall(ion_dummy_exit); diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c new file mode 100644 index 00000000000..4605e04712a --- /dev/null +++ b/drivers/staging/android/ion/ion_heap.c @@ -0,0 +1,383 @@ +/* + * drivers/staging/android/ion/ion_heap.c + * + * Copyright (C) 2011 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/err.h> +#include <linux/freezer.h> +#include <linux/kthread.h> +#include <linux/mm.h> +#include <linux/rtmutex.h> +#include <linux/sched.h> +#include <linux/scatterlist.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +void *ion_heap_map_kernel(struct ion_heap *heap, +			  struct ion_buffer *buffer) +{ +	struct scatterlist *sg; +	int i, j; +	void *vaddr; +	pgprot_t pgprot; +	struct sg_table *table = buffer->sg_table; +	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; +	struct page **pages = vmalloc(sizeof(struct page *) * npages); +	struct page **tmp = pages; + +	if (!pages) +		return NULL; + +	if (buffer->flags & ION_FLAG_CACHED) +		pgprot = PAGE_KERNEL; +	else +		pgprot = pgprot_writecombine(PAGE_KERNEL); + +	for_each_sg(table->sgl, sg, table->nents, i) { +		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE; +		struct page *page = sg_page(sg); + +		BUG_ON(i >= npages); +		for (j = 0; j < npages_this_entry; j++) +			*(tmp++) = page++; +	} +	vaddr = vmap(pages, npages, VM_MAP, pgprot); +	vfree(pages); + +	if (vaddr == NULL) +		return ERR_PTR(-ENOMEM); + +	return vaddr; +} + +void ion_heap_unmap_kernel(struct ion_heap *heap, +			   struct ion_buffer *buffer) +{ +	vunmap(buffer->vaddr); +} + +int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, +		      struct vm_area_struct *vma) +{ +	struct sg_table *table = buffer->sg_table; +	unsigned long addr = vma->vm_start; +	unsigned long offset = vma->vm_pgoff * PAGE_SIZE; +	struct scatterlist *sg; +	int i; +	int ret; + +	for_each_sg(table->sgl, sg, table->nents, i) { +		struct page *page = sg_page(sg); +		unsigned long remainder = vma->vm_end - addr; +		unsigned long len = sg->length; + +		if (offset >= sg->length) { +			offset -= sg->length; +			continue; +		} else if (offset) { +			page += offset / PAGE_SIZE; +			len = sg->length - offset; +			offset = 0; +		} +		len = min(len, remainder); +		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len, +				vma->vm_page_prot); +		if (ret) +			return ret; +		addr += len; +		if (addr >= vma->vm_end) +			return 0; +	} +	return 0; +} + +static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) +{ +	void *addr = vm_map_ram(pages, num, -1, pgprot); + +	if (!addr) +		return -ENOMEM; +	memset(addr, 0, PAGE_SIZE * num); +	vm_unmap_ram(addr, num); + +	return 0; +} + +static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents, +						pgprot_t pgprot) +{ +	int p = 0; +	int ret = 0; +	struct sg_page_iter piter; +	struct page *pages[32]; + +	for_each_sg_page(sgl, &piter, nents, 0) { +		pages[p++] = sg_page_iter_page(&piter); +		if (p == ARRAY_SIZE(pages)) { +			ret = ion_heap_clear_pages(pages, p, pgprot); +			if (ret) +				return ret; +			p = 0; +		} +	} +	if (p) +		ret = ion_heap_clear_pages(pages, p, pgprot); + +	return ret; +} + +int ion_heap_buffer_zero(struct ion_buffer *buffer) +{ +	struct sg_table *table = buffer->sg_table; +	pgprot_t pgprot; + +	if (buffer->flags & ION_FLAG_CACHED) +		pgprot = PAGE_KERNEL; +	else +		pgprot = 
pgprot_writecombine(PAGE_KERNEL); + +	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot); +} + +int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot) +{ +	struct scatterlist sg; + +	sg_init_table(&sg, 1); +	sg_set_page(&sg, page, size, 0); +	return ion_heap_sglist_zero(&sg, 1, pgprot); +} + +void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer) +{ +	spin_lock(&heap->free_lock); +	list_add(&buffer->list, &heap->free_list); +	heap->free_list_size += buffer->size; +	spin_unlock(&heap->free_lock); +	wake_up(&heap->waitqueue); +} + +size_t ion_heap_freelist_size(struct ion_heap *heap) +{ +	size_t size; + +	spin_lock(&heap->free_lock); +	size = heap->free_list_size; +	spin_unlock(&heap->free_lock); + +	return size; +} + +static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size, +				bool skip_pools) +{ +	struct ion_buffer *buffer; +	size_t total_drained = 0; + +	if (ion_heap_freelist_size(heap) == 0) +		return 0; + +	spin_lock(&heap->free_lock); +	if (size == 0) +		size = heap->free_list_size; + +	while (!list_empty(&heap->free_list)) { +		if (total_drained >= size) +			break; +		buffer = list_first_entry(&heap->free_list, struct ion_buffer, +					  list); +		list_del(&buffer->list); +		heap->free_list_size -= buffer->size; +		if (skip_pools) +			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE; +		total_drained += buffer->size; +		spin_unlock(&heap->free_lock); +		ion_buffer_destroy(buffer); +		spin_lock(&heap->free_lock); +	} +	spin_unlock(&heap->free_lock); + +	return total_drained; +} + +size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size) +{ +	return _ion_heap_freelist_drain(heap, size, false); +} + +size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size) +{ +	return _ion_heap_freelist_drain(heap, size, true); +} + +static int ion_heap_deferred_free(void *data) +{ +	struct ion_heap *heap = data; + +	while (true) { +		struct ion_buffer *buffer; + +		wait_event_freezable(heap->waitqueue, +				     ion_heap_freelist_size(heap) > 0); + +		spin_lock(&heap->free_lock); +		if (list_empty(&heap->free_list)) { +			spin_unlock(&heap->free_lock); +			continue; +		} +		buffer = list_first_entry(&heap->free_list, struct ion_buffer, +					  list); +		list_del(&buffer->list); +		heap->free_list_size -= buffer->size; +		spin_unlock(&heap->free_lock); +		ion_buffer_destroy(buffer); +	} + +	return 0; +} + +int ion_heap_init_deferred_free(struct ion_heap *heap) +{ +	struct sched_param param = { .sched_priority = 0 }; + +	INIT_LIST_HEAD(&heap->free_list); +	heap->free_list_size = 0; +	spin_lock_init(&heap->free_lock); +	init_waitqueue_head(&heap->waitqueue); +	heap->task = kthread_run(ion_heap_deferred_free, heap, +				 "%s", heap->name); +	if (IS_ERR(heap->task)) { +		pr_err("%s: creating thread for deferred free failed\n", +		       __func__); +		return PTR_ERR_OR_ZERO(heap->task); +	} +	sched_setscheduler(heap->task, SCHED_IDLE, ¶m); +	return 0; +} + +static unsigned long ion_heap_shrink_count(struct shrinker *shrinker, +						struct shrink_control *sc) +{ +	struct ion_heap *heap = container_of(shrinker, struct ion_heap, +					     shrinker); +	int total = 0; + +	total = ion_heap_freelist_size(heap) / PAGE_SIZE; +	if (heap->ops->shrink) +		total += heap->ops->shrink(heap, sc->gfp_mask, 0); +	return total; +} + +static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker, +						struct shrink_control *sc) +{ +	struct ion_heap *heap = container_of(shrinker, struct ion_heap, +					     shrinker); +	int 
freed = 0; +	int to_scan = sc->nr_to_scan; + +	if (to_scan == 0) +		return 0; + +	/* +	 * shrink the free list first, no point in zeroing the memory if we're +	 * just going to reclaim it. Also, skip any possible page pooling. +	 */ +	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) +		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) / +				PAGE_SIZE; + +	to_scan -= freed; +	if (to_scan <= 0) +		return freed; + +	if (heap->ops->shrink) +		freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan); +	return freed; +} + +void ion_heap_init_shrinker(struct ion_heap *heap) +{ +	heap->shrinker.count_objects = ion_heap_shrink_count; +	heap->shrinker.scan_objects = ion_heap_shrink_scan; +	heap->shrinker.seeks = DEFAULT_SEEKS; +	heap->shrinker.batch = 0; +	register_shrinker(&heap->shrinker); +} + +struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) +{ +	struct ion_heap *heap = NULL; + +	switch (heap_data->type) { +	case ION_HEAP_TYPE_SYSTEM_CONTIG: +		heap = ion_system_contig_heap_create(heap_data); +		break; +	case ION_HEAP_TYPE_SYSTEM: +		heap = ion_system_heap_create(heap_data); +		break; +	case ION_HEAP_TYPE_CARVEOUT: +		heap = ion_carveout_heap_create(heap_data); +		break; +	case ION_HEAP_TYPE_CHUNK: +		heap = ion_chunk_heap_create(heap_data); +		break; +	case ION_HEAP_TYPE_DMA: +		heap = ion_cma_heap_create(heap_data); +		break; +	default: +		pr_err("%s: Invalid heap type %d\n", __func__, +		       heap_data->type); +		return ERR_PTR(-EINVAL); +	} + +	if (IS_ERR_OR_NULL(heap)) { +		pr_err("%s: error creating heap %s type %d base %lu size %zu\n", +		       __func__, heap_data->name, heap_data->type, +		       heap_data->base, heap_data->size); +		return ERR_PTR(-EINVAL); +	} + +	heap->name = heap_data->name; +	heap->id = heap_data->id; +	return heap; +} + +void ion_heap_destroy(struct ion_heap *heap) +{ +	if (!heap) +		return; + +	switch (heap->type) { +	case ION_HEAP_TYPE_SYSTEM_CONTIG: +		ion_system_contig_heap_destroy(heap); +		break; +	case ION_HEAP_TYPE_SYSTEM: +		ion_system_heap_destroy(heap); +		break; +	case ION_HEAP_TYPE_CARVEOUT: +		ion_carveout_heap_destroy(heap); +		break; +	case ION_HEAP_TYPE_CHUNK: +		ion_chunk_heap_destroy(heap); +		break; +	case ION_HEAP_TYPE_DMA: +		ion_cma_heap_destroy(heap); +		break; +	default: +		pr_err("%s: Invalid heap type %d\n", __func__, +		       heap->type); +	} +} diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c new file mode 100644 index 00000000000..5864f3dfcbc --- /dev/null +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -0,0 +1,182 @@ +/* + * drivers/staging/android/ion/ion_mem_pool.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/debugfs.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/swap.h> +#include "ion_priv.h" + +static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool) +{ +	struct page *page = alloc_pages(pool->gfp_mask, pool->order); + +	if (!page) +		return NULL; +	ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, +						DMA_BIDIRECTIONAL); +	return page; +} + +static void ion_page_pool_free_pages(struct ion_page_pool *pool, +				     struct page *page) +{ +	__free_pages(page, pool->order); +} + +static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) +{ +	mutex_lock(&pool->mutex); +	if (PageHighMem(page)) { +		list_add_tail(&page->lru, &pool->high_items); +		pool->high_count++; +	} else { +		list_add_tail(&page->lru, &pool->low_items); +		pool->low_count++; +	} +	mutex_unlock(&pool->mutex); +	return 0; +} + +static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) +{ +	struct page *page; + +	if (high) { +		BUG_ON(!pool->high_count); +		page = list_first_entry(&pool->high_items, struct page, lru); +		pool->high_count--; +	} else { +		BUG_ON(!pool->low_count); +		page = list_first_entry(&pool->low_items, struct page, lru); +		pool->low_count--; +	} + +	list_del(&page->lru); +	return page; +} + +struct page *ion_page_pool_alloc(struct ion_page_pool *pool) +{ +	struct page *page = NULL; + +	BUG_ON(!pool); + +	mutex_lock(&pool->mutex); +	if (pool->high_count) +		page = ion_page_pool_remove(pool, true); +	else if (pool->low_count) +		page = ion_page_pool_remove(pool, false); +	mutex_unlock(&pool->mutex); + +	if (!page) +		page = ion_page_pool_alloc_pages(pool); + +	return page; +} + +void ion_page_pool_free(struct ion_page_pool *pool, struct page *page) +{ +	int ret; + +	BUG_ON(pool->order != compound_order(page)); + +	ret = ion_page_pool_add(pool, page); +	if (ret) +		ion_page_pool_free_pages(pool, page); +} + +static int ion_page_pool_total(struct ion_page_pool *pool, bool high) +{ +	int count = pool->low_count; + +	if (high) +		count += pool->high_count; + +	return count << pool->order; +} + +int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, +				int nr_to_scan) +{ +	int freed; +	bool high; + +	if (current_is_kswapd()) +		high = 1; +	else +		high = !!(gfp_mask & __GFP_HIGHMEM); + +	if (nr_to_scan == 0) +		return ion_page_pool_total(pool, high); + +	for (freed = 0; freed < nr_to_scan; freed++) { +		struct page *page; + +		mutex_lock(&pool->mutex); +		if (pool->low_count) { +			page = ion_page_pool_remove(pool, false); +		} else if (high && pool->high_count) { +			page = ion_page_pool_remove(pool, true); +		} else { +			mutex_unlock(&pool->mutex); +			break; +		} +		mutex_unlock(&pool->mutex); +		ion_page_pool_free_pages(pool, page); +	} + +	return freed; +} + +struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) +{ +	struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool), +					     GFP_KERNEL); +	if (!pool) +		return NULL; +	pool->high_count = 0; +	pool->low_count = 0; +	INIT_LIST_HEAD(&pool->low_items); +	INIT_LIST_HEAD(&pool->high_items); +	pool->gfp_mask = gfp_mask | __GFP_COMP; +	pool->order = order; +	mutex_init(&pool->mutex); +	plist_node_init(&pool->list, order); + +	return pool; +} + +void ion_page_pool_destroy(struct ion_page_pool *pool) +{ +	kfree(pool); +} + +static int __init ion_page_pool_init(void) +{ +	return 0; +} + 
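/*
 * Editorial sketch, not part of the patch above: roughly how a heap is
 * expected to drive the page-pool API defined in this file.  The
 * example_* names, the single order-0 pool and the gfp masks are
 * assumptions made for illustration; ion_system_heap.c further down in
 * this diff is the real in-tree user.
 */
#if 0	/* illustrative only, never compiled */
static struct ion_page_pool *example_pool;

static int example_heap_setup(void)
{
	/* one pool of zeroed, highmem-capable order-0 pages */
	example_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
	return example_pool ? 0 : -ENOMEM;
}

static struct page *example_heap_get_page(void)
{
	/* reuses a cached page when one is pooled, else allocates fresh */
	return ion_page_pool_alloc(example_pool);
}

static void example_heap_put_page(struct page *page)
{
	/* hands the page back to the pool for reuse rather than freeing it */
	ion_page_pool_free(example_pool, page);
}

static void example_heap_teardown(void)
{
	/*
	 * drain both high and low lists (the gfp mask must allow highmem
	 * for the high list to be scanned), then free the pool itself
	 */
	ion_page_pool_shrink(example_pool, GFP_HIGHUSER, INT_MAX);
	ion_page_pool_destroy(example_pool);
}
#endif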
+static void __exit ion_page_pool_exit(void) +{ +} + +module_init(ion_page_pool_init); +module_exit(ion_page_pool_exit); diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h new file mode 100644 index 00000000000..c8f01757abf --- /dev/null +++ b/drivers/staging/android/ion/ion_priv.h @@ -0,0 +1,405 @@ +/* + * drivers/staging/android/ion/ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _ION_PRIV_H +#define _ION_PRIV_H + +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/kref.h> +#include <linux/mm_types.h> +#include <linux/mutex.h> +#include <linux/rbtree.h> +#include <linux/sched.h> +#include <linux/shrinker.h> +#include <linux/types.h> + +#include "ion.h" + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle); + +/** + * struct ion_buffer - metadata for a particular buffer + * @ref:		refernce count + * @node:		node in the ion_device buffers tree + * @dev:		back pointer to the ion_device + * @heap:		back pointer to the heap the buffer came from + * @flags:		buffer specific flags + * @private_flags:	internal buffer specific flags + * @size:		size of the buffer + * @priv_virt:		private data to the buffer representable as + *			a void * + * @priv_phys:		private data to the buffer representable as + *			an ion_phys_addr_t (and someday a phys_addr_t) + * @lock:		protects the buffers cnt fields + * @kmap_cnt:		number of times the buffer is mapped to the kernel + * @vaddr:		the kenrel mapping if kmap_cnt is not zero + * @dmap_cnt:		number of times the buffer is mapped for dma + * @sg_table:		the sg table for the buffer if dmap_cnt is not zero + * @pages:		flat array of pages in the buffer -- used by fault + *			handler and only valid for buffers that are faulted in + * @vmas:		list of vma's mapping this buffer + * @handle_count:	count of handles referencing this buffer + * @task_comm:		taskcomm of last client to reference this buffer in a + *			handle, used for debugging + * @pid:		pid of last client to reference this buffer in a + *			handle, used for debugging +*/ +struct ion_buffer { +	struct kref ref; +	union { +		struct rb_node node; +		struct list_head list; +	}; +	struct ion_device *dev; +	struct ion_heap *heap; +	unsigned long flags; +	unsigned long private_flags; +	size_t size; +	union { +		void *priv_virt; +		ion_phys_addr_t priv_phys; +	}; +	struct mutex lock; +	int kmap_cnt; +	void *vaddr; +	int dmap_cnt; +	struct sg_table *sg_table; +	struct page **pages; +	struct list_head vmas; +	/* used to track orphaned buffers */ +	int handle_count; +	char task_comm[TASK_COMM_LEN]; +	pid_t pid; +}; +void ion_buffer_destroy(struct ion_buffer *buffer); + +/** + * struct ion_heap_ops - ops to operate on a given heap + * @allocate:		allocate memory + * @free:		free memory + * @phys		get physical address of a buffer (only define on + *			physically contiguous heaps) + * @map_dma		map the memory for dma to a scatterlist + * @unmap_dma		unmap the memory for dma + * @map_kernel		map memory to the kernel + * @unmap_kernel	unmap memory to the 
kernel + * @map_user		map memory to userspace + * + * allocate, phys, and map_user return 0 on success, -errno on error. + * map_dma and map_kernel return pointer on success, ERR_PTR on + * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in + * the buffer's private_flags when called from a shrinker. In that + * case, the pages being free'd must be truly free'd back to the + * system, not put in a page pool or otherwise cached. + */ +struct ion_heap_ops { +	int (*allocate)(struct ion_heap *heap, +			struct ion_buffer *buffer, unsigned long len, +			unsigned long align, unsigned long flags); +	void (*free)(struct ion_buffer *buffer); +	int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer, +		    ion_phys_addr_t *addr, size_t *len); +	struct sg_table * (*map_dma)(struct ion_heap *heap, +				     struct ion_buffer *buffer); +	void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer); +	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); +	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); +	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer, +			struct vm_area_struct *vma); +	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan); +}; + +/** + * heap flags - flags between the heaps and core ion code + */ +#define ION_HEAP_FLAG_DEFER_FREE (1 << 0) + +/** + * private flags - flags internal to ion + */ +/* + * Buffer is being freed from a shrinker function. Skip any possible + * heap-specific caching mechanism (e.g. page pools). Guarantees that + * any buffer storage that came from the system allocator will be + * returned to the system allocator. + */ +#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0) + +/** + * struct ion_heap - represents a heap in the system + * @node:		rb node to put the heap on the device's tree of heaps + * @dev:		back pointer to the ion_device + * @type:		type of heap + * @ops:		ops struct as above + * @flags:		flags + * @id:			id of heap, also indicates priority of this heap when + *			allocating.  These are specified by platform data and + *			MUST be unique + * @name:		used for debugging + * @shrinker:		a shrinker for the heap + * @free_list:		free list head if deferred free is used + * @free_list_size	size of the deferred free list in bytes + * @lock:		protects the free list + * @waitqueue:		queue to wait on from deferred free thread + * @task:		task struct of deferred free thread + * @debug_show:		called when heap debug file is read to add any + *			heap specific debug info to output + * + * Represents a pool of memory from which buffers can be made.  In some + * systems the only heap is regular system memory allocated via vmalloc. + * On others, some blocks might require large physically contiguous buffers + * that are allocated from a specially reserved heap. 
+ */ +struct ion_heap { +	struct plist_node node; +	struct ion_device *dev; +	enum ion_heap_type type; +	struct ion_heap_ops *ops; +	unsigned long flags; +	unsigned int id; +	const char *name; +	struct shrinker shrinker; +	struct list_head free_list; +	size_t free_list_size; +	spinlock_t free_lock; +	wait_queue_head_t waitqueue; +	struct task_struct *task; + +	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *); +}; + +/** + * ion_buffer_cached - this ion buffer is cached + * @buffer:		buffer + * + * indicates whether this ion buffer is cached + */ +bool ion_buffer_cached(struct ion_buffer *buffer); + +/** + * ion_buffer_fault_user_mappings - fault in user mappings of this buffer + * @buffer:		buffer + * + * indicates whether userspace mappings of this buffer will be faulted + * in, this can affect how buffers are allocated from the heap. + */ +bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer); + +/** + * ion_device_create - allocates and returns an ion device + * @custom_ioctl:	arch specific ioctl function if applicable + * + * returns a valid device or -PTR_ERR + */ +struct ion_device *ion_device_create(long (*custom_ioctl) +				     (struct ion_client *client, +				      unsigned int cmd, +				      unsigned long arg)); + +/** + * ion_device_destroy - free and device and it's resource + * @dev:		the device + */ +void ion_device_destroy(struct ion_device *dev); + +/** + * ion_device_add_heap - adds a heap to the ion device + * @dev:		the device + * @heap:		the heap to add + */ +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); + +/** + * some helpers for common operations on buffers using the sg_table + * and vaddr fields + */ +void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *); +void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *); +int ion_heap_map_user(struct ion_heap *, struct ion_buffer *, +			struct vm_area_struct *); +int ion_heap_buffer_zero(struct ion_buffer *buffer); +int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); + +/** + * ion_heap_init_shrinker + * @heap:		the heap + * + * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op + * this function will be called to setup a shrinker to shrink the freelists + * and call the heap's shrink op. + */ +void ion_heap_init_shrinker(struct ion_heap *heap); + +/** + * ion_heap_init_deferred_free -- initialize deferred free functionality + * @heap:		the heap + * + * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will + * be called to setup deferred frees. Calls to free the buffer will + * return immediately and the actual free will occur some time later + */ +int ion_heap_init_deferred_free(struct ion_heap *heap); + +/** + * ion_heap_freelist_add - add a buffer to the deferred free list + * @heap:		the heap + * @buffer:		the buffer + * + * Adds an item to the deferred freelist. + */ +void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer); + +/** + * ion_heap_freelist_drain - drain the deferred free list + * @heap:		the heap + * @size:		ammount of memory to drain in bytes + * + * Drains the indicated amount of memory from the deferred freelist immediately. + * Returns the total amount freed.  The total freed may be higher depending + * on the size of the items in the list, or lower if there is insufficient + * total memory on the freelist. 
+ */ +size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); + +/** + * ion_heap_freelist_shrink - drain the deferred free + *				list, skipping any heap-specific + *				pooling or caching mechanisms + * + * @heap:		the heap + * @size:		amount of memory to drain in bytes + * + * Drains the indicated amount of memory from the deferred freelist immediately. + * Returns the total amount freed.  The total freed may be higher depending + * on the size of the items in the list, or lower if there is insufficient + * total memory on the freelist. + * + * Unlike with @ion_heap_freelist_drain, don't put any pages back into + * page pools or otherwise cache the pages. Everything must be + * genuinely free'd back to the system. If you're free'ing from a + * shrinker you probably want to use this. Note that this relies on + * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE + * flag. + */ +size_t ion_heap_freelist_shrink(struct ion_heap *heap, +					size_t size); + +/** + * ion_heap_freelist_size - returns the size of the freelist in bytes + * @heap:		the heap + */ +size_t ion_heap_freelist_size(struct ion_heap *heap); + + +/** + * functions for creating and destroying the built in ion heaps. + * architectures can add their own custom architecture specific + * heaps as appropriate. + */ + +struct ion_heap *ion_heap_create(struct ion_platform_heap *); +void ion_heap_destroy(struct ion_heap *); +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *); +void ion_system_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); +void ion_system_contig_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); +void ion_carveout_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *); +void ion_chunk_heap_destroy(struct ion_heap *); +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *); +void ion_cma_heap_destroy(struct ion_heap *); + +/** + * kernel api to allocate/free from carveout -- used when carveout is + * used to back an architecture specific custom heap + */ +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, +				      unsigned long align); +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, +		       unsigned long size); +/** + * The carveout heap returns physical addresses, since 0 may be a valid + * physical address, this is used to indicate allocation failed + */ +#define ION_CARVEOUT_ALLOCATE_FAIL -1 + +/** + * functions for creating and destroying a heap pool -- allows you + * to keep a pool of pre allocated memory to use from your heap.  Keeping + * a pool of memory that is ready for dma, ie any cached mapping have been + * invalidated from the cache, provides a significant peformance benefit on + * many systems */ + +/** + * struct ion_page_pool - pagepool struct + * @high_count:		number of highmem items in the pool + * @low_count:		number of lowmem items in the pool + * @high_items:		list of highmem items + * @low_items:		list of lowmem items + * @mutex:		lock protecting this struct and especially the count + *			item list + * @gfp_mask:		gfp_mask to use from alloc + * @order:		order of pages in the pool + * @list:		plist node for list of pools + * + * Allows you to keep a pool of pre allocated pages to use from your heap. 
+ * Keeping a pool of pages that is ready for dma, ie any cached mapping have + * been invalidated from the cache, provides a significant peformance benefit + * on many systems + */ +struct ion_page_pool { +	int high_count; +	int low_count; +	struct list_head high_items; +	struct list_head low_items; +	struct mutex mutex; +	gfp_t gfp_mask; +	unsigned int order; +	struct plist_node list; +}; + +struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order); +void ion_page_pool_destroy(struct ion_page_pool *); +struct page *ion_page_pool_alloc(struct ion_page_pool *); +void ion_page_pool_free(struct ion_page_pool *, struct page *); + +/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool + * @pool:		the pool + * @gfp_mask:		the memory type to reclaim + * @nr_to_scan:		number of items to shrink in pages + * + * returns the number of items freed in pages + */ +int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, +			  int nr_to_scan); + +/** + * ion_pages_sync_for_device - cache flush pages for use with the specified + *                             device + * @dev:		the device the pages will be used with + * @page:		the first page to be flushed + * @size:		size in bytes of region to be flushed + * @dir:		direction of dma transfer + */ +void ion_pages_sync_for_device(struct device *dev, struct page *page, +		size_t size, enum dma_data_direction dir); + +#endif /* _ION_PRIV_H */ diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c new file mode 100644 index 00000000000..cb7ae08a5e2 --- /dev/null +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -0,0 +1,446 @@ +/* + * drivers/staging/android/ion/ion_system_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#include <asm/page.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/highmem.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | +				     __GFP_NORETRY) & ~__GFP_WAIT; +static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN); +static const unsigned int orders[] = {8, 4, 0}; +static const int num_orders = ARRAY_SIZE(orders); +static int order_to_index(unsigned int order) +{ +	int i; + +	for (i = 0; i < num_orders; i++) +		if (order == orders[i]) +			return i; +	BUG(); +	return -1; +} + +static inline unsigned int order_to_size(int order) +{ +	return PAGE_SIZE << order; +} + +struct ion_system_heap { +	struct ion_heap heap; +	struct ion_page_pool **pools; +}; + +struct page_info { +	struct page *page; +	unsigned int order; +	struct list_head list; +}; + +static struct page *alloc_buffer_page(struct ion_system_heap *heap, +				      struct ion_buffer *buffer, +				      unsigned long order) +{ +	bool cached = ion_buffer_cached(buffer); +	struct ion_page_pool *pool = heap->pools[order_to_index(order)]; +	struct page *page; + +	if (!cached) { +		page = ion_page_pool_alloc(pool); +	} else { +		gfp_t gfp_flags = low_order_gfp_flags; + +		if (order > 4) +			gfp_flags = high_order_gfp_flags; +		page = alloc_pages(gfp_flags | __GFP_COMP, order); +		if (!page) +			return NULL; +		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order, +						DMA_BIDIRECTIONAL); +	} + +	return page; +} + +static void free_buffer_page(struct ion_system_heap *heap, +			     struct ion_buffer *buffer, struct page *page, +			     unsigned int order) +{ +	bool cached = ion_buffer_cached(buffer); + +	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) { +		struct ion_page_pool *pool = heap->pools[order_to_index(order)]; + +		ion_page_pool_free(pool, page); +	} else { +		__free_pages(page, order); +	} +} + + +static struct page_info *alloc_largest_available(struct ion_system_heap *heap, +						 struct ion_buffer *buffer, +						 unsigned long size, +						 unsigned int max_order) +{ +	struct page *page; +	struct page_info *info; +	int i; + +	info = kmalloc(sizeof(struct page_info), GFP_KERNEL); +	if (!info) +		return NULL; + +	for (i = 0; i < num_orders; i++) { +		if (size < order_to_size(orders[i])) +			continue; +		if (max_order < orders[i]) +			continue; + +		page = alloc_buffer_page(heap, buffer, orders[i]); +		if (!page) +			continue; + +		info->page = page; +		info->order = orders[i]; +		return info; +	} +	kfree(info); + +	return NULL; +} + +static int ion_system_heap_allocate(struct ion_heap *heap, +				     struct ion_buffer *buffer, +				     unsigned long size, unsigned long align, +				     unsigned long flags) +{ +	struct ion_system_heap *sys_heap = container_of(heap, +							struct ion_system_heap, +							heap); +	struct sg_table *table; +	struct scatterlist *sg; +	struct list_head pages; +	struct page_info *info, *tmp_info; +	int i = 0; +	unsigned long size_remaining = PAGE_ALIGN(size); +	unsigned int max_order = orders[0]; + +	if (align > PAGE_SIZE) +		return -EINVAL; + +	if (size / PAGE_SIZE > totalram_pages / 2) +		return -ENOMEM; + +	INIT_LIST_HEAD(&pages); +	while (size_remaining > 0) { +		info = alloc_largest_available(sys_heap, buffer, size_remaining, +						max_order); +		if (!info) +			goto free_pages; +		
list_add_tail(&info->list, &pages); +		size_remaining -= PAGE_SIZE << info->order; +		max_order = info->order; +		i++; +	} +	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); +	if (!table) +		goto free_pages; + +	if (sg_alloc_table(table, i, GFP_KERNEL)) +		goto free_table; + +	sg = table->sgl; +	list_for_each_entry_safe(info, tmp_info, &pages, list) { +		struct page *page = info->page; +		sg_set_page(sg, page, PAGE_SIZE << info->order, 0); +		sg = sg_next(sg); +		list_del(&info->list); +		kfree(info); +	} + +	buffer->priv_virt = table; +	return 0; + +free_table: +	kfree(table); +free_pages: +	list_for_each_entry_safe(info, tmp_info, &pages, list) { +		free_buffer_page(sys_heap, buffer, info->page, info->order); +		kfree(info); +	} +	return -ENOMEM; +} + +static void ion_system_heap_free(struct ion_buffer *buffer) +{ +	struct ion_system_heap *sys_heap = container_of(buffer->heap, +							struct ion_system_heap, +							heap); +	struct sg_table *table = buffer->sg_table; +	bool cached = ion_buffer_cached(buffer); +	struct scatterlist *sg; +	int i; + +	/* uncached pages come from the page pools, zero them before returning +	   for security purposes (other allocations are zerod at alloc time */ +	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) +		ion_heap_buffer_zero(buffer); + +	for_each_sg(table->sgl, sg, table->nents, i) +		free_buffer_page(sys_heap, buffer, sg_page(sg), +				get_order(sg->length)); +	sg_free_table(table); +	kfree(table); +} + +static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap, +						struct ion_buffer *buffer) +{ +	return buffer->priv_virt; +} + +static void ion_system_heap_unmap_dma(struct ion_heap *heap, +				      struct ion_buffer *buffer) +{ +	return; +} + +static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, +					int nr_to_scan) +{ +	struct ion_system_heap *sys_heap; +	int nr_total = 0; +	int i; + +	sys_heap = container_of(heap, struct ion_system_heap, heap); + +	for (i = 0; i < num_orders; i++) { +		struct ion_page_pool *pool = sys_heap->pools[i]; + +		nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan); +	} + +	return nr_total; +} + +static struct ion_heap_ops system_heap_ops = { +	.allocate = ion_system_heap_allocate, +	.free = ion_system_heap_free, +	.map_dma = ion_system_heap_map_dma, +	.unmap_dma = ion_system_heap_unmap_dma, +	.map_kernel = ion_heap_map_kernel, +	.unmap_kernel = ion_heap_unmap_kernel, +	.map_user = ion_heap_map_user, +	.shrink = ion_system_heap_shrink, +}; + +static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, +				      void *unused) +{ + +	struct ion_system_heap *sys_heap = container_of(heap, +							struct ion_system_heap, +							heap); +	int i; + +	for (i = 0; i < num_orders; i++) { +		struct ion_page_pool *pool = sys_heap->pools[i]; + +		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n", +			   pool->high_count, pool->order, +			   (PAGE_SIZE << pool->order) * pool->high_count); +		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n", +			   pool->low_count, pool->order, +			   (PAGE_SIZE << pool->order) * pool->low_count); +	} +	return 0; +} + +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) +{ +	struct ion_system_heap *heap; +	int i; + +	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL); +	if (!heap) +		return ERR_PTR(-ENOMEM); +	heap->heap.ops = &system_heap_ops; +	heap->heap.type = ION_HEAP_TYPE_SYSTEM; +	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; +	heap->pools = 
kzalloc(sizeof(struct ion_page_pool *) * num_orders, +			      GFP_KERNEL); +	if (!heap->pools) +		goto free_heap; +	for (i = 0; i < num_orders; i++) { +		struct ion_page_pool *pool; +		gfp_t gfp_flags = low_order_gfp_flags; + +		if (orders[i] > 4) +			gfp_flags = high_order_gfp_flags; +		pool = ion_page_pool_create(gfp_flags, orders[i]); +		if (!pool) +			goto destroy_pools; +		heap->pools[i] = pool; +	} + +	heap->heap.debug_show = ion_system_heap_debug_show; +	return &heap->heap; + +destroy_pools: +	while (i--) +		ion_page_pool_destroy(heap->pools[i]); +	kfree(heap->pools); +free_heap: +	kfree(heap); +	return ERR_PTR(-ENOMEM); +} + +void ion_system_heap_destroy(struct ion_heap *heap) +{ +	struct ion_system_heap *sys_heap = container_of(heap, +							struct ion_system_heap, +							heap); +	int i; + +	for (i = 0; i < num_orders; i++) +		ion_page_pool_destroy(sys_heap->pools[i]); +	kfree(sys_heap->pools); +	kfree(sys_heap); +} + +static int ion_system_contig_heap_allocate(struct ion_heap *heap, +					   struct ion_buffer *buffer, +					   unsigned long len, +					   unsigned long align, +					   unsigned long flags) +{ +	int order = get_order(len); +	struct page *page; +	struct sg_table *table; +	unsigned long i; +	int ret; + +	if (align > (PAGE_SIZE << order)) +		return -EINVAL; + +	page = alloc_pages(low_order_gfp_flags, order); +	if (!page) +		return -ENOMEM; + +	split_page(page, order); + +	len = PAGE_ALIGN(len); +	for (i = len >> PAGE_SHIFT; i < (1 << order); i++) +		__free_page(page + i); + +	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); +	if (!table) { +		ret = -ENOMEM; +		goto free_pages; +	} + +	ret = sg_alloc_table(table, 1, GFP_KERNEL); +	if (ret) +		goto free_table; + +	sg_set_page(table->sgl, page, len, 0); + +	buffer->priv_virt = table; + +	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL); + +	return 0; + +free_table: +	kfree(table); +free_pages: +	for (i = 0; i < len >> PAGE_SHIFT; i++) +		__free_page(page + i); + +	return ret; +} + +static void ion_system_contig_heap_free(struct ion_buffer *buffer) +{ +	struct sg_table *table = buffer->priv_virt; +	struct page *page = sg_page(table->sgl); +	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; +	unsigned long i; + +	for (i = 0; i < pages; i++) +		__free_page(page + i); +	sg_free_table(table); +	kfree(table); +} + +static int ion_system_contig_heap_phys(struct ion_heap *heap, +				       struct ion_buffer *buffer, +				       ion_phys_addr_t *addr, size_t *len) +{ +	struct sg_table *table = buffer->priv_virt; +	struct page *page = sg_page(table->sgl); +	*addr = page_to_phys(page); +	*len = buffer->size; +	return 0; +} + +static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap, +						struct ion_buffer *buffer) +{ +	return buffer->priv_virt; +} + +static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap, +					     struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops kmalloc_ops = { +	.allocate = ion_system_contig_heap_allocate, +	.free = ion_system_contig_heap_free, +	.phys = ion_system_contig_heap_phys, +	.map_dma = ion_system_contig_heap_map_dma, +	.unmap_dma = ion_system_contig_heap_unmap_dma, +	.map_kernel = ion_heap_map_kernel, +	.unmap_kernel = ion_heap_unmap_kernel, +	.map_user = ion_heap_map_user, +}; + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) +{ +	struct ion_heap *heap; + +	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); +	if (!heap) +		return ERR_PTR(-ENOMEM); +	heap->ops = &kmalloc_ops; +	heap->type 
= ION_HEAP_TYPE_SYSTEM_CONTIG; +	return heap; +} + +void ion_system_contig_heap_destroy(struct ion_heap *heap) +{ +	kfree(heap); +} diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c new file mode 100644 index 00000000000..654acb5c8eb --- /dev/null +++ b/drivers/staging/android/ion/ion_test.c @@ -0,0 +1,282 @@ +/* + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "ion-test: " fmt + +#include <linux/dma-buf.h> +#include <linux/dma-direction.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> + +#include "ion.h" +#include "../uapi/ion_test.h" + +#define u64_to_uptr(x) ((void __user *)(unsigned long)(x)) + +struct ion_test_device { +	struct miscdevice misc; +}; + +struct ion_test_data { +	struct dma_buf *dma_buf; +	struct device *dev; +}; + +static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf, +		void __user *ptr, size_t offset, size_t size, bool write) +{ +	int ret = 0; +	struct dma_buf_attachment *attach; +	struct sg_table *table; +	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL); +	enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; +	struct sg_page_iter sg_iter; +	unsigned long offset_page; + +	attach = dma_buf_attach(dma_buf, dev); +	if (IS_ERR(attach)) +		return PTR_ERR(attach); + +	table = dma_buf_map_attachment(attach, dir); +	if (IS_ERR(table)) +		return PTR_ERR(table); + +	offset_page = offset >> PAGE_SHIFT; +	offset %= PAGE_SIZE; + +	for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) { +		struct page *page = sg_page_iter_page(&sg_iter); +		void *vaddr = vmap(&page, 1, VM_MAP, pgprot); +		size_t to_copy = PAGE_SIZE - offset; + +		to_copy = min(to_copy, size); +		if (!vaddr) { +			ret = -ENOMEM; +			goto err; +		} + +		if (write) +			ret = copy_from_user(vaddr + offset, ptr, to_copy); +		else +			ret = copy_to_user(ptr, vaddr + offset, to_copy); + +		vunmap(vaddr); +		if (ret) { +			ret = -EFAULT; +			goto err; +		} +		size -= to_copy; +		if (!size) +			break; +		ptr += to_copy; +		offset = 0; +	} + +err: +	dma_buf_unmap_attachment(attach, table, dir); +	dma_buf_detach(dma_buf, attach); +	return ret; +} + +static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr, +		size_t offset, size_t size, bool write) +{ +	int ret; +	unsigned long page_offset = offset >> PAGE_SHIFT; +	size_t copy_offset = offset % PAGE_SIZE; +	size_t copy_size = size; +	enum dma_data_direction dir = write ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; + +	if (offset > dma_buf->size || size > dma_buf->size - offset) +		return -EINVAL; + +	ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir); +	if (ret) +		return ret; + +	while (copy_size > 0) { +		size_t to_copy; +		void *vaddr = dma_buf_kmap(dma_buf, page_offset); + +		if (!vaddr) +			goto err; + +		to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size); + +		if (write) +			ret = copy_from_user(vaddr + copy_offset, ptr, to_copy); +		else +			ret = copy_to_user(ptr, vaddr + copy_offset, to_copy); + +		dma_buf_kunmap(dma_buf, page_offset, vaddr); +		if (ret) { +			ret = -EFAULT; +			goto err; +		} + +		copy_size -= to_copy; +		ptr += to_copy; +		page_offset++; +		copy_offset = 0; +	} +err: +	dma_buf_end_cpu_access(dma_buf, offset, size, dir); +	return ret; +} + +static long ion_test_ioctl(struct file *filp, unsigned int cmd, +						unsigned long arg) +{ +	struct ion_test_data *test_data = filp->private_data; +	int ret = 0; + +	union { +		struct ion_test_rw_data test_rw; +	} data; + +	if (_IOC_SIZE(cmd) > sizeof(data)) +		return -EINVAL; + +	if (_IOC_DIR(cmd) & _IOC_WRITE) +		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) +			return -EFAULT; + +	switch (cmd) { +	case ION_IOC_TEST_SET_FD: +	{ +		struct dma_buf *dma_buf = NULL; +		int fd = arg; + +		if (fd >= 0) { +			dma_buf = dma_buf_get((int)arg); +			if (IS_ERR(dma_buf)) +				return PTR_ERR(dma_buf); +		} +		if (test_data->dma_buf) +			dma_buf_put(test_data->dma_buf); +		test_data->dma_buf = dma_buf; +		break; +	} +	case ION_IOC_TEST_DMA_MAPPING: +	{ +		ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf, +					u64_to_uptr(data.test_rw.ptr), +					data.test_rw.offset, data.test_rw.size, +					data.test_rw.write); +		break; +	} +	case ION_IOC_TEST_KERNEL_MAPPING: +	{ +		ret = ion_handle_test_kernel(test_data->dma_buf, +					u64_to_uptr(data.test_rw.ptr), +					data.test_rw.offset, data.test_rw.size, +					data.test_rw.write); +		break; +	} +	default: +		return -ENOTTY; +	} + +	if (_IOC_DIR(cmd) & _IOC_READ) { +		if (copy_to_user((void __user *)arg, &data, sizeof(data))) +			return -EFAULT; +	} +	return ret; +} + +static int ion_test_open(struct inode *inode, struct file *file) +{ +	struct ion_test_data *data; +	struct miscdevice *miscdev = file->private_data; + +	data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL); +	if (!data) +		return -ENOMEM; + +	data->dev = miscdev->parent; + +	file->private_data = data; + +	return 0; +} + +static int ion_test_release(struct inode *inode, struct file *file) +{ +	struct ion_test_data *data = file->private_data; + +	kfree(data); + +	return 0; +} + +static const struct file_operations ion_test_fops = { +	.owner = THIS_MODULE, +	.unlocked_ioctl = ion_test_ioctl, +	.compat_ioctl = ion_test_ioctl, +	.open = ion_test_open, +	.release = ion_test_release, +}; + +static int __init ion_test_probe(struct platform_device *pdev) +{ +	int ret; +	struct ion_test_device *testdev; + +	testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device), +				GFP_KERNEL); +	if (!testdev) +		return -ENOMEM; + +	testdev->misc.minor = MISC_DYNAMIC_MINOR; +	testdev->misc.name = "ion-test"; +	testdev->misc.fops = &ion_test_fops; +	testdev->misc.parent = &pdev->dev; +	ret = misc_register(&testdev->misc); +	if (ret) { +		pr_err("failed to register misc device.\n"); +		return ret; +	} + +	platform_set_drvdata(pdev, testdev); + +	return 0; +} + +static struct platform_driver ion_test_platform_driver = { +	.driver = { +		.name = "ion-test", +	}, +}; + +static 
int __init ion_test_init(void) +{ +	platform_device_register_simple("ion-test", -1, NULL, 0); +	return platform_driver_probe(&ion_test_platform_driver, ion_test_probe); +} + +static void __exit ion_test_exit(void) +{ +	platform_driver_unregister(&ion_test_platform_driver); +} + +module_init(ion_test_init); +module_exit(ion_test_exit); diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile new file mode 100644 index 00000000000..11cd003fb08 --- /dev/null +++ b/drivers/staging/android/ion/tegra/Makefile @@ -0,0 +1 @@ +obj-y += tegra_ion.o diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c new file mode 100644 index 00000000000..11c7cceb3c7 --- /dev/null +++ b/drivers/staging/android/ion/tegra/tegra_ion.c @@ -0,0 +1,82 @@ +/* + * drivers/gpu/tegra/tegra_ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include "../ion.h" +#include "../ion_priv.h" + +static struct ion_device *idev; +static int num_heaps; +static struct ion_heap **heaps; + +static int tegra_ion_probe(struct platform_device *pdev) +{ +	struct ion_platform_data *pdata = pdev->dev.platform_data; +	int err; +	int i; + +	num_heaps = pdata->nr; + +	heaps = devm_kzalloc(&pdev->dev, +			     sizeof(struct ion_heap *) * pdata->nr, +			     GFP_KERNEL); + +	idev = ion_device_create(NULL); +	if (IS_ERR_OR_NULL(idev)) +		return PTR_ERR(idev); + +	/* create the heaps as specified in the board file */ +	for (i = 0; i < num_heaps; i++) { +		struct ion_platform_heap *heap_data = &pdata->heaps[i]; + +		heaps[i] = ion_heap_create(heap_data); +		if (IS_ERR_OR_NULL(heaps[i])) { +			err = PTR_ERR(heaps[i]); +			goto err; +		} +		ion_device_add_heap(idev, heaps[i]); +	} +	platform_set_drvdata(pdev, idev); +	return 0; +err: +	for (i = 0; i < num_heaps; i++) { +		if (heaps[i]) +			ion_heap_destroy(heaps[i]); +	} +	return err; +} + +static int tegra_ion_remove(struct platform_device *pdev) +{ +	struct ion_device *idev = platform_get_drvdata(pdev); +	int i; + +	ion_device_destroy(idev); +	for (i = 0; i < num_heaps; i++) +		ion_heap_destroy(heaps[i]); +	return 0; +} + +static struct platform_driver ion_driver = { +	.probe = tegra_ion_probe, +	.remove = tegra_ion_remove, +	.driver = { .name = "ion-tegra" } +}; + +module_platform_driver(ion_driver); + diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c index d42f5785f09..2772e01b37f 100644 --- a/drivers/staging/android/logger.c +++ b/drivers/staging/android/logger.c @@ -108,6 +108,7 @@ static inline struct logger_log *file_get_log(struct file *file)  {  	if (file->f_mode & FMODE_READ) {  		struct logger_reader *reader = file->private_data; +  		return reader->log;  	} else  		return file->private_data; @@ -124,6 +125,7 @@ static struct logger_entry *get_entry_header(struct logger_log *log,  		size_t off, struct logger_entry *scratch)  {  	size_t len = min(sizeof(struct logger_entry), log->size - off); +  	if (len != 
sizeof(struct logger_entry)) {  		memcpy(((void *) scratch), log->buffer + off, len);  		memcpy(((void *) scratch) + len, log->buffer, @@ -642,6 +644,7 @@ static unsigned int logger_poll(struct file *file, poll_table *wait)  static long logger_set_version(struct logger_reader *reader, void __user *arg)  {  	int version; +  	if (copy_from_user(&version, arg, sizeof(int)))  		return -EFAULT; diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 6f094b37f1f..b545d3d1da3 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -88,7 +88,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)  	int array_size = ARRAY_SIZE(lowmem_adj);  	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;  	int other_file = global_page_state(NR_FILE_PAGES) - -						global_page_state(NR_SHMEM); +						global_page_state(NR_SHMEM) - +						total_swapcache_pages();  	if (lowmem_adj_size < array_size)  		array_size = lowmem_adj_size; @@ -159,8 +160,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)  			     selected->pid, selected->comm,  			     selected_oom_score_adj, selected_tasksize);  		lowmem_deathpending_timeout = jiffies + HZ; -		send_sig(SIGKILL, selected, 0);  		set_tsk_thread_flag(selected, TIF_MEMDIE); +		send_sig(SIGKILL, selected, 0);  		rem += selected_tasksize;  	} diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c index f24493ac65e..12a136ec1ce 100644 --- a/drivers/staging/android/sw_sync.c +++ b/drivers/staging/android/sw_sync.c @@ -97,6 +97,7 @@ static void sw_sync_pt_value_str(struct sync_pt *sync_pt,  				       char *str, int size)  {  	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; +  	snprintf(str, size, "%d", pt->value);  } @@ -156,6 +157,7 @@ static int sw_sync_open(struct inode *inode, struct file *file)  static int sw_sync_release(struct inode *inode, struct file *file)  {  	struct sw_sync_timeline *obj = file->private_data; +  	sync_timeline_destroy(&obj->obj);  	return 0;  } diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h index 585040be5f1..1a50669ec8a 100644 --- a/drivers/staging/android/sw_sync.h +++ b/drivers/staging/android/sw_sync.h @@ -18,10 +18,9 @@  #define _LINUX_SW_SYNC_H  #include <linux/types.h> - -#ifdef __KERNEL__ - +#include <linux/kconfig.h>  #include "sync.h" +#include "uapi/sw_sync.h"  struct sw_sync_timeline {  	struct	sync_timeline	obj; @@ -35,24 +34,26 @@ struct sw_sync_pt {  	u32			value;  }; +#if IS_ENABLED(CONFIG_SW_SYNC)  struct sw_sync_timeline *sw_sync_timeline_create(const char *name);  void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);  struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); - -#endif /* __KERNEL __ */ - -struct sw_sync_create_fence_data { -	__u32	value; -	char	name[32]; -	__s32	fence; /* fd of new fence */ -}; - -#define SW_SYNC_IOC_MAGIC	'W' - -#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\ -		struct sw_sync_create_fence_data) -#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32) - +#else +static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name) +{ +	return NULL; +} + +static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) +{ +} + +static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, +		u32 value) +{ +	return NULL; +} +#endif /* IS_ENABLED(CONFIG_SW_SYNC) */  
#endif /* _LINUX_SW_SYNC_H */ diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c index 38e5d3b5ed9..18174f7c871 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c @@ -79,27 +79,31 @@ static void sync_timeline_free(struct kref *kref)  		container_of(kref, struct sync_timeline, kref);  	unsigned long flags; -	if (obj->ops->release_obj) -		obj->ops->release_obj(obj); -  	spin_lock_irqsave(&sync_timeline_list_lock, flags);  	list_del(&obj->sync_timeline_list);  	spin_unlock_irqrestore(&sync_timeline_list_lock, flags); +	if (obj->ops->release_obj) +		obj->ops->release_obj(obj); +  	kfree(obj);  }  void sync_timeline_destroy(struct sync_timeline *obj)  {  	obj->destroyed = true; +	/* +	 * Ensure timeline is marked as destroyed before +	 * changing timeline's fences status. +	 */ +	smp_wmb();  	/* -	 * If this is not the last reference, signal any children -	 * that their parent is going away. +	 * signal any children that their parent is going away.  	 */ +	sync_timeline_signal(obj); -	if (!kref_put(&obj->kref, sync_timeline_free)) -		sync_timeline_signal(obj); +	kref_put(&obj->kref, sync_timeline_free);  }  EXPORT_SYMBOL(sync_timeline_destroy); @@ -384,6 +388,7 @@ static void sync_fence_detach_pts(struct sync_fence *fence)  	list_for_each_safe(pos, n, &fence->pt_list_head) {  		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); +  		sync_timeline_remove_pt(pt);  	}  } @@ -394,6 +399,7 @@ static void sync_fence_free_pts(struct sync_fence *fence)  	list_for_each_safe(pos, n, &fence->pt_list_head) {  		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); +  		sync_pt_free(pt);  	}  } @@ -827,6 +833,7 @@ static long sync_fence_ioctl(struct file *file, unsigned int cmd,  			     unsigned long arg)  {  	struct sync_fence *fence = file->private_data; +  	switch (cmd) {  	case SYNC_IOC_WAIT:  		return sync_fence_ioctl_wait(fence, arg); @@ -856,18 +863,21 @@ static const char *sync_status_str(int status)  static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)  {  	int status = pt->status; +  	seq_printf(s, "  %s%spt %s",  		   fence ? pt->parent->name : "",  		   fence ? 
"_" : "",  		   sync_status_str(status));  	if (pt->status) {  		struct timeval tv = ktime_to_timeval(pt->timestamp); +  		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);  	}  	if (pt->parent->ops->timeline_value_str &&  	    pt->parent->ops->pt_value_str) {  		char value[64]; +  		pt->parent->ops->pt_value_str(pt, value, sizeof(value));  		seq_printf(s, ": %s", value);  		if (fence) { @@ -892,6 +902,7 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)  	if (obj->ops->timeline_value_str) {  		char value[64]; +  		obj->ops->timeline_value_str(obj, value, sizeof(value));  		seq_printf(s, ": %s", value);  	} else if (obj->ops->print_obj) { @@ -1001,6 +1012,7 @@ static void sync_dump(void)  	for (i = 0; i < s.count; i += DUMP_CHUNK) {  		if ((s.count - i) > DUMP_CHUNK) {  			char c = s.buf[i + DUMP_CHUNK]; +  			s.buf[i + DUMP_CHUNK] = 0;  			pr_cont("%s", s.buf + i);  			s.buf[i + DUMP_CHUNK] = c; diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h index 38ea986dc70..eaf57cccf62 100644 --- a/drivers/staging/android/sync.h +++ b/drivers/staging/android/sync.h @@ -14,21 +14,21 @@  #define _LINUX_SYNC_H  #include <linux/types.h> -#ifdef __KERNEL__ -  #include <linux/kref.h>  #include <linux/ktime.h>  #include <linux/list.h>  #include <linux/spinlock.h>  #include <linux/wait.h> +#include "uapi/sync.h" +  struct sync_timeline;  struct sync_pt;  struct sync_fence;  /**   * struct sync_timeline_ops - sync object implementation ops - * @driver_name:	name of the implentation + * @driver_name:	name of the implementation   * @dup:		duplicate a sync_pt   * @has_signaled:	returns:   *			  1 if pt has signaled @@ -37,12 +37,12 @@ struct sync_fence;   * @compare:		returns:   *			  1 if b will signal before a   *			  0 if a and b will signal at the same time - *			 -1 if a will signabl before b + *			 -1 if a will signal before b   * @free_pt:		called before sync_pt is freed   * @release_obj:	called before sync_timeline is freed   * @print_obj:		deprecated   * @print_pt:		deprecated - * @fill_driver_data:	write implmentation specific driver data to data. + * @fill_driver_data:	write implementation specific driver data to data.   *			  should return an error if there is not enough room   *			  as specified by size.  This information is returned   *			  to userspace by SYNC_IOC_FENCE_INFO. @@ -53,7 +53,7 @@ struct sync_timeline_ops {  	const char *driver_name;  	/* required */ -	struct sync_pt *(*dup)(struct sync_pt *pt); +	struct sync_pt * (*dup)(struct sync_pt *pt);  	/* required */  	int (*has_signaled)(struct sync_pt *pt); @@ -88,9 +88,9 @@ struct sync_timeline_ops {  /**   * struct sync_timeline - sync object   * @kref:		reference count on fence. - * @ops:		ops that define the implementaiton of the sync_timeline + * @ops:		ops that define the implementation of the sync_timeline   * @name:		name of the sync_timeline. 
Useful for debugging - * @destoryed:		set when sync_timeline is destroyed + * @destroyed:		set when sync_timeline is destroyed   * @child_list_head:	list of children sync_pts for this sync_timeline   * @child_list_lock:	lock protecting @child_list_head, destroyed, and   *			  sync_pt.status @@ -119,12 +119,12 @@ struct sync_timeline {   * @parent:		sync_timeline to which this sync_pt belongs   * @child_list:		membership in sync_timeline.child_list_head   * @active_list:	membership in sync_timeline.active_list_head - * @signaled_list:	membership in temorary signaled_list on stack + * @signaled_list:	membership in temporary signaled_list on stack   * @fence:		sync_fence to which the sync_pt belongs   * @pt_list:		membership in sync_fence.pt_list_head   * @status:		1: signaled, 0:active, <0: error   * @timestamp:		time which sync_pt status transitioned from active to - *			  singaled or error. + *			  signaled or error.   */  struct sync_pt {  	struct sync_timeline		*parent; @@ -145,9 +145,9 @@ struct sync_pt {  /**   * struct sync_fence - sync fence   * @file:		file representing this fence - * @kref:		referenace count on fence. + * @kref:		reference count on fence.   * @name:		name of sync_fence.  Useful for debugging - * @pt_list_head:	list of sync_pts in ths fence.  immutable once fence + * @pt_list_head:	list of sync_pts in the fence.  immutable once fence   *			  is created   * @waiter_list_head:	list of asynchronous waiters on this fence   * @waiter_list_lock:	lock protecting @waiter_list_head and @status @@ -201,23 +201,23 @@ static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,  /**   * sync_timeline_create() - creates a sync object - * @ops:	specifies the implemention ops for the object + * @ops:	specifies the implementation ops for the object   * @size:	size to allocate for this obj   * @name:	sync_timeline name   * - * Creates a new sync_timeline which will use the implemetation specified by - * @ops.  @size bytes will be allocated allowing for implemntation specific - * data to be kept after the generic sync_timeline stuct. + * Creates a new sync_timeline which will use the implementation specified by + * @ops.  @size bytes will be allocated allowing for implementation specific + * data to be kept after the generic sync_timeline struct.   */  struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,  					   int size, const char *name);  /** - * sync_timeline_destory() - destorys a sync object + * sync_timeline_destroy() - destroys a sync object   * @obj:	sync_timeline to destroy   * - * A sync implemntation should call this when the @obj is going away - * (i.e. module unload.)  @obj won't actually be freed until all its childern + * A sync implementation should call this when the @obj is going away + * (i.e. module unload.)  @obj won't actually be freed until all its children   * sync_pts are freed.   */  void sync_timeline_destroy(struct sync_timeline *obj); @@ -226,7 +226,7 @@ void sync_timeline_destroy(struct sync_timeline *obj);   * sync_timeline_signal() - signal a status change on a sync_timeline   * @obj:	sync_timeline to signal   * - * A sync implemntation should call this any time one of it's sync_pts + * A sync implementation should call this any time one of it's sync_pts   * has signaled or has an error condition.   
*/  void sync_timeline_signal(struct sync_timeline *obj); @@ -236,8 +236,8 @@ void sync_timeline_signal(struct sync_timeline *obj);   * @parent:	sync_pt's parent sync_timeline   * @size:	size to allocate for this pt   * - * Creates a new sync_pt as a chiled of @parent.  @size bytes will be - * allocated allowing for implemntation specific data to be kept after + * Creates a new sync_pt as a child of @parent.  @size bytes will be + * allocated allowing for implementation specific data to be kept after   * the generic sync_timeline struct.   */  struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size); @@ -287,7 +287,7 @@ struct sync_fence *sync_fence_merge(const char *name,  struct sync_fence *sync_fence_fdget(int fd);  /** - * sync_fence_put() - puts a refernnce of a sync fence + * sync_fence_put() - puts a reference of a sync fence   * @fence:	fence to put   *   * Puts a reference on @fence.  If this is the last reference, the fence and @@ -297,7 +297,7 @@ void sync_fence_put(struct sync_fence *fence);  /**   * sync_fence_install() - installs a fence into a file descriptor - * @fence:	fence to instal + * @fence:	fence to install   * @fd:		file descriptor in which to install the fence   *   * Installs @fence into @fd.  @fd's should be acquired through get_unused_fd(). @@ -341,86 +341,4 @@ int sync_fence_cancel_async(struct sync_fence *fence,   */  int sync_fence_wait(struct sync_fence *fence, long timeout); -#endif /* __KERNEL__ */ - -/** - * struct sync_merge_data - data passed to merge ioctl - * @fd2:	file descriptor of second fence - * @name:	name of new fence - * @fence:	returns the fd of the new fence to userspace - */ -struct sync_merge_data { -	__s32	fd2; /* fd of second fence */ -	char	name[32]; /* name of new fence */ -	__s32	fence; /* fd on newly created fence */ -}; - -/** - * struct sync_pt_info - detailed sync_pt information - * @len:		length of sync_pt_info including any driver_data - * @obj_name:		name of parent sync_timeline - * @driver_name:	name of driver implmenting the parent - * @status:		status of the sync_pt 0:active 1:signaled <0:error - * @timestamp_ns:	timestamp of status change in nanoseconds - * @driver_data:	any driver dependant data - */ -struct sync_pt_info { -	__u32	len; -	char	obj_name[32]; -	char	driver_name[32]; -	__s32	status; -	__u64	timestamp_ns; - -	__u8	driver_data[0]; -}; - -/** - * struct sync_fence_info_data - data returned from fence info ioctl - * @len:	ioctl caller writes the size of the buffer its passing in. - *		ioctl returns length of sync_fence_data reutnred to userspace - *		including pt_info. - * @name:	name of fence - * @status:	status of fence. 1: signaled 0:active <0:error - * @pt_info:	a sync_pt_info struct for every sync_pt in the fence - */ -struct sync_fence_info_data { -	__u32	len; -	char	name[32]; -	__s32	status; - -	__u8	pt_info[0]; -}; - -#define SYNC_IOC_MAGIC		'>' - -/** - * DOC: SYNC_IOC_WAIT - wait for a fence to signal - * - * pass timeout in milliseconds.  Waits indefinitely timeout < 0. - */ -#define SYNC_IOC_WAIT		_IOW(SYNC_IOC_MAGIC, 0, __s32) - -/** - * DOC: SYNC_IOC_MERGE - merge two fences - * - * Takes a struct sync_merge_data.  Creates a new fence containing copies of - * the sync_pts in both the calling fd and sync_merge_data.fd2.  
Returns the - * new fence's fd in sync_merge_data.fence - */ -#define SYNC_IOC_MERGE		_IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data) - -/** - * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence - * - * Takes a struct sync_fence_info_data with extra space allocated for pt_info. - * Caller should write the size of the buffer into len.  On return, len is - * updated to reflect the total size of the sync_fence_info_data including - * pt_info. - * - * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence. - * To itterate over the sync_pt_infos, use the sync_pt_info.len field. - */ -#define SYNC_IOC_FENCE_INFO	_IOWR(SYNC_IOC_MAGIC, 2,\ -	struct sync_fence_info_data) -  #endif /* _LINUX_SYNC_H */ diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c index e81451425c0..180c209a009 100644 --- a/drivers/staging/android/timed_gpio.c +++ b/drivers/staging/android/timed_gpio.c @@ -51,6 +51,7 @@ static int gpio_get_time(struct timed_output_dev *dev)  	if (hrtimer_active(&data->timer)) {  		ktime_t r = hrtimer_get_remaining(&data->timer);  		struct timeval t = ktime_to_timeval(r); +  		return t.tv_sec * 1000 + t.tv_usec / 1000;  	} else  		return 0; @@ -90,7 +91,8 @@ static int timed_gpio_probe(struct platform_device *pdev)  	if (!pdata)  		return -EBUSY; -	gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios, +	gpio_data = devm_kzalloc(&pdev->dev, +			sizeof(struct timed_gpio_data) * pdata->num_gpios,  			GFP_KERNEL);  	if (!gpio_data)  		return -ENOMEM; @@ -131,7 +133,6 @@ err_out:  		timed_output_dev_unregister(&gpio_data[i].dev);  		gpio_free(gpio_data[i].gpio);  	} -	kfree(gpio_data);  	return ret;  } @@ -147,8 +148,6 @@ static int timed_gpio_remove(struct platform_device *pdev)  		gpio_free(gpio_data[i].gpio);  	} -	kfree(gpio_data); -  	return 0;  } diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c index 2c617834dc4..c341ac11c5a 100644 --- a/drivers/staging/android/timed_output.c +++ b/drivers/staging/android/timed_output.c @@ -97,7 +97,6 @@ void timed_output_dev_unregister(struct timed_output_dev *tdev)  {  	tdev->enable(tdev, 0);  	device_destroy(timed_output_class, MKDEV(0, tdev->index)); -	dev_set_drvdata(tdev->dev, NULL);  }  EXPORT_SYMBOL_GPL(timed_output_dev_unregister); diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h index ec907ab2ff5..13d2ca51cbe 100644 --- a/drivers/staging/android/timed_output.h +++ b/drivers/staging/android/timed_output.h @@ -20,10 +20,10 @@ struct timed_output_dev {  	const char	*name;  	/* enable the output and set the timer */ -	void	(*enable)(struct timed_output_dev *sdev, int timeout); +	void (*enable)(struct timed_output_dev *sdev, int timeout);  	/* returns the current number of milliseconds remaining on the timer */ -	int		(*get_time)(struct timed_output_dev *sdev); +	int (*get_time)(struct timed_output_dev *sdev);  	/* private data */  	struct device	*dev; @@ -31,7 +31,7 @@ struct timed_output_dev {  	int		state;  }; -extern int timed_output_dev_register(struct timed_output_dev *dev); -extern void timed_output_dev_unregister(struct timed_output_dev *dev); +int timed_output_dev_register(struct timed_output_dev *dev); +void timed_output_dev_unregister(struct timed_output_dev *dev);  #endif diff --git a/drivers/staging/android/uapi/android_alarm.h b/drivers/staging/android/uapi/android_alarm.h new file mode 100644 index 00000000000..aa013f6f5f3 --- /dev/null +++ 
b/drivers/staging/android/uapi/android_alarm.h @@ -0,0 +1,62 @@ +/* drivers/staging/android/uapi/android_alarm.h + * + * Copyright (C) 2006-2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_ANDROID_ALARM_H +#define _UAPI_LINUX_ANDROID_ALARM_H + +#include <linux/ioctl.h> +#include <linux/time.h> + +enum android_alarm_type { +	/* return code bit numbers or set alarm arg */ +	ANDROID_ALARM_RTC_WAKEUP, +	ANDROID_ALARM_RTC, +	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, +	ANDROID_ALARM_ELAPSED_REALTIME, +	ANDROID_ALARM_SYSTEMTIME, + +	ANDROID_ALARM_TYPE_COUNT, + +	/* return code bit numbers */ +	/* ANDROID_ALARM_TIME_CHANGE = 16 */ +}; + +enum android_alarm_return_flags { +	ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP, +	ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC, +	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK = +				1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, +	ANDROID_ALARM_ELAPSED_REALTIME_MASK = +				1U << ANDROID_ALARM_ELAPSED_REALTIME, +	ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME, +	ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16 +}; + +/* Disable alarm */ +#define ANDROID_ALARM_CLEAR(type)           _IO('a', 0 | ((type) << 4)) + +/* Ack last alarm and wait for next */ +#define ANDROID_ALARM_WAIT                  _IO('a', 1) + +#define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size) +/* Set alarm */ +#define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec) +#define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec) +#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec) +#define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec) +#define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0))) +#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4) + +#endif diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h new file mode 100644 index 00000000000..ba4743c71d6 --- /dev/null +++ b/drivers/staging/android/uapi/ashmem.h @@ -0,0 +1,47 @@ +/* + * drivers/staging/android/uapi/ashmem.h + * + * Copyright 2008 Google Inc. + * Author: Robert Love + * + * This file is dual licensed.  It may be redistributed and/or modified + * under the terms of the Apache 2.0 License OR version 2 of the GNU + * General Public License. + */ + +#ifndef _UAPI_LINUX_ASHMEM_H +#define _UAPI_LINUX_ASHMEM_H + +#include <linux/ioctl.h> + +#define ASHMEM_NAME_LEN		256 + +#define ASHMEM_NAME_DEF		"dev/ashmem" + +/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */ +#define ASHMEM_NOT_PURGED	0 +#define ASHMEM_WAS_PURGED	1 + +/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? 
*/ +#define ASHMEM_IS_UNPINNED	0 +#define ASHMEM_IS_PINNED	1 + +struct ashmem_pin { +	__u32 offset;	/* offset into region, in bytes, page-aligned */ +	__u32 len;	/* length forward from offset, in bytes, page-aligned */ +}; + +#define __ASHMEMIOC		0x77 + +#define ASHMEM_SET_NAME		_IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN]) +#define ASHMEM_GET_NAME		_IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN]) +#define ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, size_t) +#define ASHMEM_GET_SIZE		_IO(__ASHMEMIOC, 4) +#define ASHMEM_SET_PROT_MASK	_IOW(__ASHMEMIOC, 5, unsigned long) +#define ASHMEM_GET_PROT_MASK	_IO(__ASHMEMIOC, 6) +#define ASHMEM_PIN		_IOW(__ASHMEMIOC, 7, struct ashmem_pin) +#define ASHMEM_UNPIN		_IOW(__ASHMEMIOC, 8, struct ashmem_pin) +#define ASHMEM_GET_PIN_STATUS	_IO(__ASHMEMIOC, 9) +#define ASHMEM_PURGE_ALL_CACHES	_IO(__ASHMEMIOC, 10) + +#endif	/* _UAPI_LINUX_ASHMEM_H */ diff --git a/drivers/staging/android/uapi/binder.h b/drivers/staging/android/uapi/binder.h new file mode 100644 index 00000000000..904adb7600c --- /dev/null +++ b/drivers/staging/android/uapi/binder.h @@ -0,0 +1,351 @@ +/* + * Copyright (C) 2008 Google, Inc. + * + * Based on, but no longer compatible with, the original + * OpenBinder.org binder driver interface, which is: + * + * Copyright (c) 2005 Palmsource, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_BINDER_H +#define _UAPI_LINUX_BINDER_H + +#include <linux/ioctl.h> + +#define B_PACK_CHARS(c1, c2, c3, c4) \ +	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) +#define B_TYPE_LARGE 0x85 + +enum { +	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), +	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), +	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), +	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), +	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), +}; + +enum { +	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, +	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, +}; + +#ifdef BINDER_IPC_32BIT +typedef __u32 binder_size_t; +typedef __u32 binder_uintptr_t; +#else +typedef __u64 binder_size_t; +typedef __u64 binder_uintptr_t; +#endif + +/* + * This is the flattened representation of a Binder object for transfer + * between processes.  The 'offsets' supplied as part of a binder transaction + * contains offsets into the data where these structures occur.  The Binder + * driver takes care of re-writing the structure type and data as it moves + * between processes. + */ +struct flat_binder_object { +	/* 8 bytes for large_flat_header. */ +	__u32		type; +	__u32		flags; + +	/* 8 bytes of data. */ +	union { +		binder_uintptr_t	binder;	/* local object */ +		__u32			handle;	/* remote object */ +	}; + +	/* extra data associated with local object */ +	binder_uintptr_t	cookie; +}; + +/* + * On 64-bit platforms where user code may run in 32-bits the driver must + * translate the buffer (and local binder) addresses appropriately. 
+ */ + +struct binder_write_read { +	binder_size_t		write_size;	/* bytes to write */ +	binder_size_t		write_consumed;	/* bytes consumed by driver */ +	binder_uintptr_t	write_buffer; +	binder_size_t		read_size;	/* bytes to read */ +	binder_size_t		read_consumed;	/* bytes consumed by driver */ +	binder_uintptr_t	read_buffer; +}; + +/* Use with BINDER_VERSION, driver fills in fields. */ +struct binder_version { +	/* driver protocol version -- increment with incompatible change */ +	__s32       protocol_version; +}; + +/* This is the current protocol version. */ +#ifdef BINDER_IPC_32BIT +#define BINDER_CURRENT_PROTOCOL_VERSION 7 +#else +#define BINDER_CURRENT_PROTOCOL_VERSION 8 +#endif + +#define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read) +#define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64) +#define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32) +#define BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32) +#define BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32) +#define BINDER_THREAD_EXIT		_IOW('b', 8, __s32) +#define BINDER_VERSION			_IOWR('b', 9, struct binder_version) + +/* + * NOTE: Two special error codes you should check for when calling + * in to the driver are: + * + * EINTR -- The operation has been interupted.  This should be + * handled by retrying the ioctl() until a different error code + * is returned. + * + * ECONNREFUSED -- The driver is no longer accepting operations + * from your process.  That is, the process is being destroyed. + * You should handle this by exiting from your process.  Note + * that once this error code is returned, all further calls to + * the driver from any thread will return this same code. + */ + +enum transaction_flags { +	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */ +	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */ +	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */ +	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */ +}; + +struct binder_transaction_data { +	/* The first two are only used for bcTRANSACTION and brTRANSACTION, +	 * identifying the target and contents of the transaction. +	 */ +	union { +		/* target descriptor of command transaction */ +		__u32	handle; +		/* target descriptor of return transaction */ +		binder_uintptr_t ptr; +	} target; +	binder_uintptr_t	cookie;	/* target object cookie */ +	__u32		code;		/* transaction command */ + +	/* General information about the transaction. */ +	__u32	        flags; +	pid_t		sender_pid; +	uid_t		sender_euid; +	binder_size_t	data_size;	/* number of bytes of data */ +	binder_size_t	offsets_size;	/* number of bytes of offsets */ + +	/* If this transaction is inline, the data immediately +	 * follows here; otherwise, it ends with a pointer to +	 * the data buffer. +	 */ +	union { +		struct { +			/* transaction data */ +			binder_uintptr_t	buffer; +			/* offsets from buffer to flat_binder_object structs */ +			binder_uintptr_t	offsets; +		} ptr; +		__u8	buf[8]; +	} data; +}; + +struct binder_ptr_cookie { +	binder_uintptr_t ptr; +	binder_uintptr_t cookie; +}; + +struct binder_handle_cookie { +	__u32 handle; +	binder_uintptr_t cookie; +} __attribute__((packed)); + +struct binder_pri_desc { +	__s32 priority; +	__u32 desc; +}; + +struct binder_pri_ptr_cookie { +	__s32 priority; +	binder_uintptr_t ptr; +	binder_uintptr_t cookie; +}; + +enum binder_driver_return_protocol { +	BR_ERROR = _IOR('r', 0, __s32), +	/* +	 * int: error code +	 */ + +	BR_OK = _IO('r', 1), +	/* No parameters! 
*/ + +	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), +	BR_REPLY = _IOR('r', 3, struct binder_transaction_data), +	/* +	 * binder_transaction_data: the received command. +	 */ + +	BR_ACQUIRE_RESULT = _IOR('r', 4, __s32), +	/* +	 * not currently supported +	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. +	 * Else the remote object has acquired a primary reference. +	 */ + +	BR_DEAD_REPLY = _IO('r', 5), +	/* +	 * The target of the last transaction (either a bcTRANSACTION or +	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters. +	 */ + +	BR_TRANSACTION_COMPLETE = _IO('r', 6), +	/* +	 * No parameters... always refers to the last transaction requested +	 * (including replies).  Note that this will be sent even for +	 * asynchronous transactions. +	 */ + +	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), +	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), +	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), +	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), +	/* +	 * void *:	ptr to binder +	 * void *: cookie for binder +	 */ + +	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), +	/* +	 * not currently supported +	 * int:	priority +	 * void *: ptr to binder +	 * void *: cookie for binder +	 */ + +	BR_NOOP = _IO('r', 12), +	/* +	 * No parameters.  Do nothing and examine the next command.  It exists +	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command. +	 */ + +	BR_SPAWN_LOOPER = _IO('r', 13), +	/* +	 * No parameters.  The driver has determined that a process has no +	 * threads waiting to service incoming transactions.  When a process +	 * receives this command, it must spawn a new service thread and +	 * register it via bcENTER_LOOPER. +	 */ + +	BR_FINISHED = _IO('r', 14), +	/* +	 * not currently supported +	 * stop threadpool thread +	 */ + +	BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t), +	/* +	 * void *: cookie +	 */ +	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t), +	/* +	 * void *: cookie +	 */ + +	BR_FAILED_REPLY = _IO('r', 17), +	/* +	 * The the last transaction (either a bcTRANSACTION or +	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters. +	 */ +}; + +enum binder_driver_command_protocol { +	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), +	BC_REPLY = _IOW('c', 1, struct binder_transaction_data), +	/* +	 * binder_transaction_data: the sent command. +	 */ + +	BC_ACQUIRE_RESULT = _IOW('c', 2, __s32), +	/* +	 * not currently supported +	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful. +	 * Else you have acquired a primary reference on the object. +	 */ + +	BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t), +	/* +	 * void *: ptr to transaction data received on a read +	 */ + +	BC_INCREFS = _IOW('c', 4, __u32), +	BC_ACQUIRE = _IOW('c', 5, __u32), +	BC_RELEASE = _IOW('c', 6, __u32), +	BC_DECREFS = _IOW('c', 7, __u32), +	/* +	 * int:	descriptor +	 */ + +	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), +	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), +	/* +	 * void *: ptr to binder +	 * void *: cookie for binder +	 */ + +	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), +	/* +	 * not currently supported +	 * int: priority +	 * int: descriptor +	 */ + +	BC_REGISTER_LOOPER = _IO('c', 11), +	/* +	 * No parameters. +	 * Register a spawned looper thread with the device. +	 */ + +	BC_ENTER_LOOPER = _IO('c', 12), +	BC_EXIT_LOOPER = _IO('c', 13), +	/* +	 * No parameters. 
+	 * These two commands are sent as an application-level thread +	 * enters and exits the binder loop, respectively.  They are +	 * used so the binder can have an accurate count of the number +	 * of looping threads it has available. +	 */ + +	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, +						struct binder_handle_cookie), +	/* +	 * int: handle +	 * void *: cookie +	 */ + +	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, +						struct binder_handle_cookie), +	/* +	 * int: handle +	 * void *: cookie +	 */ + +	BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t), +	/* +	 * void *: cookie +	 */ +}; + +#endif /* _UAPI_LINUX_BINDER_H */ + diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h new file mode 100644 index 00000000000..6aa49567337 --- /dev/null +++ b/drivers/staging/android/uapi/ion.h @@ -0,0 +1,196 @@ +/* + * drivers/staging/android/uapi/ion.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_ION_H +#define _UAPI_LINUX_ION_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +typedef int ion_user_handle_t; + +/** + * enum ion_heap_types - list of all possible types of heaps + * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc + * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc + * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved + *				 carveout heap, allocations are physically + *				 contiguous + * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API + * @ION_NUM_HEAPS:		 helper for iterating over heaps, a bit mask + *				 is used to identify the heaps, so only 32 + *				 total heap types are supported + */ +enum ion_heap_type { +	ION_HEAP_TYPE_SYSTEM, +	ION_HEAP_TYPE_SYSTEM_CONTIG, +	ION_HEAP_TYPE_CARVEOUT, +	ION_HEAP_TYPE_CHUNK, +	ION_HEAP_TYPE_DMA, +	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always +				 are at the end of this enum */ +	ION_NUM_HEAPS = 16, +}; + +#define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM) +#define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG) +#define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT) +#define ION_HEAP_TYPE_DMA_MASK		(1 << ION_HEAP_TYPE_DMA) + +#define ION_NUM_HEAP_IDS		(sizeof(unsigned int) * 8) + +/** + * allocation flags - the lower 16 bits are used by core ion, the upper 16 + * bits are reserved for use by the heaps themselves. 
+ */ +#define ION_FLAG_CACHED 1		/* mappings of this buffer should be +					   cached, ion will do cache +					   maintenance when the buffer is +					   mapped for dma */ +#define ION_FLAG_CACHED_NEEDS_SYNC 2	/* mappings of this buffer will created +					   at mmap time, if this is set +					   caches must be managed manually */ + +/** + * DOC: Ion Userspace API + * + * create a client by opening /dev/ion + * most operations handled via following ioctls + * + */ + +/** + * struct ion_allocation_data - metadata passed from userspace for allocations + * @len:		size of the allocation + * @align:		required alignment of the allocation + * @heap_id_mask:	mask of heap ids to allocate from + * @flags:		flags passed to heap + * @handle:		pointer that will be populated with a cookie to use to + *			refer to this allocation + * + * Provided by userspace as an argument to the ioctl + */ +struct ion_allocation_data { +	size_t len; +	size_t align; +	unsigned int heap_id_mask; +	unsigned int flags; +	ion_user_handle_t handle; +}; + +/** + * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair + * @handle:	a handle + * @fd:		a file descriptor representing that handle + * + * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with + * the handle returned from ion alloc, and the kernel returns the file + * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace + * provides the file descriptor and the kernel returns the handle. + */ +struct ion_fd_data { +	ion_user_handle_t handle; +	int fd; +}; + +/** + * struct ion_handle_data - a handle passed to/from the kernel + * @handle:	a handle + */ +struct ion_handle_data { +	ion_user_handle_t handle; +}; + +/** + * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl + * @cmd:	the custom ioctl function to call + * @arg:	additional data to pass to the custom ioctl, typically a user + *		pointer to a predefined structure + * + * This works just like the regular cmd and arg fields of an ioctl. + */ +struct ion_custom_data { +	unsigned int cmd; +	unsigned long arg; +}; + +#define ION_IOC_MAGIC		'I' + +/** + * DOC: ION_IOC_ALLOC - allocate memory + * + * Takes an ion_allocation_data struct and returns it with the handle field + * populated with the opaque handle for the allocation. + */ +#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \ +				      struct ion_allocation_data) + +/** + * DOC: ION_IOC_FREE - free memory + * + * Takes an ion_handle_data struct and frees the handle. + */ +#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) + +/** + * DOC: ION_IOC_MAP - get a file descriptor to mmap + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle.  Returns the struct with the fd field set to a file + * descriptor open in the current address space.  This file descriptor + * can then be used as an argument to mmap. + */ +#define ION_IOC_MAP		_IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data) + +/** + * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle.  Returns the struct with the fd field set to a file + * descriptor open in the current address space.  This file descriptor + * can then be passed to another process.  The corresponding opaque handle can + * be retrieved via ION_IOC_IMPORT. 
+ */
+#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf APIs correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary, it should be used after touching a cached buffer from the
+ * cpu; this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC		_IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl.
+ */
+#define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+#endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h
new file mode 100644
index 00000000000..ffef06f6313
--- /dev/null
+++ b/drivers/staging/android/uapi/ion_test.h
@@ -0,0 +1,70 @@
+/*
+ * drivers/staging/android/uapi/ion_test.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_TEST_H
+#define _UAPI_LINUX_ION_TEST_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct ion_test_rw_data - metadata passed to the kernel to read or write a handle
+ * @ptr:	a pointer to an area at least as large as size
+ * @offset:	offset into the ion buffer to start reading
+ * @size:	size to read or write
+ * @write:	1 to write, 0 to read
+ */
+struct ion_test_rw_data {
+	__u64 ptr;
+	__u64 offset;
+	__u64 size;
+	int write;
+	int __padding;
+};
+
+#define ION_IOC_MAGIC		'I'
+
+/**
+ * DOC: ION_IOC_TEST_SET_FD - attach a dma buf to the test driver
+ *
+ * Attaches a dma buf fd to the test driver.  Passing a second fd or -1 will
+ * release the first fd.
+ */
+#define ION_IOC_TEST_SET_FD \
+			_IO(ION_IOC_MAGIC, 0xf0)
+
+/**
+ * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
+ *
+ * Reads or writes the memory from a handle using an uncached mapping.  Can be
+ * used by unit tests to emulate a DMA engine as close as possible.  Only
+ * expected to be used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_DMA_MAPPING \
+			_IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
+
+/**
+ * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
+ *
+ * Reads or writes the memory from a handle using a kernel mapping.  Can be
+ * used by unit tests to test heap map_kernel functions.  Only expected to be
+ * used for debugging and testing, may not always be available.
+ */
+#define ION_IOC_TEST_KERNEL_MAPPING \
+			_IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
+
+
+#endif /* _UAPI_LINUX_ION_TEST_H */
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
new file mode 100644
index 00000000000..9b5d4869505
--- /dev/null
+++ b/drivers/staging/android/uapi/sw_sync.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SW_SYNC_H
+#define _UAPI_LINUX_SW_SYNC_H
+
+#include <linux/types.h>
+
+struct sw_sync_create_fence_data {
+	__u32	value;
+	char	name[32];
+	__s32	fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC	'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\
+		struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+#endif /* _UAPI_LINUX_SW_SYNC_H */
diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h
new file mode 100644
index 00000000000..e964c751f6b
--- /dev/null
+++ b/drivers/staging/android/uapi/sync.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SYNC_H
+#define _UAPI_LINUX_SYNC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * struct sync_merge_data - data passed to merge ioctl
+ * @fd2:	file descriptor of second fence
+ * @name:	name of new fence
+ * @fence:	returns the fd of the new fence to userspace
+ */
+struct sync_merge_data {
+	__s32	fd2; /* fd of second fence */
+	char	name[32]; /* name of new fence */
+	__s32	fence; /* fd of newly created fence */
+};
+
+/**
+ * struct sync_pt_info - detailed sync_pt information
+ * @len:		length of sync_pt_info including any driver_data
+ * @obj_name:		name of parent sync_timeline
+ * @driver_name:	name of driver implementing the parent
+ * @status:		status of the sync_pt 0:active 1:signaled <0:error
+ * @timestamp_ns:	timestamp of status change in nanoseconds
+ * @driver_data:	any driver dependent data
+ */
+struct sync_pt_info {
+	__u32	len;
+	char	obj_name[32];
+	char	driver_name[32];
+	__s32	status;
+	__u64	timestamp_ns;
+
+	__u8	driver_data[0];
+};
+
+/**
+ * struct sync_fence_info_data - data returned from fence info ioctl
+ * @len:	ioctl caller writes the size of the buffer it is passing in;
+ *		ioctl returns length of sync_fence_data returned to userspace
+ *		including pt_info.
+ * @name:	name of fence
+ * @status:	status of fence 1:signaled 0:active <0:error
+ * @pt_info:	a sync_pt_info struct for every sync_pt in the fence
+ */
+struct sync_fence_info_data {
+	__u32	len;
+	char	name[32];
+	__s32	status;
+
+	__u8	pt_info[0];
+};
+
+#define SYNC_IOC_MAGIC		'>'
+
+/**
+ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
+ *
+ * Pass a timeout in milliseconds.  Waits indefinitely if timeout < 0.
+ */
+#define SYNC_IOC_WAIT		_IOW(SYNC_IOC_MAGIC, 0, __s32)
+
+/**
+ * DOC: SYNC_IOC_MERGE - merge two fences
+ *
+ * Takes a struct sync_merge_data.  Creates a new fence containing copies of
+ * the sync_pts in both the calling fd and sync_merge_data.fd2.  Returns the
+ * new fence's fd in sync_merge_data.fence.
+ */
+#define SYNC_IOC_MERGE		_IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+
+/**
+ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
+ *
+ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
+ * Caller should write the size of the buffer into len.  On return, len is
+ * updated to reflect the total size of the sync_fence_info_data including
+ * pt_info.
+ *
+ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
+ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
+ */
+#define SYNC_IOC_FENCE_INFO	_IOWR(SYNC_IOC_MAGIC, 2,\
+	struct sync_fence_info_data)
+
+#endif /* _UAPI_LINUX_SYNC_H */
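
A minimal user-space sketch of the Ion ioctl flow documented in the new uapi/ion.h above (alloc, share, mmap, free). It is illustrative only: the local include path, the heap id placed in heap_id_mask, and the buffer size are assumptions, not values defined by this patch; real clients must use the heap ids their platform actually registers.

/*
 * Sketch: allocate a 4 KiB cached Ion buffer, export it as a dma-buf fd
 * via ION_IOC_SHARE, mmap it, then drop both references.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "ion.h"			/* the uapi header added by this patch */

int main(void)
{
	struct ion_allocation_data alloc_data;
	struct ion_fd_data fd_data;
	struct ion_handle_data handle_data;
	void *buf;
	int ion_fd;

	ion_fd = open("/dev/ion", O_RDWR);	/* one client per open() */
	if (ion_fd < 0)
		return 1;

	memset(&alloc_data, 0, sizeof(alloc_data));
	alloc_data.len = 4096;
	alloc_data.align = 4096;
	alloc_data.heap_id_mask = 1 << 0;	/* assumed: first heap on this system */
	alloc_data.flags = ION_FLAG_CACHED;

	/* ION_IOC_ALLOC fills in the opaque handle on success */
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data) < 0)
		goto out;

	/* turn the handle into a dma-buf fd that can be mmap'd or passed on */
	fd_data.handle = alloc_data.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &fd_data) < 0)
		goto free_handle;

	buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd_data.fd, 0);
	if (buf != MAP_FAILED) {
		memset(buf, 0, 4096);
		munmap(buf, 4096);
	}
	close(fd_data.fd);

free_handle:
	/* the client's reference on the buffer is dropped with ION_IOC_FREE */
	handle_data.handle = alloc_data.handle;
	ioctl(ion_fd, ION_IOC_FREE, &handle_data);
out:
	close(ion_fd);
	return 0;
}

Another process receiving fd_data.fd over a unix socket could call ION_IOC_IMPORT on its own /dev/ion client to get back an opaque handle, which is the sharing path the ION_IOC_SHARE/ION_IOC_IMPORT documentation describes.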

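Likewise, a minimal sketch of the sw_sync/sync uapi added above: open a software timeline, create a fence that signals at a future timeline value, advance the timeline with SW_SYNC_IOC_INC, and wait on the fence with SYNC_IOC_WAIT. The /dev/sw_sync device path and the local header names are assumptions for illustration; they rely on CONFIG_SW_SYNC_USER being enabled and on how the misc device is exposed on the target system.

/*
 * Sketch: one sw_sync timeline, one fence at value 1, signal, then wait.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "sw_sync.h"
#include "sync.h"

int main(void)
{
	struct sw_sync_create_fence_data fence_data;
	__s32 timeout_ms = 1000;
	__u32 inc = 1;
	int timeline_fd, fence_fd;

	timeline_fd = open("/dev/sw_sync", O_RDWR);	/* one timeline per open() */
	if (timeline_fd < 0)
		return 1;

	/* the fence signals once the timeline counter reaches 'value' */
	memset(&fence_data, 0, sizeof(fence_data));
	fence_data.value = 1;
	strncpy(fence_data.name, "example_fence", sizeof(fence_data.name) - 1);
	if (ioctl(timeline_fd, SW_SYNC_IOC_CREATE_FENCE, &fence_data) < 0) {
		close(timeline_fd);
		return 1;
	}
	fence_fd = fence_data.fence;

	/* advance the timeline by one step; this signals the fence ... */
	ioctl(timeline_fd, SW_SYNC_IOC_INC, &inc);

	/* ... so this wait returns immediately instead of sleeping 1s */
	ioctl(fence_fd, SYNC_IOC_WAIT, &timeout_ms);

	close(fence_fd);
	close(timeline_fd);
	return 0;
}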