Diffstat (limited to 'drivers/staging/android')
46 files changed, 14339 insertions, 0 deletions
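Editor's note (not part of the patch): the ASHMEM Kconfig entry in the diff below describes a "simpler file-based API" backed by the /dev/ashmem misc device that ashmem.c registers. As a rough illustration of that API, here is a minimal, hypothetical userspace sketch; it assumes a local copy of the patch's uapi/ashmem.h header for ASHMEM_SET_NAME, ASHMEM_SET_SIZE, ASHMEM_UNPIN and struct ashmem_pin, and is only a sketch of typical usage, not code from this series.

	/* Hypothetical example (not from the patch): map an ashmem region and
	 * mark it purgeable, mirroring what ashmem_ioctl()/ashmem_mmap() expect. */
	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include "uapi/ashmem.h"	/* local copy of the header added by this patch (assumed) */

	int main(void)
	{
		size_t size = 4096;
		int fd = open("/dev/ashmem", O_RDWR);

		if (fd < 0)
			return 1;

		/* The name appears as "dev/ashmem/<name>" in /proc/<pid>/maps; the
		 * size must be set before the first mmap() (see ashmem_mmap()). */
		ioctl(fd, ASHMEM_SET_NAME, "example-region");
		ioctl(fd, ASHMEM_SET_SIZE, size);

		char *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;
		strcpy(p, "hello");

		/* Unpinned pages become reclaimable; ashmem_shrink_scan() may purge
		 * them under memory pressure.  len == 0 means "to the end". */
		struct ashmem_pin pin = { .offset = 0, .len = 0 };
		ioctl(fd, ASHMEM_UNPIN, &pin);

		munmap(p, size);
		close(fd);
		return 0;
	}

Re-pinning the same range later with ASHMEM_PIN returns ASHMEM_WAS_PURGED if the shrinker discarded the pages in the meantime, which is how callers learn they must regenerate the contents.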
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
new file mode 100644
index 00000000000..99e484f845f
--- /dev/null
+++ b/drivers/staging/android/Kconfig
@@ -0,0 +1,118 @@
+menu "Android"
+
+config ANDROID
+	bool "Android Drivers"
+	---help---
+	  Enable support for various drivers needed on the Android platform
+
+if ANDROID
+
+config ANDROID_BINDER_IPC
+	bool "Android Binder IPC Driver"
+	depends on MMU
+	default n
+	---help---
+	  Binder is used in Android for both communication between processes,
+	  and remote method invocation.
+
+	  This means one Android process can call a method/routine in another
+	  Android process, using Binder to identify, invoke and pass arguments
+	  between said processes.
+
+config ANDROID_BINDER_IPC_32BIT
+	bool
+	depends on !64BIT && ANDROID_BINDER_IPC
+	default y
+	---help---
+	  The Binder API has been changed to support both 32 and 64bit
+	  applications in a mixed environment.
+
+	  Enable this to support an old 32-bit Android user-space (v4.4 and
+	  earlier).
+
+	  Note that enabling this will break newer Android user-space.
+
+config ASHMEM
+	bool "Enable the Anonymous Shared Memory Subsystem"
+	default n
+	depends on SHMEM
+	---help---
+	  The ashmem subsystem is a new shared memory allocator, similar to
+	  POSIX SHM but with different behavior and sporting a simpler
+	  file-based API.
+
+	  It is, in theory, a good memory allocator for low-memory devices,
+	  because it can discard shared memory units when under memory pressure.
+
+config ANDROID_LOGGER
+	tristate "Android log driver"
+	default n
+	---help---
+	  This adds support for system-wide logging using four log buffers.
+
+	  These are:
+
+	      1: main
+	      2: events
+	      3: radio
+	      4: system
+
+	  Log reading and writing is performed via normal Linux reads and
+	  optimized writes. This optimization avoids logging having too
+	  much overhead in the system.
+
+config ANDROID_TIMED_OUTPUT
+	bool "Timed output class driver"
+	default y
+
+config ANDROID_TIMED_GPIO
+	tristate "Android timed gpio driver"
+	depends on GPIOLIB && ANDROID_TIMED_OUTPUT
+	default n
+
+config ANDROID_LOW_MEMORY_KILLER
+	bool "Android Low Memory Killer"
+	---help---
+	  Registers processes to be killed when memory is low
+
+config ANDROID_INTF_ALARM_DEV
+	bool "Android alarm driver"
+	depends on RTC_CLASS
+	default n
+	---help---
+	  Provides non-wakeup and rtc backed wakeup alarms based on rtc or
+	  elapsed realtime, and a non-wakeup alarm on the monotonic clock.
+	  Also exports the alarm interface to user-space.
+
+config SYNC
+	bool "Synchronization framework"
+	default n
+	select ANON_INODES
+	---help---
+	  This option enables the framework for synchronization between multiple
+	  drivers.  Sync implementations can take advantage of hardware
+	  synchronization built into devices like GPUs.
+
+config SW_SYNC
+	bool "Software synchronization objects"
+	default n
+	depends on SYNC
+	---help---
+	  A sync object driver that uses a 32bit counter to coordinate
+	  synchronization.  Useful when there is no hardware primitive backing
+	  the synchronization.
+
+config SW_SYNC_USER
+	bool "Userspace API for SW_SYNC"
+	default n
+	depends on SW_SYNC
+	---help---
+	  Provides a user space API to the sw sync object.
+	  *WARNING* improper use of this can result in deadlocking kernel
+	  drivers from userspace.
+ +source "drivers/staging/android/ion/Kconfig" + +endif # if ANDROID + +endmenu diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile new file mode 100644 index 00000000000..0a01e191490 --- /dev/null +++ b/drivers/staging/android/Makefile @@ -0,0 +1,13 @@ +ccflags-y += -I$(src)			# needed for trace events + +obj-y					+= ion/ + +obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o +obj-$(CONFIG_ASHMEM)			+= ashmem.o +obj-$(CONFIG_ANDROID_LOGGER)		+= logger.o +obj-$(CONFIG_ANDROID_TIMED_OUTPUT)	+= timed_output.o +obj-$(CONFIG_ANDROID_TIMED_GPIO)	+= timed_gpio.o +obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)	+= lowmemorykiller.o +obj-$(CONFIG_ANDROID_INTF_ALARM_DEV)	+= alarm-dev.o +obj-$(CONFIG_SYNC)			+= sync.o +obj-$(CONFIG_SW_SYNC)			+= sw_sync.o diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO new file mode 100644 index 00000000000..b15fb0d6b15 --- /dev/null +++ b/drivers/staging/android/TODO @@ -0,0 +1,10 @@ +TODO: +	- checkpatch.pl cleanups +	- sparse fixes +	- rename files to be not so "generic" +	- make sure things build as modules properly +	- add proper arch dependencies as needed +	- audit userspace interfaces to make sure they are sane + +Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: +Brian Swetland <swetland@google.com> diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c new file mode 100644 index 00000000000..f200e8a8432 --- /dev/null +++ b/drivers/staging/android/alarm-dev.c @@ -0,0 +1,446 @@ +/* drivers/rtc/alarm-dev.c + * + * Copyright (C) 2007-2009 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/time.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/miscdevice.h> +#include <linux/fs.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> +#include <linux/alarmtimer.h> +#include "android_alarm.h" + +#define ANDROID_ALARM_PRINT_INFO (1U << 0) +#define ANDROID_ALARM_PRINT_IO (1U << 1) +#define ANDROID_ALARM_PRINT_INT (1U << 2) + +static int debug_mask = ANDROID_ALARM_PRINT_INFO; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +#define alarm_dbg(debug_level_mask, fmt, ...)				
\ +do {									\ +	if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask)	\ +		pr_info(fmt, ##__VA_ARGS__);				\ +} while (0) + +#define ANDROID_ALARM_WAKEUP_MASK ( \ +	ANDROID_ALARM_RTC_WAKEUP_MASK | \ +	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK) + +static int alarm_opened; +static DEFINE_SPINLOCK(alarm_slock); +static struct wakeup_source alarm_wake_lock; +static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue); +static uint32_t alarm_pending; +static uint32_t alarm_enabled; +static uint32_t wait_pending; + +struct devalarm { +	union { +		struct hrtimer hrt; +		struct alarm alrm; +	} u; +	enum android_alarm_type type; +}; + +static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT]; + +/** + * is_wakeup() - Checks to see if this alarm can wake the device + * @type:	 The type of alarm being checked + * + * Return: 1 if this is a wakeup alarm, otherwise 0 + */ +static int is_wakeup(enum android_alarm_type type) +{ +	return type == ANDROID_ALARM_RTC_WAKEUP || +		type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP; +} + +static void devalarm_start(struct devalarm *alrm, ktime_t exp) +{ +	if (is_wakeup(alrm->type)) +		alarm_start(&alrm->u.alrm, exp); +	else +		hrtimer_start(&alrm->u.hrt, exp, HRTIMER_MODE_ABS); +} + +static int devalarm_try_to_cancel(struct devalarm *alrm) +{ +	if (is_wakeup(alrm->type)) +		return alarm_try_to_cancel(&alrm->u.alrm); +	return hrtimer_try_to_cancel(&alrm->u.hrt); +} + +static void devalarm_cancel(struct devalarm *alrm) +{ +	if (is_wakeup(alrm->type)) +		alarm_cancel(&alrm->u.alrm); +	else +		hrtimer_cancel(&alrm->u.hrt); +} + +static void alarm_clear(enum android_alarm_type alarm_type) +{ +	uint32_t alarm_type_mask = 1U << alarm_type; +	unsigned long flags; + +	spin_lock_irqsave(&alarm_slock, flags); +	alarm_dbg(IO, "alarm %d clear\n", alarm_type); +	devalarm_try_to_cancel(&alarms[alarm_type]); +	if (alarm_pending) { +		alarm_pending &= ~alarm_type_mask; +		if (!alarm_pending && !wait_pending) +			__pm_relax(&alarm_wake_lock); +	} +	alarm_enabled &= ~alarm_type_mask; +	spin_unlock_irqrestore(&alarm_slock, flags); +} + +static void alarm_set(enum android_alarm_type alarm_type, +							struct timespec *ts) +{ +	uint32_t alarm_type_mask = 1U << alarm_type; +	unsigned long flags; + +	spin_lock_irqsave(&alarm_slock, flags); +	alarm_dbg(IO, "alarm %d set %ld.%09ld\n", +			alarm_type, ts->tv_sec, ts->tv_nsec); +	alarm_enabled |= alarm_type_mask; +	devalarm_start(&alarms[alarm_type], timespec_to_ktime(*ts)); +	spin_unlock_irqrestore(&alarm_slock, flags); +} + +static int alarm_wait(void) +{ +	unsigned long flags; +	int rv = 0; + +	spin_lock_irqsave(&alarm_slock, flags); +	alarm_dbg(IO, "alarm wait\n"); +	if (!alarm_pending && wait_pending) { +		__pm_relax(&alarm_wake_lock); +		wait_pending = 0; +	} +	spin_unlock_irqrestore(&alarm_slock, flags); + +	rv = wait_event_interruptible(alarm_wait_queue, alarm_pending); +	if (rv) +		return rv; + +	spin_lock_irqsave(&alarm_slock, flags); +	rv = alarm_pending; +	wait_pending = 1; +	alarm_pending = 0; +	spin_unlock_irqrestore(&alarm_slock, flags); + +	return rv; +} + +static int alarm_set_rtc(struct timespec *ts) +{ +	struct rtc_time new_rtc_tm; +	struct rtc_device *rtc_dev; +	unsigned long flags; +	int rv = 0; + +	rtc_time_to_tm(ts->tv_sec, &new_rtc_tm); +	rtc_dev = alarmtimer_get_rtcdev(); +	rv = do_settimeofday(ts); +	if (rv < 0) +		return rv; +	if (rtc_dev) +		rv = rtc_set_time(rtc_dev, &new_rtc_tm); + +	spin_lock_irqsave(&alarm_slock, flags); +	alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK; +	wake_up(&alarm_wait_queue); +	
spin_unlock_irqrestore(&alarm_slock, flags); + +	return rv; +} + +static int alarm_get_time(enum android_alarm_type alarm_type, +							struct timespec *ts) +{ +	int rv = 0; + +	switch (alarm_type) { +	case ANDROID_ALARM_RTC_WAKEUP: +	case ANDROID_ALARM_RTC: +		getnstimeofday(ts); +		break; +	case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP: +	case ANDROID_ALARM_ELAPSED_REALTIME: +		get_monotonic_boottime(ts); +		break; +	case ANDROID_ALARM_SYSTEMTIME: +		ktime_get_ts(ts); +		break; +	default: +		rv = -EINVAL; +	} +	return rv; +} + +static long alarm_do_ioctl(struct file *file, unsigned int cmd, +							struct timespec *ts) +{ +	int rv = 0; +	unsigned long flags; +	enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd); + +	if (alarm_type >= ANDROID_ALARM_TYPE_COUNT) +		return -EINVAL; + +	if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) { +		if ((file->f_flags & O_ACCMODE) == O_RDONLY) +			return -EPERM; +		if (file->private_data == NULL && +		    cmd != ANDROID_ALARM_SET_RTC) { +			spin_lock_irqsave(&alarm_slock, flags); +			if (alarm_opened) { +				spin_unlock_irqrestore(&alarm_slock, flags); +				return -EBUSY; +			} +			alarm_opened = 1; +			file->private_data = (void *)1; +			spin_unlock_irqrestore(&alarm_slock, flags); +		} +	} + +	switch (ANDROID_ALARM_BASE_CMD(cmd)) { +	case ANDROID_ALARM_CLEAR(0): +		alarm_clear(alarm_type); +		break; +	case ANDROID_ALARM_SET(0): +		alarm_set(alarm_type, ts); +		break; +	case ANDROID_ALARM_SET_AND_WAIT(0): +		alarm_set(alarm_type, ts); +		/* fall though */ +	case ANDROID_ALARM_WAIT: +		rv = alarm_wait(); +		break; +	case ANDROID_ALARM_SET_RTC: +		rv = alarm_set_rtc(ts); +		break; +	case ANDROID_ALARM_GET_TIME(0): +		rv = alarm_get_time(alarm_type, ts); +		break; + +	default: +		rv = -EINVAL; +	} +	return rv; +} + +static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + +	struct timespec ts; +	int rv; + +	switch (ANDROID_ALARM_BASE_CMD(cmd)) { +	case ANDROID_ALARM_SET_AND_WAIT(0): +	case ANDROID_ALARM_SET(0): +	case ANDROID_ALARM_SET_RTC: +		if (copy_from_user(&ts, (void __user *)arg, sizeof(ts))) +			return -EFAULT; +		break; +	} + +	rv = alarm_do_ioctl(file, cmd, &ts); +	if (rv) +		return rv; + +	switch (ANDROID_ALARM_BASE_CMD(cmd)) { +	case ANDROID_ALARM_GET_TIME(0): +		if (copy_to_user((void __user *)arg, &ts, sizeof(ts))) +			return -EFAULT; +		break; +	} + +	return 0; +} + +#ifdef CONFIG_COMPAT +static long alarm_compat_ioctl(struct file *file, unsigned int cmd, +							unsigned long arg) +{ + +	struct timespec ts; +	int rv; + +	switch (ANDROID_ALARM_BASE_CMD(cmd)) { +	case ANDROID_ALARM_SET_AND_WAIT_COMPAT(0): +	case ANDROID_ALARM_SET_COMPAT(0): +	case ANDROID_ALARM_SET_RTC_COMPAT: +		if (compat_get_timespec(&ts, (void __user *)arg)) +			return -EFAULT; +		/* fall through */ +	case ANDROID_ALARM_GET_TIME_COMPAT(0): +		cmd = ANDROID_ALARM_COMPAT_TO_NORM(cmd); +		break; +	} + +	rv = alarm_do_ioctl(file, cmd, &ts); +	if (rv) +		return rv; + +	switch (ANDROID_ALARM_BASE_CMD(cmd)) { +	case ANDROID_ALARM_GET_TIME(0): /* NOTE: we modified cmd above */ +		if (compat_put_timespec(&ts, (void __user *)arg)) +			return -EFAULT; +		break; +	} + +	return 0; +} +#endif + +static int alarm_open(struct inode *inode, struct file *file) +{ +	file->private_data = NULL; +	return 0; +} + +static int alarm_release(struct inode *inode, struct file *file) +{ +	int i; +	unsigned long flags; + +	spin_lock_irqsave(&alarm_slock, flags); +	if (file->private_data) { +		for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) { +			
uint32_t alarm_type_mask = 1U << i; + +			if (alarm_enabled & alarm_type_mask) { +				alarm_dbg(INFO, +					  "%s: clear alarm, pending %d\n", +					  __func__, +					  !!(alarm_pending & alarm_type_mask)); +				alarm_enabled &= ~alarm_type_mask; +			} +			spin_unlock_irqrestore(&alarm_slock, flags); +			devalarm_cancel(&alarms[i]); +			spin_lock_irqsave(&alarm_slock, flags); +		} +		if (alarm_pending | wait_pending) { +			if (alarm_pending) +				alarm_dbg(INFO, "%s: clear pending alarms %x\n", +					  __func__, alarm_pending); +			__pm_relax(&alarm_wake_lock); +			wait_pending = 0; +			alarm_pending = 0; +		} +		alarm_opened = 0; +	} +	spin_unlock_irqrestore(&alarm_slock, flags); +	return 0; +} + +static void devalarm_triggered(struct devalarm *alarm) +{ +	unsigned long flags; +	uint32_t alarm_type_mask = 1U << alarm->type; + +	alarm_dbg(INT, "%s: type %d\n", __func__, alarm->type); +	spin_lock_irqsave(&alarm_slock, flags); +	if (alarm_enabled & alarm_type_mask) { +		__pm_wakeup_event(&alarm_wake_lock, 5000); /* 5secs */ +		alarm_enabled &= ~alarm_type_mask; +		alarm_pending |= alarm_type_mask; +		wake_up(&alarm_wait_queue); +	} +	spin_unlock_irqrestore(&alarm_slock, flags); +} + +static enum hrtimer_restart devalarm_hrthandler(struct hrtimer *hrt) +{ +	struct devalarm *devalrm = container_of(hrt, struct devalarm, u.hrt); + +	devalarm_triggered(devalrm); +	return HRTIMER_NORESTART; +} + +static enum alarmtimer_restart devalarm_alarmhandler(struct alarm *alrm, +							ktime_t now) +{ +	struct devalarm *devalrm = container_of(alrm, struct devalarm, u.alrm); + +	devalarm_triggered(devalrm); +	return ALARMTIMER_NORESTART; +} + + +static const struct file_operations alarm_fops = { +	.owner = THIS_MODULE, +	.unlocked_ioctl = alarm_ioctl, +	.open = alarm_open, +	.release = alarm_release, +#ifdef CONFIG_COMPAT +	.compat_ioctl = alarm_compat_ioctl, +#endif +}; + +static struct miscdevice alarm_device = { +	.minor = MISC_DYNAMIC_MINOR, +	.name = "alarm", +	.fops = &alarm_fops, +}; + +static int __init alarm_dev_init(void) +{ +	int err; +	int i; + +	err = misc_register(&alarm_device); +	if (err) +		return err; + +	alarm_init(&alarms[ANDROID_ALARM_RTC_WAKEUP].u.alrm, +			ALARM_REALTIME, devalarm_alarmhandler); +	hrtimer_init(&alarms[ANDROID_ALARM_RTC].u.hrt, +			CLOCK_REALTIME, HRTIMER_MODE_ABS); +	alarm_init(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].u.alrm, +			ALARM_BOOTTIME, devalarm_alarmhandler); +	hrtimer_init(&alarms[ANDROID_ALARM_ELAPSED_REALTIME].u.hrt, +			CLOCK_BOOTTIME, HRTIMER_MODE_ABS); +	hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].u.hrt, +			CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + +	for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) { +		alarms[i].type = i; +		if (!is_wakeup(i)) +			alarms[i].u.hrt.function = devalarm_hrthandler; +	} + +	wakeup_source_init(&alarm_wake_lock, "alarm"); +	return 0; +} + +static void  __exit alarm_dev_exit(void) +{ +	misc_deregister(&alarm_device); +	wakeup_source_trash(&alarm_wake_lock); +} + +module_init(alarm_dev_init); +module_exit(alarm_dev_exit); + diff --git a/drivers/staging/android/android_alarm.h b/drivers/staging/android/android_alarm.h new file mode 100644 index 00000000000..495b20cf3bf --- /dev/null +++ b/drivers/staging/android/android_alarm.h @@ -0,0 +1,41 @@ +/* include/linux/android_alarm.h + * + * Copyright (C) 2006-2007 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_ANDROID_ALARM_H +#define _LINUX_ANDROID_ALARM_H + +#include <linux/compat.h> +#include <linux/ioctl.h> + +#include "uapi/android_alarm.h" + +#ifdef CONFIG_COMPAT +#define ANDROID_ALARM_SET_COMPAT(type)		ALARM_IOW(2, type, \ +							struct compat_timespec) +#define ANDROID_ALARM_SET_AND_WAIT_COMPAT(type)	ALARM_IOW(3, type, \ +							struct compat_timespec) +#define ANDROID_ALARM_GET_TIME_COMPAT(type)	ALARM_IOW(4, type, \ +							struct compat_timespec) +#define ANDROID_ALARM_SET_RTC_COMPAT		_IOW('a', 5, \ +							struct compat_timespec) +#define ANDROID_ALARM_IOCTL_NR(cmd)		(_IOC_NR(cmd) & ((1<<4)-1)) +#define ANDROID_ALARM_COMPAT_TO_NORM(cmd)  \ +				ALARM_IOW(ANDROID_ALARM_IOCTL_NR(cmd), \ +					ANDROID_ALARM_IOCTL_TO_TYPE(cmd), \ +					struct timespec) + +#endif + +#endif diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c new file mode 100644 index 00000000000..713a9722678 --- /dev/null +++ b/drivers/staging/android/ashmem.c @@ -0,0 +1,886 @@ +/* mm/ashmem.c + * + * Anonymous Shared Memory Subsystem, ashmem + * + * Copyright (C) 2008 Google, Inc. + * + * Robert Love <rlove@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "ashmem: " fmt + +#include <linux/module.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/falloc.h> +#include <linux/miscdevice.h> +#include <linux/security.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/uaccess.h> +#include <linux/personality.h> +#include <linux/bitops.h> +#include <linux/mutex.h> +#include <linux/shmem_fs.h> +#include "ashmem.h" + +#define ASHMEM_NAME_PREFIX "dev/ashmem/" +#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1) +#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN) + +/** + * struct ashmem_area - The anonymous shared memory area + * @name:		The optional name in /proc/pid/maps + * @unpinned_list:	The list of all ashmem areas + * @file:		The shmem-based backing file + * @size:		The size of the mapping, in bytes + * @prot_masks:		The allowed protection bits, as vm_flags + * + * The lifecycle of this structure is from our parent file's open() until + * its release(). 
It is also protected by 'ashmem_mutex' + * + * Warning: Mappings do NOT pin this structure; It dies on close() + */ +struct ashmem_area { +	char name[ASHMEM_FULL_NAME_LEN]; +	struct list_head unpinned_list; +	struct file *file; +	size_t size; +	unsigned long prot_mask; +}; + +/** + * struct ashmem_range - A range of unpinned/evictable pages + * @lru:	         The entry in the LRU list + * @unpinned:	         The entry in its area's unpinned list + * @asma:	         The associated anonymous shared memory area. + * @pgstart:	         The starting page (inclusive) + * @pgend:	         The ending page (inclusive) + * @purged:	         The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED) + * + * The lifecycle of this structure is from unpin to pin. + * It is protected by 'ashmem_mutex' + */ +struct ashmem_range { +	struct list_head lru; +	struct list_head unpinned; +	struct ashmem_area *asma; +	size_t pgstart; +	size_t pgend; +	unsigned int purged; +}; + +/* LRU list of unpinned pages, protected by ashmem_mutex */ +static LIST_HEAD(ashmem_lru_list); + +/** + * long lru_count - The count of pages on our LRU list. + * + * This is protected by ashmem_mutex. + */ +static unsigned long lru_count; + +/** + * ashmem_mutex - protects the list of and each individual ashmem_area + * + * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem + */ +static DEFINE_MUTEX(ashmem_mutex); + +static struct kmem_cache *ashmem_area_cachep __read_mostly; +static struct kmem_cache *ashmem_range_cachep __read_mostly; + +#define range_size(range) \ +	((range)->pgend - (range)->pgstart + 1) + +#define range_on_lru(range) \ +	((range)->purged == ASHMEM_NOT_PURGED) + +#define page_range_subsumes_range(range, start, end) \ +	(((range)->pgstart >= (start)) && ((range)->pgend <= (end))) + +#define page_range_subsumed_by_range(range, start, end) \ +	(((range)->pgstart <= (start)) && ((range)->pgend >= (end))) + +#define page_in_range(range, page) \ +	(((range)->pgstart <= (page)) && ((range)->pgend >= (page))) + +#define page_range_in_range(range, start, end) \ +	(page_in_range(range, start) || page_in_range(range, end) || \ +		page_range_subsumes_range(range, start, end)) + +#define range_before_page(range, page) \ +	((range)->pgend < (page)) + +#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE) + +/** + * lru_add() - Adds a range of memory to the LRU list + * @range:     The memory range being added. + * + * The range is first added to the end (tail) of the LRU list. + * After this, the size of the range is added to @lru_count + */ +static inline void lru_add(struct ashmem_range *range) +{ +	list_add_tail(&range->lru, &ashmem_lru_list); +	lru_count += range_size(range); +} + +/** + * lru_del() - Removes a range of memory from the LRU list + * @range:     The memory range being removed + * + * The range is first deleted from the LRU list. + * After this, the size of the range is removed from @lru_count + */ +static inline void lru_del(struct ashmem_range *range) +{ +	list_del(&range->lru); +	lru_count -= range_size(range); +} + +/** + * range_alloc() - Allocates and initializes a new ashmem_range structure + * @asma:	   The associated ashmem_area + * @prev_range:	   The previous ashmem_range in the sorted asma->unpinned list + * @purged:	   Initial purge status (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED) + * @start:	   The starting page (inclusive) + * @end:	   The ending page (inclusive) + * + * This function is protected by ashmem_mutex. 
+ * + * Return: 0 if successful, or -ENOMEM if there is an error + */ +static int range_alloc(struct ashmem_area *asma, +		       struct ashmem_range *prev_range, unsigned int purged, +		       size_t start, size_t end) +{ +	struct ashmem_range *range; + +	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); +	if (unlikely(!range)) +		return -ENOMEM; + +	range->asma = asma; +	range->pgstart = start; +	range->pgend = end; +	range->purged = purged; + +	list_add_tail(&range->unpinned, &prev_range->unpinned); + +	if (range_on_lru(range)) +		lru_add(range); + +	return 0; +} + +/** + * range_del() - Deletes and dealloctes an ashmem_range structure + * @range:	 The associated ashmem_range that has previously been allocated + */ +static void range_del(struct ashmem_range *range) +{ +	list_del(&range->unpinned); +	if (range_on_lru(range)) +		lru_del(range); +	kmem_cache_free(ashmem_range_cachep, range); +} + +/** + * range_shrink() - Shrinks an ashmem_range + * @range:	    The associated ashmem_range being shrunk + * @start:	    The starting byte of the new range + * @end:	    The ending byte of the new range + * + * This does not modify the data inside the existing range in any way - It + * simply shrinks the boundaries of the range. + * + * Theoretically, with a little tweaking, this could eventually be changed + * to range_resize, and expand the lru_count if the new range is larger. + */ +static inline void range_shrink(struct ashmem_range *range, +				size_t start, size_t end) +{ +	size_t pre = range_size(range); + +	range->pgstart = start; +	range->pgend = end; + +	if (range_on_lru(range)) +		lru_count -= pre - range_size(range); +} + +/** + * ashmem_open() - Opens an Anonymous Shared Memory structure + * @inode:	   The backing file's index node(?) + * @file:	   The backing file + * + * Please note that the ashmem_area is not returned by this function - It is + * instead written to "file->private_data". + * + * Return: 0 if successful, or another code if unsuccessful. + */ +static int ashmem_open(struct inode *inode, struct file *file) +{ +	struct ashmem_area *asma; +	int ret; + +	ret = generic_file_open(inode, file); +	if (unlikely(ret)) +		return ret; + +	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL); +	if (unlikely(!asma)) +		return -ENOMEM; + +	INIT_LIST_HEAD(&asma->unpinned_list); +	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN); +	asma->prot_mask = PROT_MASK; +	file->private_data = asma; + +	return 0; +} + +/** + * ashmem_release() - Releases an Anonymous Shared Memory structure + * @ignored:	      The backing file's Index Node(?) - It is ignored here. + * @file:	      The backing file + * + * Return: 0 if successful. If it is anything else, go have a coffee and + * try again. + */ +static int ashmem_release(struct inode *ignored, struct file *file) +{ +	struct ashmem_area *asma = file->private_data; +	struct ashmem_range *range, *next; + +	mutex_lock(&ashmem_mutex); +	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) +		range_del(range); +	mutex_unlock(&ashmem_mutex); + +	if (asma->file) +		fput(asma->file); +	kmem_cache_free(ashmem_area_cachep, asma); + +	return 0; +} + +/** + * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file + * @file:	   The associated backing file. + * @buf:	   The buffer of data being written to + * @len:	   The number of bytes being read + * @pos:	   The position of the first byte to read. + * + * Return: 0 if successful, or another return code if not. 
+ */ +static ssize_t ashmem_read(struct file *file, char __user *buf, +			   size_t len, loff_t *pos) +{ +	struct ashmem_area *asma = file->private_data; +	int ret = 0; + +	mutex_lock(&ashmem_mutex); + +	/* If size is not set, or set to 0, always return EOF. */ +	if (asma->size == 0) +		goto out_unlock; + +	if (!asma->file) { +		ret = -EBADF; +		goto out_unlock; +	} + +	mutex_unlock(&ashmem_mutex); + +	/* +	 * asma and asma->file are used outside the lock here.  We assume +	 * once asma->file is set it will never be changed, and will not +	 * be destroyed until all references to the file are dropped and +	 * ashmem_release is called. +	 */ +	ret = asma->file->f_op->read(asma->file, buf, len, pos); +	if (ret >= 0) { +		/** Update backing file pos, since f_ops->read() doesn't */ +		asma->file->f_pos = *pos; +	} +	return ret; + +out_unlock: +	mutex_unlock(&ashmem_mutex); +	return ret; +} + +static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) +{ +	struct ashmem_area *asma = file->private_data; +	int ret; + +	mutex_lock(&ashmem_mutex); + +	if (asma->size == 0) { +		ret = -EINVAL; +		goto out; +	} + +	if (!asma->file) { +		ret = -EBADF; +		goto out; +	} + +	ret = asma->file->f_op->llseek(asma->file, offset, origin); +	if (ret < 0) +		goto out; + +	/** Copy f_pos from backing file, since f_ops->llseek() sets it */ +	file->f_pos = asma->file->f_pos; + +out: +	mutex_unlock(&ashmem_mutex); +	return ret; +} + +static inline vm_flags_t calc_vm_may_flags(unsigned long prot) +{ +	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) | +	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) | +	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC); +} + +static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) +{ +	struct ashmem_area *asma = file->private_data; +	int ret = 0; + +	mutex_lock(&ashmem_mutex); + +	/* user needs to SET_SIZE before mapping */ +	if (unlikely(!asma->size)) { +		ret = -EINVAL; +		goto out; +	} + +	/* requested protection bits must match our allowed protection mask */ +	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) & +		     calc_vm_prot_bits(PROT_MASK))) { +		ret = -EPERM; +		goto out; +	} +	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask); + +	if (!asma->file) { +		char *name = ASHMEM_NAME_DEF; +		struct file *vmfile; + +		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') +			name = asma->name; + +		/* ... and allocate the backing shmem file */ +		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags); +		if (unlikely(IS_ERR(vmfile))) { +			ret = PTR_ERR(vmfile); +			goto out; +		} +		asma->file = vmfile; +	} +	get_file(asma->file); + +	/* +	 * XXX - Reworked to use shmem_zero_setup() instead of +	 * shmem_set_file while we're in staging. -jstultz +	 */ +	if (vma->vm_flags & VM_SHARED) { +		ret = shmem_zero_setup(vma); +		if (ret) { +			fput(asma->file); +			goto out; +		} +	} + +	if (vma->vm_file) +		fput(vma->vm_file); +	vma->vm_file = asma->file; + +out: +	mutex_unlock(&ashmem_mutex); +	return ret; +} + +/* + * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab + * + * 'nr_to_scan' is the number of objects to scan for freeing. + * + * 'gfp_mask' is the mask of the allocation that got us into this mess. + * + * Return value is the number of objects freed or -1 if we cannot + * proceed without risk of deadlock (due to gfp_mask). 
+ * + * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial + * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' + * pages freed. + */ +static unsigned long +ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) +{ +	struct ashmem_range *range, *next; +	unsigned long freed = 0; + +	/* We might recurse into filesystem code, so bail out if necessary */ +	if (!(sc->gfp_mask & __GFP_FS)) +		return SHRINK_STOP; + +	mutex_lock(&ashmem_mutex); +	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { +		loff_t start = range->pgstart * PAGE_SIZE; +		loff_t end = (range->pgend + 1) * PAGE_SIZE; + +		do_fallocate(range->asma->file, +				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, +				start, end - start); +		range->purged = ASHMEM_WAS_PURGED; +		lru_del(range); + +		freed += range_size(range); +		if (--sc->nr_to_scan <= 0) +			break; +	} +	mutex_unlock(&ashmem_mutex); +	return freed; +} + +static unsigned long +ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc) +{ +	/* +	 * note that lru_count is count of pages on the lru, not a count of +	 * objects on the list. This means the scan function needs to return the +	 * number of pages freed, not the number of objects scanned. +	 */ +	return lru_count; +} + +static struct shrinker ashmem_shrinker = { +	.count_objects = ashmem_shrink_count, +	.scan_objects = ashmem_shrink_scan, +	/* +	 * XXX (dchinner): I wish people would comment on why they need on +	 * significant changes to the default value here +	 */ +	.seeks = DEFAULT_SEEKS * 4, +}; + +static int set_prot_mask(struct ashmem_area *asma, unsigned long prot) +{ +	int ret = 0; + +	mutex_lock(&ashmem_mutex); + +	/* the user can only remove, not add, protection bits */ +	if (unlikely((asma->prot_mask & prot) != prot)) { +		ret = -EINVAL; +		goto out; +	} + +	/* does the application expect PROT_READ to imply PROT_EXEC? */ +	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) +		prot |= PROT_EXEC; + +	asma->prot_mask = prot; + +out: +	mutex_unlock(&ashmem_mutex); +	return ret; +} + +static int set_name(struct ashmem_area *asma, void __user *name) +{ +	int len; +	int ret = 0; +	char local_name[ASHMEM_NAME_LEN]; + +	/* +	 * Holding the ashmem_mutex while doing a copy_from_user might cause +	 * an data abort which would try to access mmap_sem. If another +	 * thread has invoked ashmem_mmap then it will be holding the +	 * semaphore and will be waiting for ashmem_mutex, there by leading to +	 * deadlock. We'll release the mutex  and take the name to a local +	 * variable that does not need protection and later copy the local +	 * variable to the structure member with lock held. +	 */ +	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN); +	if (len < 0) +		return len; +	if (len == ASHMEM_NAME_LEN) +		local_name[ASHMEM_NAME_LEN - 1] = '\0'; +	mutex_lock(&ashmem_mutex); +	/* cannot change an existing mapping's name */ +	if (unlikely(asma->file)) +		ret = -EINVAL; +	else +		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name); + +	mutex_unlock(&ashmem_mutex); +	return ret; +} + +static int get_name(struct ashmem_area *asma, void __user *name) +{ +	int ret = 0; +	size_t len; +	/* +	 * Have a local variable to which we'll copy the content +	 * from asma with the lock held. Later we can copy this to the user +	 * space safely without holding any locks. So even if we proceed to +	 * wait for mmap_sem, it won't lead to deadlock. 
+	 */ +	char local_name[ASHMEM_NAME_LEN]; + +	mutex_lock(&ashmem_mutex); +	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') { + +		/* +		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes +		 * prevents us from revealing one user's stack to another. +		 */ +		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1; +		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len); +	} else { +		len = sizeof(ASHMEM_NAME_DEF); +		memcpy(local_name, ASHMEM_NAME_DEF, len); +	} +	mutex_unlock(&ashmem_mutex); + +	/* +	 * Now we are just copying from the stack variable to userland +	 * No lock held +	 */ +	if (unlikely(copy_to_user(name, local_name, len))) +		ret = -EFAULT; +	return ret; +} + +/* + * ashmem_pin - pin the given ashmem region, returning whether it was + * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED). + * + * Caller must hold ashmem_mutex. + */ +static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) +{ +	struct ashmem_range *range, *next; +	int ret = ASHMEM_NOT_PURGED; + +	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { +		/* moved past last applicable page; we can short circuit */ +		if (range_before_page(range, pgstart)) +			break; + +		/* +		 * The user can ask us to pin pages that span multiple ranges, +		 * or to pin pages that aren't even unpinned, so this is messy. +		 * +		 * Four cases: +		 * 1. The requested range subsumes an existing range, so we +		 *    just remove the entire matching range. +		 * 2. The requested range overlaps the start of an existing +		 *    range, so we just update that range. +		 * 3. The requested range overlaps the end of an existing +		 *    range, so we just update that range. +		 * 4. The requested range punches a hole in an existing range, +		 *    so we have to update one side of the range and then +		 *    create a new range for the other side. +		 */ +		if (page_range_in_range(range, pgstart, pgend)) { +			ret |= range->purged; + +			/* Case #1: Easy. Just nuke the whole thing. */ +			if (page_range_subsumes_range(range, pgstart, pgend)) { +				range_del(range); +				continue; +			} + +			/* Case #2: We overlap from the start, so adjust it */ +			if (range->pgstart >= pgstart) { +				range_shrink(range, pgend + 1, range->pgend); +				continue; +			} + +			/* Case #3: We overlap from the rear, so adjust it */ +			if (range->pgend <= pgend) { +				range_shrink(range, range->pgstart, pgstart-1); +				continue; +			} + +			/* +			 * Case #4: We eat a chunk out of the middle. A bit +			 * more complicated, we allocate a new range for the +			 * second half and adjust the first chunk's endpoint. +			 */ +			range_alloc(asma, range, range->purged, +				    pgend + 1, range->pgend); +			range_shrink(range, range->pgstart, pgstart - 1); +			break; +		} +	} + +	return ret; +} + +/* + * ashmem_unpin - unpin the given range of pages. Returns zero on success. + * + * Caller must hold ashmem_mutex. + */ +static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend) +{ +	struct ashmem_range *range, *next; +	unsigned int purged = ASHMEM_NOT_PURGED; + +restart: +	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { +		/* short circuit: this is our insertion point */ +		if (range_before_page(range, pgstart)) +			break; + +		/* +		 * The user can ask us to unpin pages that are already entirely +		 * or partially pinned. We handle those two cases here. 
+		 */ +		if (page_range_subsumed_by_range(range, pgstart, pgend)) +			return 0; +		if (page_range_in_range(range, pgstart, pgend)) { +			pgstart = min_t(size_t, range->pgstart, pgstart), +			pgend = max_t(size_t, range->pgend, pgend); +			purged |= range->purged; +			range_del(range); +			goto restart; +		} +	} + +	return range_alloc(asma, range, purged, pgstart, pgend); +} + +/* + * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the + * given interval are unpinned and ASHMEM_IS_PINNED otherwise. + * + * Caller must hold ashmem_mutex. + */ +static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart, +				 size_t pgend) +{ +	struct ashmem_range *range; +	int ret = ASHMEM_IS_PINNED; + +	list_for_each_entry(range, &asma->unpinned_list, unpinned) { +		if (range_before_page(range, pgstart)) +			break; +		if (page_range_in_range(range, pgstart, pgend)) { +			ret = ASHMEM_IS_UNPINNED; +			break; +		} +	} + +	return ret; +} + +static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, +			    void __user *p) +{ +	struct ashmem_pin pin; +	size_t pgstart, pgend; +	int ret = -EINVAL; + +	if (unlikely(!asma->file)) +		return -EINVAL; + +	if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) +		return -EFAULT; + +	/* per custom, you can pass zero for len to mean "everything onward" */ +	if (!pin.len) +		pin.len = PAGE_ALIGN(asma->size) - pin.offset; + +	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) +		return -EINVAL; + +	if (unlikely(((__u32) -1) - pin.offset < pin.len)) +		return -EINVAL; + +	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) +		return -EINVAL; + +	pgstart = pin.offset / PAGE_SIZE; +	pgend = pgstart + (pin.len / PAGE_SIZE) - 1; + +	mutex_lock(&ashmem_mutex); + +	switch (cmd) { +	case ASHMEM_PIN: +		ret = ashmem_pin(asma, pgstart, pgend); +		break; +	case ASHMEM_UNPIN: +		ret = ashmem_unpin(asma, pgstart, pgend); +		break; +	case ASHMEM_GET_PIN_STATUS: +		ret = ashmem_get_pin_status(asma, pgstart, pgend); +		break; +	} + +	mutex_unlock(&ashmem_mutex); + +	return ret; +} + +static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ +	struct ashmem_area *asma = file->private_data; +	long ret = -ENOTTY; + +	switch (cmd) { +	case ASHMEM_SET_NAME: +		ret = set_name(asma, (void __user *) arg); +		break; +	case ASHMEM_GET_NAME: +		ret = get_name(asma, (void __user *) arg); +		break; +	case ASHMEM_SET_SIZE: +		ret = -EINVAL; +		if (!asma->file) { +			ret = 0; +			asma->size = (size_t) arg; +		} +		break; +	case ASHMEM_GET_SIZE: +		ret = asma->size; +		break; +	case ASHMEM_SET_PROT_MASK: +		ret = set_prot_mask(asma, arg); +		break; +	case ASHMEM_GET_PROT_MASK: +		ret = asma->prot_mask; +		break; +	case ASHMEM_PIN: +	case ASHMEM_UNPIN: +	case ASHMEM_GET_PIN_STATUS: +		ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg); +		break; +	case ASHMEM_PURGE_ALL_CACHES: +		ret = -EPERM; +		if (capable(CAP_SYS_ADMIN)) { +			struct shrink_control sc = { +				.gfp_mask = GFP_KERNEL, +				.nr_to_scan = LONG_MAX, +			}; +			ret = ashmem_shrink_count(&ashmem_shrinker, &sc); +			nodes_setall(sc.nodes_to_scan); +			ashmem_shrink_scan(&ashmem_shrinker, &sc); +		} +		break; +	} + +	return ret; +} + +/* support of 32bit userspace on 64bit platforms */ +#ifdef CONFIG_COMPAT +static long compat_ashmem_ioctl(struct file *file, unsigned int cmd, +				unsigned long arg) +{ + +	switch (cmd) { +	case COMPAT_ASHMEM_SET_SIZE: +		cmd = ASHMEM_SET_SIZE; +		break; +	case COMPAT_ASHMEM_SET_PROT_MASK: +		cmd = ASHMEM_SET_PROT_MASK; +		
break; +	} +	return ashmem_ioctl(file, cmd, arg); +} +#endif + +static const struct file_operations ashmem_fops = { +	.owner = THIS_MODULE, +	.open = ashmem_open, +	.release = ashmem_release, +	.read = ashmem_read, +	.llseek = ashmem_llseek, +	.mmap = ashmem_mmap, +	.unlocked_ioctl = ashmem_ioctl, +#ifdef CONFIG_COMPAT +	.compat_ioctl = compat_ashmem_ioctl, +#endif +}; + +static struct miscdevice ashmem_misc = { +	.minor = MISC_DYNAMIC_MINOR, +	.name = "ashmem", +	.fops = &ashmem_fops, +}; + +static int __init ashmem_init(void) +{ +	int ret; + +	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache", +					  sizeof(struct ashmem_area), +					  0, 0, NULL); +	if (unlikely(!ashmem_area_cachep)) { +		pr_err("failed to create slab cache\n"); +		return -ENOMEM; +	} + +	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache", +					  sizeof(struct ashmem_range), +					  0, 0, NULL); +	if (unlikely(!ashmem_range_cachep)) { +		pr_err("failed to create slab cache\n"); +		return -ENOMEM; +	} + +	ret = misc_register(&ashmem_misc); +	if (unlikely(ret)) { +		pr_err("failed to register misc device!\n"); +		return ret; +	} + +	register_shrinker(&ashmem_shrinker); + +	pr_info("initialized\n"); + +	return 0; +} + +static void __exit ashmem_exit(void) +{ +	int ret; + +	unregister_shrinker(&ashmem_shrinker); + +	ret = misc_deregister(&ashmem_misc); +	if (unlikely(ret)) +		pr_err("failed to unregister misc device!\n"); + +	kmem_cache_destroy(ashmem_range_cachep); +	kmem_cache_destroy(ashmem_area_cachep); + +	pr_info("unloaded\n"); +} + +module_init(ashmem_init); +module_exit(ashmem_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h new file mode 100644 index 00000000000..5abcfd7aa70 --- /dev/null +++ b/drivers/staging/android/ashmem.h @@ -0,0 +1,27 @@ +/* + * include/linux/ashmem.h + * + * Copyright 2008 Google Inc. + * Author: Robert Love + * + * This file is dual licensed.  It may be redistributed and/or modified + * under the terms of the Apache 2.0 License OR version 2 of the GNU + * General Public License. + */ + +#ifndef _LINUX_ASHMEM_H +#define _LINUX_ASHMEM_H + +#include <linux/limits.h> +#include <linux/ioctl.h> +#include <linux/compat.h> + +#include "uapi/ashmem.h" + +/* support of 32bit userspace on 64bit platforms */ +#ifdef CONFIG_COMPAT +#define COMPAT_ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, compat_size_t) +#define COMPAT_ASHMEM_SET_PROT_MASK	_IOW(__ASHMEMIOC, 5, unsigned int) +#endif + +#endif	/* _LINUX_ASHMEM_H */ diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c new file mode 100644 index 00000000000..a741da77828 --- /dev/null +++ b/drivers/staging/android/binder.c @@ -0,0 +1,3637 @@ +/* binder.c + * + * Android IPC Subsystem + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <asm/cacheflush.h> +#include <linux/fdtable.h> +#include <linux/file.h> +#include <linux/freezer.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/nsproxy.h> +#include <linux/poll.h> +#include <linux/debugfs.h> +#include <linux/rbtree.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> +#include <linux/slab.h> +#include <linux/pid_namespace.h> + +#include "binder.h" +#include "binder_trace.h" + +static DEFINE_MUTEX(binder_main_lock); +static DEFINE_MUTEX(binder_deferred_lock); +static DEFINE_MUTEX(binder_mmap_lock); + +static HLIST_HEAD(binder_procs); +static HLIST_HEAD(binder_deferred_list); +static HLIST_HEAD(binder_dead_nodes); + +static struct dentry *binder_debugfs_dir_entry_root; +static struct dentry *binder_debugfs_dir_entry_proc; +static struct binder_node *binder_context_mgr_node; +static kuid_t binder_context_mgr_uid = INVALID_UID; +static int binder_last_id; +static struct workqueue_struct *binder_deferred_workqueue; + +#define BINDER_DEBUG_ENTRY(name) \ +static int binder_##name##_open(struct inode *inode, struct file *file) \ +{ \ +	return single_open(file, binder_##name##_show, inode->i_private); \ +} \ +\ +static const struct file_operations binder_##name##_fops = { \ +	.owner = THIS_MODULE, \ +	.open = binder_##name##_open, \ +	.read = seq_read, \ +	.llseek = seq_lseek, \ +	.release = single_release, \ +} + +static int binder_proc_show(struct seq_file *m, void *unused); +BINDER_DEBUG_ENTRY(proc); + +/* This is only defined in include/asm-arm/sizes.h */ +#ifndef SZ_1K +#define SZ_1K                               0x400 +#endif + +#ifndef SZ_4M +#define SZ_4M                               0x400000 +#endif + +#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE) + +#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) + +enum { +	BINDER_DEBUG_USER_ERROR             = 1U << 0, +	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1, +	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2, +	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3, +	BINDER_DEBUG_DEAD_BINDER            = 1U << 4, +	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5, +	BINDER_DEBUG_READ_WRITE             = 1U << 6, +	BINDER_DEBUG_USER_REFS              = 1U << 7, +	BINDER_DEBUG_THREADS                = 1U << 8, +	BINDER_DEBUG_TRANSACTION            = 1U << 9, +	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10, +	BINDER_DEBUG_FREE_BUFFER            = 1U << 11, +	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12, +	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13, +	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14, +	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15, +}; +static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | +	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; +module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); + +static bool binder_debug_no_lock; +module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); + +static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); +static int binder_stop_on_user_error; + +static int binder_set_stop_on_user_error(const char *val, +					 struct kernel_param *kp) +{ +	int ret; + +	ret = param_set_int(val, kp); +	if (binder_stop_on_user_error < 2) +		wake_up(&binder_user_error_wait); +	return ret; +} +module_param_call(stop_on_user_error, binder_set_stop_on_user_error, +	
param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); + +#define binder_debug(mask, x...) \ +	do { \ +		if (binder_debug_mask & mask) \ +			pr_info(x); \ +	} while (0) + +#define binder_user_error(x...) \ +	do { \ +		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ +			pr_info(x); \ +		if (binder_stop_on_user_error) \ +			binder_stop_on_user_error = 2; \ +	} while (0) + +enum binder_stat_types { +	BINDER_STAT_PROC, +	BINDER_STAT_THREAD, +	BINDER_STAT_NODE, +	BINDER_STAT_REF, +	BINDER_STAT_DEATH, +	BINDER_STAT_TRANSACTION, +	BINDER_STAT_TRANSACTION_COMPLETE, +	BINDER_STAT_COUNT +}; + +struct binder_stats { +	int br[_IOC_NR(BR_FAILED_REPLY) + 1]; +	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; +	int obj_created[BINDER_STAT_COUNT]; +	int obj_deleted[BINDER_STAT_COUNT]; +}; + +static struct binder_stats binder_stats; + +static inline void binder_stats_deleted(enum binder_stat_types type) +{ +	binder_stats.obj_deleted[type]++; +} + +static inline void binder_stats_created(enum binder_stat_types type) +{ +	binder_stats.obj_created[type]++; +} + +struct binder_transaction_log_entry { +	int debug_id; +	int call_type; +	int from_proc; +	int from_thread; +	int target_handle; +	int to_proc; +	int to_thread; +	int to_node; +	int data_size; +	int offsets_size; +}; +struct binder_transaction_log { +	int next; +	int full; +	struct binder_transaction_log_entry entry[32]; +}; +static struct binder_transaction_log binder_transaction_log; +static struct binder_transaction_log binder_transaction_log_failed; + +static struct binder_transaction_log_entry *binder_transaction_log_add( +	struct binder_transaction_log *log) +{ +	struct binder_transaction_log_entry *e; + +	e = &log->entry[log->next]; +	memset(e, 0, sizeof(*e)); +	log->next++; +	if (log->next == ARRAY_SIZE(log->entry)) { +		log->next = 0; +		log->full = 1; +	} +	return e; +} + +struct binder_work { +	struct list_head entry; +	enum { +		BINDER_WORK_TRANSACTION = 1, +		BINDER_WORK_TRANSACTION_COMPLETE, +		BINDER_WORK_NODE, +		BINDER_WORK_DEAD_BINDER, +		BINDER_WORK_DEAD_BINDER_AND_CLEAR, +		BINDER_WORK_CLEAR_DEATH_NOTIFICATION, +	} type; +}; + +struct binder_node { +	int debug_id; +	struct binder_work work; +	union { +		struct rb_node rb_node; +		struct hlist_node dead_node; +	}; +	struct binder_proc *proc; +	struct hlist_head refs; +	int internal_strong_refs; +	int local_weak_refs; +	int local_strong_refs; +	binder_uintptr_t ptr; +	binder_uintptr_t cookie; +	unsigned has_strong_ref:1; +	unsigned pending_strong_ref:1; +	unsigned has_weak_ref:1; +	unsigned pending_weak_ref:1; +	unsigned has_async_transaction:1; +	unsigned accept_fds:1; +	unsigned min_priority:8; +	struct list_head async_todo; +}; + +struct binder_ref_death { +	struct binder_work work; +	binder_uintptr_t cookie; +}; + +struct binder_ref { +	/* Lookups needed: */ +	/*   node + proc => ref (transaction) */ +	/*   desc + proc => ref (transaction, inc/dec ref) */ +	/*   node => refs + procs (proc exit) */ +	int debug_id; +	struct rb_node rb_node_desc; +	struct rb_node rb_node_node; +	struct hlist_node node_entry; +	struct binder_proc *proc; +	struct binder_node *node; +	uint32_t desc; +	int strong; +	int weak; +	struct binder_ref_death *death; +}; + +struct binder_buffer { +	struct list_head entry; /* free and allocated entries by address */ +	struct rb_node rb_node; /* free entry by size or allocated entry */ +				/* by address */ +	unsigned free:1; +	unsigned allow_user_free:1; +	unsigned async_transaction:1; +	unsigned debug_id:29; + +	struct binder_transaction *transaction; + +	
struct binder_node *target_node; +	size_t data_size; +	size_t offsets_size; +	uint8_t data[0]; +}; + +enum binder_deferred_state { +	BINDER_DEFERRED_PUT_FILES    = 0x01, +	BINDER_DEFERRED_FLUSH        = 0x02, +	BINDER_DEFERRED_RELEASE      = 0x04, +}; + +struct binder_proc { +	struct hlist_node proc_node; +	struct rb_root threads; +	struct rb_root nodes; +	struct rb_root refs_by_desc; +	struct rb_root refs_by_node; +	int pid; +	struct vm_area_struct *vma; +	struct mm_struct *vma_vm_mm; +	struct task_struct *tsk; +	struct files_struct *files; +	struct hlist_node deferred_work_node; +	int deferred_work; +	void *buffer; +	ptrdiff_t user_buffer_offset; + +	struct list_head buffers; +	struct rb_root free_buffers; +	struct rb_root allocated_buffers; +	size_t free_async_space; + +	struct page **pages; +	size_t buffer_size; +	uint32_t buffer_free; +	struct list_head todo; +	wait_queue_head_t wait; +	struct binder_stats stats; +	struct list_head delivered_death; +	int max_threads; +	int requested_threads; +	int requested_threads_started; +	int ready_threads; +	long default_priority; +	struct dentry *debugfs_entry; +}; + +enum { +	BINDER_LOOPER_STATE_REGISTERED  = 0x01, +	BINDER_LOOPER_STATE_ENTERED     = 0x02, +	BINDER_LOOPER_STATE_EXITED      = 0x04, +	BINDER_LOOPER_STATE_INVALID     = 0x08, +	BINDER_LOOPER_STATE_WAITING     = 0x10, +	BINDER_LOOPER_STATE_NEED_RETURN = 0x20 +}; + +struct binder_thread { +	struct binder_proc *proc; +	struct rb_node rb_node; +	int pid; +	int looper; +	struct binder_transaction *transaction_stack; +	struct list_head todo; +	uint32_t return_error; /* Write failed, return error code in read buf */ +	uint32_t return_error2; /* Write failed, return error code in read */ +		/* buffer. Used when sending a reply to a dead process that */ +		/* we are also waiting on */ +	wait_queue_head_t wait; +	struct binder_stats stats; +}; + +struct binder_transaction { +	int debug_id; +	struct binder_work work; +	struct binder_thread *from; +	struct binder_transaction *from_parent; +	struct binder_proc *to_proc; +	struct binder_thread *to_thread; +	struct binder_transaction *to_parent; +	unsigned need_reply:1; +	/* unsigned is_dead:1; */	/* not used at the moment */ + +	struct binder_buffer *buffer; +	unsigned int	code; +	unsigned int	flags; +	long	priority; +	long	saved_priority; +	kuid_t	sender_euid; +}; + +static void +binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); + +static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) +{ +	struct files_struct *files = proc->files; +	unsigned long rlim_cur; +	unsigned long irqs; + +	if (files == NULL) +		return -ESRCH; + +	if (!lock_task_sighand(proc->tsk, &irqs)) +		return -EMFILE; + +	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); +	unlock_task_sighand(proc->tsk, &irqs); + +	return __alloc_fd(files, 0, rlim_cur, flags); +} + +/* + * copied from fd_install + */ +static void task_fd_install( +	struct binder_proc *proc, unsigned int fd, struct file *file) +{ +	if (proc->files) +		__fd_install(proc->files, fd, file); +} + +/* + * copied from sys_close + */ +static long task_close_fd(struct binder_proc *proc, unsigned int fd) +{ +	int retval; + +	if (proc->files == NULL) +		return -ESRCH; + +	retval = __close_fd(proc->files, fd); +	/* can't restart close syscall because file table entry was cleared */ +	if (unlikely(retval == -ERESTARTSYS || +		     retval == -ERESTARTNOINTR || +		     retval == -ERESTARTNOHAND || +		     retval == -ERESTART_RESTARTBLOCK)) +		retval = -EINTR; + +	return retval; +} 
+ +static inline void binder_lock(const char *tag) +{ +	trace_binder_lock(tag); +	mutex_lock(&binder_main_lock); +	trace_binder_locked(tag); +} + +static inline void binder_unlock(const char *tag) +{ +	trace_binder_unlock(tag); +	mutex_unlock(&binder_main_lock); +} + +static void binder_set_nice(long nice) +{ +	long min_nice; + +	if (can_nice(current, nice)) { +		set_user_nice(current, nice); +		return; +	} +	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur); +	binder_debug(BINDER_DEBUG_PRIORITY_CAP, +		     "%d: nice value %ld not allowed use %ld instead\n", +		      current->pid, nice, min_nice); +	set_user_nice(current, min_nice); +	if (min_nice <= MAX_NICE) +		return; +	binder_user_error("%d RLIMIT_NICE not set\n", current->pid); +} + +static size_t binder_buffer_size(struct binder_proc *proc, +				 struct binder_buffer *buffer) +{ +	if (list_is_last(&buffer->entry, &proc->buffers)) +		return proc->buffer + proc->buffer_size - (void *)buffer->data; +	else +		return (size_t)list_entry(buffer->entry.next, +			struct binder_buffer, entry) - (size_t)buffer->data; +} + +static void binder_insert_free_buffer(struct binder_proc *proc, +				      struct binder_buffer *new_buffer) +{ +	struct rb_node **p = &proc->free_buffers.rb_node; +	struct rb_node *parent = NULL; +	struct binder_buffer *buffer; +	size_t buffer_size; +	size_t new_buffer_size; + +	BUG_ON(!new_buffer->free); + +	new_buffer_size = binder_buffer_size(proc, new_buffer); + +	binder_debug(BINDER_DEBUG_BUFFER_ALLOC, +		     "%d: add free buffer, size %zd, at %p\n", +		      proc->pid, new_buffer_size, new_buffer); + +	while (*p) { +		parent = *p; +		buffer = rb_entry(parent, struct binder_buffer, rb_node); +		BUG_ON(!buffer->free); + +		buffer_size = binder_buffer_size(proc, buffer); + +		if (new_buffer_size < buffer_size) +			p = &parent->rb_left; +		else +			p = &parent->rb_right; +	} +	rb_link_node(&new_buffer->rb_node, parent, p); +	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); +} + +static void binder_insert_allocated_buffer(struct binder_proc *proc, +					   struct binder_buffer *new_buffer) +{ +	struct rb_node **p = &proc->allocated_buffers.rb_node; +	struct rb_node *parent = NULL; +	struct binder_buffer *buffer; + +	BUG_ON(new_buffer->free); + +	while (*p) { +		parent = *p; +		buffer = rb_entry(parent, struct binder_buffer, rb_node); +		BUG_ON(buffer->free); + +		if (new_buffer < buffer) +			p = &parent->rb_left; +		else if (new_buffer > buffer) +			p = &parent->rb_right; +		else +			BUG(); +	} +	rb_link_node(&new_buffer->rb_node, parent, p); +	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); +} + +static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, +						  uintptr_t user_ptr) +{ +	struct rb_node *n = proc->allocated_buffers.rb_node; +	struct binder_buffer *buffer; +	struct binder_buffer *kern_ptr; + +	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset +		- offsetof(struct binder_buffer, data)); + +	while (n) { +		buffer = rb_entry(n, struct binder_buffer, rb_node); +		BUG_ON(buffer->free); + +		if (kern_ptr < buffer) +			n = n->rb_left; +		else if (kern_ptr > buffer) +			n = n->rb_right; +		else +			return buffer; +	} +	return NULL; +} + +static int binder_update_page_range(struct binder_proc *proc, int allocate, +				    void *start, void *end, +				    struct vm_area_struct *vma) +{ +	void *page_addr; +	unsigned long user_page_addr; +	struct vm_struct tmp_area; +	struct page **page; +	struct mm_struct *mm; + +	
binder_debug(BINDER_DEBUG_BUFFER_ALLOC, +		     "%d: %s pages %p-%p\n", proc->pid, +		     allocate ? "allocate" : "free", start, end); + +	if (end <= start) +		return 0; + +	trace_binder_update_page_range(proc, allocate, start, end); + +	if (vma) +		mm = NULL; +	else +		mm = get_task_mm(proc->tsk); + +	if (mm) { +		down_write(&mm->mmap_sem); +		vma = proc->vma; +		if (vma && mm != proc->vma_vm_mm) { +			pr_err("%d: vma mm and task mm mismatch\n", +				proc->pid); +			vma = NULL; +		} +	} + +	if (allocate == 0) +		goto free_range; + +	if (vma == NULL) { +		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", +			proc->pid); +		goto err_no_vma; +	} + +	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { +		int ret; +		struct page **page_array_ptr; + +		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; + +		BUG_ON(*page); +		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); +		if (*page == NULL) { +			pr_err("%d: binder_alloc_buf failed for page at %p\n", +				proc->pid, page_addr); +			goto err_alloc_page_failed; +		} +		tmp_area.addr = page_addr; +		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */; +		page_array_ptr = page; +		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); +		if (ret) { +			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n", +			       proc->pid, page_addr); +			goto err_map_kernel_failed; +		} +		user_page_addr = +			(uintptr_t)page_addr + proc->user_buffer_offset; +		ret = vm_insert_page(vma, user_page_addr, page[0]); +		if (ret) { +			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", +			       proc->pid, user_page_addr); +			goto err_vm_insert_page_failed; +		} +		/* vm_insert_page does not seem to increment the refcount */ +	} +	if (mm) { +		up_write(&mm->mmap_sem); +		mmput(mm); +	} +	return 0; + +free_range: +	for (page_addr = end - PAGE_SIZE; page_addr >= start; +	     page_addr -= PAGE_SIZE) { +		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; +		if (vma) +			zap_page_range(vma, (uintptr_t)page_addr + +				proc->user_buffer_offset, PAGE_SIZE, NULL); +err_vm_insert_page_failed: +		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); +err_map_kernel_failed: +		__free_page(*page); +		*page = NULL; +err_alloc_page_failed: +		; +	} +err_no_vma: +	if (mm) { +		up_write(&mm->mmap_sem); +		mmput(mm); +	} +	return -ENOMEM; +} + +static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, +					      size_t data_size, +					      size_t offsets_size, int is_async) +{ +	struct rb_node *n = proc->free_buffers.rb_node; +	struct binder_buffer *buffer; +	size_t buffer_size; +	struct rb_node *best_fit = NULL; +	void *has_page_addr; +	void *end_page_addr; +	size_t size; + +	if (proc->vma == NULL) { +		pr_err("%d: binder_alloc_buf, no vma\n", +		       proc->pid); +		return NULL; +	} + +	size = ALIGN(data_size, sizeof(void *)) + +		ALIGN(offsets_size, sizeof(void *)); + +	if (size < data_size || size < offsets_size) { +		binder_user_error("%d: got transaction with invalid size %zd-%zd\n", +				proc->pid, data_size, offsets_size); +		return NULL; +	} + +	if (is_async && +	    proc->free_async_space < size + sizeof(struct binder_buffer)) { +		binder_debug(BINDER_DEBUG_BUFFER_ALLOC, +			     "%d: binder_alloc_buf size %zd failed, no async space left\n", +			      proc->pid, size); +		return NULL; +	} + +	while (n) { +		buffer = rb_entry(n, struct binder_buffer, rb_node); +		BUG_ON(!buffer->free); +		buffer_size = binder_buffer_size(proc, 
buffer); + +		if (size < buffer_size) { +			best_fit = n; +			n = n->rb_left; +		} else if (size > buffer_size) +			n = n->rb_right; +		else { +			best_fit = n; +			break; +		} +	} +	if (best_fit == NULL) { +		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", +			proc->pid, size); +		return NULL; +	} +	if (n == NULL) { +		buffer = rb_entry(best_fit, struct binder_buffer, rb_node); +		buffer_size = binder_buffer_size(proc, buffer); +	} + +	binder_debug(BINDER_DEBUG_BUFFER_ALLOC, +		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n", +		      proc->pid, size, buffer, buffer_size); + +	has_page_addr = +		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); +	if (n == NULL) { +		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) +			buffer_size = size; /* no room for other buffers */ +		else +			buffer_size = size + sizeof(struct binder_buffer); +	} +	end_page_addr = +		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); +	if (end_page_addr > has_page_addr) +		end_page_addr = has_page_addr; +	if (binder_update_page_range(proc, 1, +	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) +		return NULL; + +	rb_erase(best_fit, &proc->free_buffers); +	buffer->free = 0; +	binder_insert_allocated_buffer(proc, buffer); +	if (buffer_size != size) { +		struct binder_buffer *new_buffer = (void *)buffer->data + size; + +		list_add(&new_buffer->entry, &buffer->entry); +		new_buffer->free = 1; +		binder_insert_free_buffer(proc, new_buffer); +	} +	binder_debug(BINDER_DEBUG_BUFFER_ALLOC, +		     "%d: binder_alloc_buf size %zd got %p\n", +		      proc->pid, size, buffer); +	buffer->data_size = data_size; +	buffer->offsets_size = offsets_size; +	buffer->async_transaction = is_async; +	if (is_async) { +		proc->free_async_space -= size + sizeof(struct binder_buffer); +		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, +			     "%d: binder_alloc_buf size %zd async free %zd\n", +			      proc->pid, size, proc->free_async_space); +	} + +	return buffer; +} + +static void *buffer_start_page(struct binder_buffer *buffer) +{ +	return (void *)((uintptr_t)buffer & PAGE_MASK); +} + +static void *buffer_end_page(struct binder_buffer *buffer) +{ +	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); +} + +static void binder_delete_free_buffer(struct binder_proc *proc, +				      struct binder_buffer *buffer) +{ +	struct binder_buffer *prev, *next = NULL; +	int free_page_end = 1; +	int free_page_start = 1; + +	BUG_ON(proc->buffers.next == &buffer->entry); +	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); +	BUG_ON(!prev->free); +	if (buffer_end_page(prev) == buffer_start_page(buffer)) { +		free_page_start = 0; +		if (buffer_end_page(prev) == buffer_end_page(buffer)) +			free_page_end = 0; +		binder_debug(BINDER_DEBUG_BUFFER_ALLOC, +			     "%d: merge free, buffer %p share page with %p\n", +			      proc->pid, buffer, prev); +	} + +	if (!list_is_last(&buffer->entry, &proc->buffers)) { +		next = list_entry(buffer->entry.next, +				  struct binder_buffer, entry); +		if (buffer_start_page(next) == buffer_end_page(buffer)) { +			free_page_end = 0; +			if (buffer_start_page(next) == +			    buffer_start_page(buffer)) +				free_page_start = 0; +			binder_debug(BINDER_DEBUG_BUFFER_ALLOC, +				     "%d: merge free, buffer %p share page with %p\n", +				      proc->pid, buffer, prev); +		} +	} +	list_del(&buffer->entry); +	if (free_page_start || free_page_end) { +		binder_debug(BINDER_DEBUG_BUFFER_ALLOC, +			     "%d: merge free, buffer %p do 
not share page%s%s with %p or %p\n", +			     proc->pid, buffer, free_page_start ? "" : " end", +			     free_page_end ? "" : " start", prev, next); +		binder_update_page_range(proc, 0, free_page_start ? +			buffer_start_page(buffer) : buffer_end_page(buffer), +			(free_page_end ? buffer_end_page(buffer) : +			buffer_start_page(buffer)) + PAGE_SIZE, NULL); +	} +} + +static void binder_free_buf(struct binder_proc *proc, +			    struct binder_buffer *buffer) +{ +	size_t size, buffer_size; + +	buffer_size = binder_buffer_size(proc, buffer); + +	size = ALIGN(buffer->data_size, sizeof(void *)) + +		ALIGN(buffer->offsets_size, sizeof(void *)); + +	binder_debug(BINDER_DEBUG_BUFFER_ALLOC, +		     "%d: binder_free_buf %p size %zd buffer_size %zd\n", +		      proc->pid, buffer, size, buffer_size); + +	BUG_ON(buffer->free); +	BUG_ON(size > buffer_size); +	BUG_ON(buffer->transaction != NULL); +	BUG_ON((void *)buffer < proc->buffer); +	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); + +	if (buffer->async_transaction) { +		proc->free_async_space += size + sizeof(struct binder_buffer); + +		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, +			     "%d: binder_free_buf size %zd async free %zd\n", +			      proc->pid, size, proc->free_async_space); +	} + +	binder_update_page_range(proc, 0, +		(void *)PAGE_ALIGN((uintptr_t)buffer->data), +		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), +		NULL); +	rb_erase(&buffer->rb_node, &proc->allocated_buffers); +	buffer->free = 1; +	if (!list_is_last(&buffer->entry, &proc->buffers)) { +		struct binder_buffer *next = list_entry(buffer->entry.next, +						struct binder_buffer, entry); + +		if (next->free) { +			rb_erase(&next->rb_node, &proc->free_buffers); +			binder_delete_free_buffer(proc, next); +		} +	} +	if (proc->buffers.next != &buffer->entry) { +		struct binder_buffer *prev = list_entry(buffer->entry.prev, +						struct binder_buffer, entry); + +		if (prev->free) { +			binder_delete_free_buffer(proc, buffer); +			rb_erase(&prev->rb_node, &proc->free_buffers); +			buffer = prev; +		} +	} +	binder_insert_free_buffer(proc, buffer); +} + +static struct binder_node *binder_get_node(struct binder_proc *proc, +					   binder_uintptr_t ptr) +{ +	struct rb_node *n = proc->nodes.rb_node; +	struct binder_node *node; + +	while (n) { +		node = rb_entry(n, struct binder_node, rb_node); + +		if (ptr < node->ptr) +			n = n->rb_left; +		else if (ptr > node->ptr) +			n = n->rb_right; +		else +			return node; +	} +	return NULL; +} + +static struct binder_node *binder_new_node(struct binder_proc *proc, +					   binder_uintptr_t ptr, +					   binder_uintptr_t cookie) +{ +	struct rb_node **p = &proc->nodes.rb_node; +	struct rb_node *parent = NULL; +	struct binder_node *node; + +	while (*p) { +		parent = *p; +		node = rb_entry(parent, struct binder_node, rb_node); + +		if (ptr < node->ptr) +			p = &(*p)->rb_left; +		else if (ptr > node->ptr) +			p = &(*p)->rb_right; +		else +			return NULL; +	} + +	node = kzalloc(sizeof(*node), GFP_KERNEL); +	if (node == NULL) +		return NULL; +	binder_stats_created(BINDER_STAT_NODE); +	rb_link_node(&node->rb_node, parent, p); +	rb_insert_color(&node->rb_node, &proc->nodes); +	node->debug_id = ++binder_last_id; +	node->proc = proc; +	node->ptr = ptr; +	node->cookie = cookie; +	node->work.type = BINDER_WORK_NODE; +	INIT_LIST_HEAD(&node->work.entry); +	INIT_LIST_HEAD(&node->async_todo); +	binder_debug(BINDER_DEBUG_INTERNAL_REFS, +		     "%d:%d node %d u%016llx c%016llx created\n", +		     proc->pid, current->pid, node->debug_id, 
+		     (u64)node->ptr, (u64)node->cookie); +	return node; +} + +static int binder_inc_node(struct binder_node *node, int strong, int internal, +			   struct list_head *target_list) +{ +	if (strong) { +		if (internal) { +			if (target_list == NULL && +			    node->internal_strong_refs == 0 && +			    !(node == binder_context_mgr_node && +			    node->has_strong_ref)) { +				pr_err("invalid inc strong node for %d\n", +					node->debug_id); +				return -EINVAL; +			} +			node->internal_strong_refs++; +		} else +			node->local_strong_refs++; +		if (!node->has_strong_ref && target_list) { +			list_del_init(&node->work.entry); +			list_add_tail(&node->work.entry, target_list); +		} +	} else { +		if (!internal) +			node->local_weak_refs++; +		if (!node->has_weak_ref && list_empty(&node->work.entry)) { +			if (target_list == NULL) { +				pr_err("invalid inc weak node for %d\n", +					node->debug_id); +				return -EINVAL; +			} +			list_add_tail(&node->work.entry, target_list); +		} +	} +	return 0; +} + +static int binder_dec_node(struct binder_node *node, int strong, int internal) +{ +	if (strong) { +		if (internal) +			node->internal_strong_refs--; +		else +			node->local_strong_refs--; +		if (node->local_strong_refs || node->internal_strong_refs) +			return 0; +	} else { +		if (!internal) +			node->local_weak_refs--; +		if (node->local_weak_refs || !hlist_empty(&node->refs)) +			return 0; +	} +	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { +		if (list_empty(&node->work.entry)) { +			list_add_tail(&node->work.entry, &node->proc->todo); +			wake_up_interruptible(&node->proc->wait); +		} +	} else { +		if (hlist_empty(&node->refs) && !node->local_strong_refs && +		    !node->local_weak_refs) { +			list_del_init(&node->work.entry); +			if (node->proc) { +				rb_erase(&node->rb_node, &node->proc->nodes); +				binder_debug(BINDER_DEBUG_INTERNAL_REFS, +					     "refless node %d deleted\n", +					     node->debug_id); +			} else { +				hlist_del(&node->dead_node); +				binder_debug(BINDER_DEBUG_INTERNAL_REFS, +					     "dead node %d deleted\n", +					     node->debug_id); +			} +			kfree(node); +			binder_stats_deleted(BINDER_STAT_NODE); +		} +	} + +	return 0; +} + + +static struct binder_ref *binder_get_ref(struct binder_proc *proc, +					 uint32_t desc) +{ +	struct rb_node *n = proc->refs_by_desc.rb_node; +	struct binder_ref *ref; + +	while (n) { +		ref = rb_entry(n, struct binder_ref, rb_node_desc); + +		if (desc < ref->desc) +			n = n->rb_left; +		else if (desc > ref->desc) +			n = n->rb_right; +		else +			return ref; +	} +	return NULL; +} + +static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, +						  struct binder_node *node) +{ +	struct rb_node *n; +	struct rb_node **p = &proc->refs_by_node.rb_node; +	struct rb_node *parent = NULL; +	struct binder_ref *ref, *new_ref; + +	while (*p) { +		parent = *p; +		ref = rb_entry(parent, struct binder_ref, rb_node_node); + +		if (node < ref->node) +			p = &(*p)->rb_left; +		else if (node > ref->node) +			p = &(*p)->rb_right; +		else +			return ref; +	} +	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); +	if (new_ref == NULL) +		return NULL; +	binder_stats_created(BINDER_STAT_REF); +	new_ref->debug_id = ++binder_last_id; +	new_ref->proc = proc; +	new_ref->node = node; +	rb_link_node(&new_ref->rb_node_node, parent, p); +	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); + +	new_ref->desc = (node == binder_context_mgr_node) ? 
0 : 1; +	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { +		ref = rb_entry(n, struct binder_ref, rb_node_desc); +		if (ref->desc > new_ref->desc) +			break; +		new_ref->desc = ref->desc + 1; +	} + +	p = &proc->refs_by_desc.rb_node; +	while (*p) { +		parent = *p; +		ref = rb_entry(parent, struct binder_ref, rb_node_desc); + +		if (new_ref->desc < ref->desc) +			p = &(*p)->rb_left; +		else if (new_ref->desc > ref->desc) +			p = &(*p)->rb_right; +		else +			BUG(); +	} +	rb_link_node(&new_ref->rb_node_desc, parent, p); +	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); +	if (node) { +		hlist_add_head(&new_ref->node_entry, &node->refs); + +		binder_debug(BINDER_DEBUG_INTERNAL_REFS, +			     "%d new ref %d desc %d for node %d\n", +			      proc->pid, new_ref->debug_id, new_ref->desc, +			      node->debug_id); +	} else { +		binder_debug(BINDER_DEBUG_INTERNAL_REFS, +			     "%d new ref %d desc %d for dead node\n", +			      proc->pid, new_ref->debug_id, new_ref->desc); +	} +	return new_ref; +} + +static void binder_delete_ref(struct binder_ref *ref) +{ +	binder_debug(BINDER_DEBUG_INTERNAL_REFS, +		     "%d delete ref %d desc %d for node %d\n", +		      ref->proc->pid, ref->debug_id, ref->desc, +		      ref->node->debug_id); + +	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); +	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); +	if (ref->strong) +		binder_dec_node(ref->node, 1, 1); +	hlist_del(&ref->node_entry); +	binder_dec_node(ref->node, 0, 1); +	if (ref->death) { +		binder_debug(BINDER_DEBUG_DEAD_BINDER, +			     "%d delete ref %d desc %d has death notification\n", +			      ref->proc->pid, ref->debug_id, ref->desc); +		list_del(&ref->death->work.entry); +		kfree(ref->death); +		binder_stats_deleted(BINDER_STAT_DEATH); +	} +	kfree(ref); +	binder_stats_deleted(BINDER_STAT_REF); +} + +static int binder_inc_ref(struct binder_ref *ref, int strong, +			  struct list_head *target_list) +{ +	int ret; + +	if (strong) { +		if (ref->strong == 0) { +			ret = binder_inc_node(ref->node, 1, 1, target_list); +			if (ret) +				return ret; +		} +		ref->strong++; +	} else { +		if (ref->weak == 0) { +			ret = binder_inc_node(ref->node, 0, 1, target_list); +			if (ret) +				return ret; +		} +		ref->weak++; +	} +	return 0; +} + + +static int binder_dec_ref(struct binder_ref *ref, int strong) +{ +	if (strong) { +		if (ref->strong == 0) { +			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", +					  ref->proc->pid, ref->debug_id, +					  ref->desc, ref->strong, ref->weak); +			return -EINVAL; +		} +		ref->strong--; +		if (ref->strong == 0) { +			int ret; + +			ret = binder_dec_node(ref->node, strong, 1); +			if (ret) +				return ret; +		} +	} else { +		if (ref->weak == 0) { +			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", +					  ref->proc->pid, ref->debug_id, +					  ref->desc, ref->strong, ref->weak); +			return -EINVAL; +		} +		ref->weak--; +	} +	if (ref->strong == 0 && ref->weak == 0) +		binder_delete_ref(ref); +	return 0; +} + +static void binder_pop_transaction(struct binder_thread *target_thread, +				   struct binder_transaction *t) +{ +	if (target_thread) { +		BUG_ON(target_thread->transaction_stack != t); +		BUG_ON(target_thread->transaction_stack->from != target_thread); +		target_thread->transaction_stack = +			target_thread->transaction_stack->from_parent; +		t->from = NULL; +	} +	t->need_reply = 0; +	if (t->buffer) +		t->buffer->transaction = NULL; +	kfree(t); +	binder_stats_deleted(BINDER_STAT_TRANSACTION); +} 
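+/*
+ * binder_send_failed_reply() below walks a failed (non-oneway)
+ * transaction back towards its sender: the first live sender thread
+ * normally has error_code queued in return_error and is woken, while
+ * stack frames whose sending thread is already gone are popped and
+ * freed until the root of the call chain is reached.
+ */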
+ +static void binder_send_failed_reply(struct binder_transaction *t, +				     uint32_t error_code) +{ +	struct binder_thread *target_thread; + +	BUG_ON(t->flags & TF_ONE_WAY); +	while (1) { +		target_thread = t->from; +		if (target_thread) { +			if (target_thread->return_error != BR_OK && +			   target_thread->return_error2 == BR_OK) { +				target_thread->return_error2 = +					target_thread->return_error; +				target_thread->return_error = BR_OK; +			} +			if (target_thread->return_error == BR_OK) { +				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, +					     "send failed reply for transaction %d to %d:%d\n", +					      t->debug_id, target_thread->proc->pid, +					      target_thread->pid); + +				binder_pop_transaction(target_thread, t); +				target_thread->return_error = error_code; +				wake_up_interruptible(&target_thread->wait); +			} else { +				pr_err("reply failed, target thread, %d:%d, has error code %d already\n", +					target_thread->proc->pid, +					target_thread->pid, +					target_thread->return_error); +			} +			return; +		} else { +			struct binder_transaction *next = t->from_parent; + +			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, +				     "send failed reply for transaction %d, target dead\n", +				     t->debug_id); + +			binder_pop_transaction(target_thread, t); +			if (next == NULL) { +				binder_debug(BINDER_DEBUG_DEAD_BINDER, +					     "reply failed, no target thread at root\n"); +				return; +			} +			t = next; +			binder_debug(BINDER_DEBUG_DEAD_BINDER, +				     "reply failed, no target thread -- retry %d\n", +				      t->debug_id); +		} +	} +} + +static void binder_transaction_buffer_release(struct binder_proc *proc, +					      struct binder_buffer *buffer, +					      binder_size_t *failed_at) +{ +	binder_size_t *offp, *off_end; +	int debug_id = buffer->debug_id; + +	binder_debug(BINDER_DEBUG_TRANSACTION, +		     "%d buffer release %d, size %zd-%zd, failed at %p\n", +		     proc->pid, buffer->debug_id, +		     buffer->data_size, buffer->offsets_size, failed_at); + +	if (buffer->target_node) +		binder_dec_node(buffer->target_node, 1, 0); + +	offp = (binder_size_t *)(buffer->data + +				 ALIGN(buffer->data_size, sizeof(void *))); +	if (failed_at) +		off_end = failed_at; +	else +		off_end = (void *)offp + buffer->offsets_size; +	for (; offp < off_end; offp++) { +		struct flat_binder_object *fp; + +		if (*offp > buffer->data_size - sizeof(*fp) || +		    buffer->data_size < sizeof(*fp) || +		    !IS_ALIGNED(*offp, sizeof(u32))) { +			pr_err("transaction release %d bad offset %lld, size %zd\n", +			       debug_id, (u64)*offp, buffer->data_size); +			continue; +		} +		fp = (struct flat_binder_object *)(buffer->data + *offp); +		switch (fp->type) { +		case BINDER_TYPE_BINDER: +		case BINDER_TYPE_WEAK_BINDER: { +			struct binder_node *node = binder_get_node(proc, fp->binder); + +			if (node == NULL) { +				pr_err("transaction release %d bad node %016llx\n", +				       debug_id, (u64)fp->binder); +				break; +			} +			binder_debug(BINDER_DEBUG_TRANSACTION, +				     "        node %d u%016llx\n", +				     node->debug_id, (u64)node->ptr); +			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); +		} break; +		case BINDER_TYPE_HANDLE: +		case BINDER_TYPE_WEAK_HANDLE: { +			struct binder_ref *ref = binder_get_ref(proc, fp->handle); + +			if (ref == NULL) { +				pr_err("transaction release %d bad handle %d\n", +				 debug_id, fp->handle); +				break; +			} +			binder_debug(BINDER_DEBUG_TRANSACTION, +				     "        ref %d desc %d (node %d)\n", +				     
ref->debug_id, ref->desc, ref->node->debug_id); +			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); +		} break; + +		case BINDER_TYPE_FD: +			binder_debug(BINDER_DEBUG_TRANSACTION, +				     "        fd %d\n", fp->handle); +			if (failed_at) +				task_close_fd(proc, fp->handle); +			break; + +		default: +			pr_err("transaction release %d bad object type %x\n", +				debug_id, fp->type); +			break; +		} +	} +} + +static void binder_transaction(struct binder_proc *proc, +			       struct binder_thread *thread, +			       struct binder_transaction_data *tr, int reply) +{ +	struct binder_transaction *t; +	struct binder_work *tcomplete; +	binder_size_t *offp, *off_end; +	struct binder_proc *target_proc; +	struct binder_thread *target_thread = NULL; +	struct binder_node *target_node = NULL; +	struct list_head *target_list; +	wait_queue_head_t *target_wait; +	struct binder_transaction *in_reply_to = NULL; +	struct binder_transaction_log_entry *e; +	uint32_t return_error; + +	e = binder_transaction_log_add(&binder_transaction_log); +	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); +	e->from_proc = proc->pid; +	e->from_thread = thread->pid; +	e->target_handle = tr->target.handle; +	e->data_size = tr->data_size; +	e->offsets_size = tr->offsets_size; + +	if (reply) { +		in_reply_to = thread->transaction_stack; +		if (in_reply_to == NULL) { +			binder_user_error("%d:%d got reply transaction with no transaction stack\n", +					  proc->pid, thread->pid); +			return_error = BR_FAILED_REPLY; +			goto err_empty_call_stack; +		} +		binder_set_nice(in_reply_to->saved_priority); +		if (in_reply_to->to_thread != thread) { +			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", +				proc->pid, thread->pid, in_reply_to->debug_id, +				in_reply_to->to_proc ? +				in_reply_to->to_proc->pid : 0, +				in_reply_to->to_thread ? +				in_reply_to->to_thread->pid : 0); +			return_error = BR_FAILED_REPLY; +			in_reply_to = NULL; +			goto err_bad_call_stack; +		} +		thread->transaction_stack = in_reply_to->to_parent; +		target_thread = in_reply_to->from; +		if (target_thread == NULL) { +			return_error = BR_DEAD_REPLY; +			goto err_dead_binder; +		} +		if (target_thread->transaction_stack != in_reply_to) { +			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", +				proc->pid, thread->pid, +				target_thread->transaction_stack ? 
+				target_thread->transaction_stack->debug_id : 0, +				in_reply_to->debug_id); +			return_error = BR_FAILED_REPLY; +			in_reply_to = NULL; +			target_thread = NULL; +			goto err_dead_binder; +		} +		target_proc = target_thread->proc; +	} else { +		if (tr->target.handle) { +			struct binder_ref *ref; + +			ref = binder_get_ref(proc, tr->target.handle); +			if (ref == NULL) { +				binder_user_error("%d:%d got transaction to invalid handle\n", +					proc->pid, thread->pid); +				return_error = BR_FAILED_REPLY; +				goto err_invalid_target_handle; +			} +			target_node = ref->node; +		} else { +			target_node = binder_context_mgr_node; +			if (target_node == NULL) { +				return_error = BR_DEAD_REPLY; +				goto err_no_context_mgr_node; +			} +		} +		e->to_node = target_node->debug_id; +		target_proc = target_node->proc; +		if (target_proc == NULL) { +			return_error = BR_DEAD_REPLY; +			goto err_dead_binder; +		} +		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { +			struct binder_transaction *tmp; + +			tmp = thread->transaction_stack; +			if (tmp->to_thread != thread) { +				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", +					proc->pid, thread->pid, tmp->debug_id, +					tmp->to_proc ? tmp->to_proc->pid : 0, +					tmp->to_thread ? +					tmp->to_thread->pid : 0); +				return_error = BR_FAILED_REPLY; +				goto err_bad_call_stack; +			} +			while (tmp) { +				if (tmp->from && tmp->from->proc == target_proc) +					target_thread = tmp->from; +				tmp = tmp->from_parent; +			} +		} +	} +	if (target_thread) { +		e->to_thread = target_thread->pid; +		target_list = &target_thread->todo; +		target_wait = &target_thread->wait; +	} else { +		target_list = &target_proc->todo; +		target_wait = &target_proc->wait; +	} +	e->to_proc = target_proc->pid; + +	/* TODO: reuse incoming transaction for reply */ +	t = kzalloc(sizeof(*t), GFP_KERNEL); +	if (t == NULL) { +		return_error = BR_FAILED_REPLY; +		goto err_alloc_t_failed; +	} +	binder_stats_created(BINDER_STAT_TRANSACTION); + +	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); +	if (tcomplete == NULL) { +		return_error = BR_FAILED_REPLY; +		goto err_alloc_tcomplete_failed; +	} +	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); + +	t->debug_id = ++binder_last_id; +	e->debug_id = t->debug_id; + +	if (reply) +		binder_debug(BINDER_DEBUG_TRANSACTION, +			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n", +			     proc->pid, thread->pid, t->debug_id, +			     target_proc->pid, target_thread->pid, +			     (u64)tr->data.ptr.buffer, +			     (u64)tr->data.ptr.offsets, +			     (u64)tr->data_size, (u64)tr->offsets_size); +	else +		binder_debug(BINDER_DEBUG_TRANSACTION, +			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n", +			     proc->pid, thread->pid, t->debug_id, +			     target_proc->pid, target_node->debug_id, +			     (u64)tr->data.ptr.buffer, +			     (u64)tr->data.ptr.offsets, +			     (u64)tr->data_size, (u64)tr->offsets_size); + +	if (!reply && !(tr->flags & TF_ONE_WAY)) +		t->from = thread; +	else +		t->from = NULL; +	t->sender_euid = task_euid(proc->tsk); +	t->to_proc = target_proc; +	t->to_thread = target_thread; +	t->code = tr->code; +	t->flags = tr->flags; +	t->priority = task_nice(current); + +	trace_binder_transaction(reply, t, target_node); + +	t->buffer = binder_alloc_buf(target_proc, tr->data_size, +		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); +	if (t->buffer == NULL) { +		return_error = 
BR_FAILED_REPLY; +		goto err_binder_alloc_buf_failed; +	} +	t->buffer->allow_user_free = 0; +	t->buffer->debug_id = t->debug_id; +	t->buffer->transaction = t; +	t->buffer->target_node = target_node; +	trace_binder_transaction_alloc_buf(t->buffer); +	if (target_node) +		binder_inc_node(target_node, 1, 0, NULL); + +	offp = (binder_size_t *)(t->buffer->data + +				 ALIGN(tr->data_size, sizeof(void *))); + +	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t) +			   tr->data.ptr.buffer, tr->data_size)) { +		binder_user_error("%d:%d got transaction with invalid data ptr\n", +				proc->pid, thread->pid); +		return_error = BR_FAILED_REPLY; +		goto err_copy_data_failed; +	} +	if (copy_from_user(offp, (const void __user *)(uintptr_t) +			   tr->data.ptr.offsets, tr->offsets_size)) { +		binder_user_error("%d:%d got transaction with invalid offsets ptr\n", +				proc->pid, thread->pid); +		return_error = BR_FAILED_REPLY; +		goto err_copy_data_failed; +	} +	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { +		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", +				proc->pid, thread->pid, (u64)tr->offsets_size); +		return_error = BR_FAILED_REPLY; +		goto err_bad_offset; +	} +	off_end = (void *)offp + tr->offsets_size; +	for (; offp < off_end; offp++) { +		struct flat_binder_object *fp; + +		if (*offp > t->buffer->data_size - sizeof(*fp) || +		    t->buffer->data_size < sizeof(*fp) || +		    !IS_ALIGNED(*offp, sizeof(u32))) { +			binder_user_error("%d:%d got transaction with invalid offset, %lld\n", +					  proc->pid, thread->pid, (u64)*offp); +			return_error = BR_FAILED_REPLY; +			goto err_bad_offset; +		} +		fp = (struct flat_binder_object *)(t->buffer->data + *offp); +		switch (fp->type) { +		case BINDER_TYPE_BINDER: +		case BINDER_TYPE_WEAK_BINDER: { +			struct binder_ref *ref; +			struct binder_node *node = binder_get_node(proc, fp->binder); + +			if (node == NULL) { +				node = binder_new_node(proc, fp->binder, fp->cookie); +				if (node == NULL) { +					return_error = BR_FAILED_REPLY; +					goto err_binder_new_node_failed; +				} +				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; +				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); +			} +			if (fp->cookie != node->cookie) { +				binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", +					proc->pid, thread->pid, +					(u64)fp->binder, node->debug_id, +					(u64)fp->cookie, (u64)node->cookie); +				return_error = BR_FAILED_REPLY; +				goto err_binder_get_ref_for_node_failed; +			} +			ref = binder_get_ref_for_node(target_proc, node); +			if (ref == NULL) { +				return_error = BR_FAILED_REPLY; +				goto err_binder_get_ref_for_node_failed; +			} +			if (fp->type == BINDER_TYPE_BINDER) +				fp->type = BINDER_TYPE_HANDLE; +			else +				fp->type = BINDER_TYPE_WEAK_HANDLE; +			fp->handle = ref->desc; +			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, +				       &thread->todo); + +			trace_binder_transaction_node_to_ref(t, node, ref); +			binder_debug(BINDER_DEBUG_TRANSACTION, +				     "        node %d u%016llx -> ref %d desc %d\n", +				     node->debug_id, (u64)node->ptr, +				     ref->debug_id, ref->desc); +		} break; +		case BINDER_TYPE_HANDLE: +		case BINDER_TYPE_WEAK_HANDLE: { +			struct binder_ref *ref = binder_get_ref(proc, fp->handle); + +			if (ref == NULL) { +				binder_user_error("%d:%d got transaction with invalid handle, %d\n", +						proc->pid, +						thread->pid, fp->handle); +				return_error = 
BR_FAILED_REPLY; +				goto err_binder_get_ref_failed; +			} +			if (ref->node->proc == target_proc) { +				if (fp->type == BINDER_TYPE_HANDLE) +					fp->type = BINDER_TYPE_BINDER; +				else +					fp->type = BINDER_TYPE_WEAK_BINDER; +				fp->binder = ref->node->ptr; +				fp->cookie = ref->node->cookie; +				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); +				trace_binder_transaction_ref_to_node(t, ref); +				binder_debug(BINDER_DEBUG_TRANSACTION, +					     "        ref %d desc %d -> node %d u%016llx\n", +					     ref->debug_id, ref->desc, ref->node->debug_id, +					     (u64)ref->node->ptr); +			} else { +				struct binder_ref *new_ref; + +				new_ref = binder_get_ref_for_node(target_proc, ref->node); +				if (new_ref == NULL) { +					return_error = BR_FAILED_REPLY; +					goto err_binder_get_ref_for_node_failed; +				} +				fp->handle = new_ref->desc; +				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); +				trace_binder_transaction_ref_to_ref(t, ref, +								    new_ref); +				binder_debug(BINDER_DEBUG_TRANSACTION, +					     "        ref %d desc %d -> ref %d desc %d (node %d)\n", +					     ref->debug_id, ref->desc, new_ref->debug_id, +					     new_ref->desc, ref->node->debug_id); +			} +		} break; + +		case BINDER_TYPE_FD: { +			int target_fd; +			struct file *file; + +			if (reply) { +				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { +					binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n", +						proc->pid, thread->pid, fp->handle); +					return_error = BR_FAILED_REPLY; +					goto err_fd_not_allowed; +				} +			} else if (!target_node->accept_fds) { +				binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n", +					proc->pid, thread->pid, fp->handle); +				return_error = BR_FAILED_REPLY; +				goto err_fd_not_allowed; +			} + +			file = fget(fp->handle); +			if (file == NULL) { +				binder_user_error("%d:%d got transaction with invalid fd, %d\n", +					proc->pid, thread->pid, fp->handle); +				return_error = BR_FAILED_REPLY; +				goto err_fget_failed; +			} +			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); +			if (target_fd < 0) { +				fput(file); +				return_error = BR_FAILED_REPLY; +				goto err_get_unused_fd_failed; +			} +			task_fd_install(target_proc, target_fd, file); +			trace_binder_transaction_fd(t, fp->handle, target_fd); +			binder_debug(BINDER_DEBUG_TRANSACTION, +				     "        fd %d -> %d\n", fp->handle, target_fd); +			/* TODO: fput? 
*/ +			fp->handle = target_fd; +		} break; + +		default: +			binder_user_error("%d:%d got transaction with invalid object type, %x\n", +				proc->pid, thread->pid, fp->type); +			return_error = BR_FAILED_REPLY; +			goto err_bad_object_type; +		} +	} +	if (reply) { +		BUG_ON(t->buffer->async_transaction != 0); +		binder_pop_transaction(target_thread, in_reply_to); +	} else if (!(t->flags & TF_ONE_WAY)) { +		BUG_ON(t->buffer->async_transaction != 0); +		t->need_reply = 1; +		t->from_parent = thread->transaction_stack; +		thread->transaction_stack = t; +	} else { +		BUG_ON(target_node == NULL); +		BUG_ON(t->buffer->async_transaction != 1); +		if (target_node->has_async_transaction) { +			target_list = &target_node->async_todo; +			target_wait = NULL; +		} else +			target_node->has_async_transaction = 1; +	} +	t->work.type = BINDER_WORK_TRANSACTION; +	list_add_tail(&t->work.entry, target_list); +	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; +	list_add_tail(&tcomplete->entry, &thread->todo); +	if (target_wait) +		wake_up_interruptible(target_wait); +	return; + +err_get_unused_fd_failed: +err_fget_failed: +err_fd_not_allowed: +err_binder_get_ref_for_node_failed: +err_binder_get_ref_failed: +err_binder_new_node_failed: +err_bad_object_type: +err_bad_offset: +err_copy_data_failed: +	trace_binder_transaction_failed_buffer_release(t->buffer); +	binder_transaction_buffer_release(target_proc, t->buffer, offp); +	t->buffer->transaction = NULL; +	binder_free_buf(target_proc, t->buffer); +err_binder_alloc_buf_failed: +	kfree(tcomplete); +	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); +err_alloc_tcomplete_failed: +	kfree(t); +	binder_stats_deleted(BINDER_STAT_TRANSACTION); +err_alloc_t_failed: +err_bad_call_stack: +err_empty_call_stack: +err_dead_binder: +err_invalid_target_handle: +err_no_context_mgr_node: +	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, +		     "%d:%d transaction failed %d, size %lld-%lld\n", +		     proc->pid, thread->pid, return_error, +		     (u64)tr->data_size, (u64)tr->offsets_size); + +	{ +		struct binder_transaction_log_entry *fe; + +		fe = binder_transaction_log_add(&binder_transaction_log_failed); +		*fe = *e; +	} + +	BUG_ON(thread->return_error != BR_OK); +	if (in_reply_to) { +		thread->return_error = BR_TRANSACTION_COMPLETE; +		binder_send_failed_reply(in_reply_to, return_error); +	} else +		thread->return_error = return_error; +} + +static int binder_thread_write(struct binder_proc *proc, +			struct binder_thread *thread, +			binder_uintptr_t binder_buffer, size_t size, +			binder_size_t *consumed) +{ +	uint32_t cmd; +	void __user *buffer = (void __user *)(uintptr_t)binder_buffer; +	void __user *ptr = buffer + *consumed; +	void __user *end = buffer + size; + +	while (ptr < end && thread->return_error == BR_OK) { +		if (get_user(cmd, (uint32_t __user *)ptr)) +			return -EFAULT; +		ptr += sizeof(uint32_t); +		trace_binder_command(cmd); +		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { +			binder_stats.bc[_IOC_NR(cmd)]++; +			proc->stats.bc[_IOC_NR(cmd)]++; +			thread->stats.bc[_IOC_NR(cmd)]++; +		} +		switch (cmd) { +		case BC_INCREFS: +		case BC_ACQUIRE: +		case BC_RELEASE: +		case BC_DECREFS: { +			uint32_t target; +			struct binder_ref *ref; +			const char *debug_string; + +			if (get_user(target, (uint32_t __user *)ptr)) +				return -EFAULT; +			ptr += sizeof(uint32_t); +			if (target == 0 && binder_context_mgr_node && +			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { +				ref = binder_get_ref_for_node(proc, +					       binder_context_mgr_node); +				
if (ref->desc != target) { +					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", +						proc->pid, thread->pid, +						ref->desc); +				} +			} else +				ref = binder_get_ref(proc, target); +			if (ref == NULL) { +				binder_user_error("%d:%d refcount change on invalid ref %d\n", +					proc->pid, thread->pid, target); +				break; +			} +			switch (cmd) { +			case BC_INCREFS: +				debug_string = "IncRefs"; +				binder_inc_ref(ref, 0, NULL); +				break; +			case BC_ACQUIRE: +				debug_string = "Acquire"; +				binder_inc_ref(ref, 1, NULL); +				break; +			case BC_RELEASE: +				debug_string = "Release"; +				binder_dec_ref(ref, 1); +				break; +			case BC_DECREFS: +			default: +				debug_string = "DecRefs"; +				binder_dec_ref(ref, 0); +				break; +			} +			binder_debug(BINDER_DEBUG_USER_REFS, +				     "%d:%d %s ref %d desc %d s %d w %d for node %d\n", +				     proc->pid, thread->pid, debug_string, ref->debug_id, +				     ref->desc, ref->strong, ref->weak, ref->node->debug_id); +			break; +		} +		case BC_INCREFS_DONE: +		case BC_ACQUIRE_DONE: { +			binder_uintptr_t node_ptr; +			binder_uintptr_t cookie; +			struct binder_node *node; + +			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) +				return -EFAULT; +			ptr += sizeof(binder_uintptr_t); +			if (get_user(cookie, (binder_uintptr_t __user *)ptr)) +				return -EFAULT; +			ptr += sizeof(binder_uintptr_t); +			node = binder_get_node(proc, node_ptr); +			if (node == NULL) { +				binder_user_error("%d:%d %s u%016llx no match\n", +					proc->pid, thread->pid, +					cmd == BC_INCREFS_DONE ? +					"BC_INCREFS_DONE" : +					"BC_ACQUIRE_DONE", +					(u64)node_ptr); +				break; +			} +			if (cookie != node->cookie) { +				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", +					proc->pid, thread->pid, +					cmd == BC_INCREFS_DONE ? +					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", +					(u64)node_ptr, node->debug_id, +					(u64)cookie, (u64)node->cookie); +				break; +			} +			if (cmd == BC_ACQUIRE_DONE) { +				if (node->pending_strong_ref == 0) { +					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", +						proc->pid, thread->pid, +						node->debug_id); +					break; +				} +				node->pending_strong_ref = 0; +			} else { +				if (node->pending_weak_ref == 0) { +					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", +						proc->pid, thread->pid, +						node->debug_id); +					break; +				} +				node->pending_weak_ref = 0; +			} +			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); +			binder_debug(BINDER_DEBUG_USER_REFS, +				     "%d:%d %s node %d ls %d lw %d\n", +				     proc->pid, thread->pid, +				     cmd == BC_INCREFS_DONE ? 
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", +				     node->debug_id, node->local_strong_refs, node->local_weak_refs); +			break; +		} +		case BC_ATTEMPT_ACQUIRE: +			pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); +			return -EINVAL; +		case BC_ACQUIRE_RESULT: +			pr_err("BC_ACQUIRE_RESULT not supported\n"); +			return -EINVAL; + +		case BC_FREE_BUFFER: { +			binder_uintptr_t data_ptr; +			struct binder_buffer *buffer; + +			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) +				return -EFAULT; +			ptr += sizeof(binder_uintptr_t); + +			buffer = binder_buffer_lookup(proc, data_ptr); +			if (buffer == NULL) { +				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", +					proc->pid, thread->pid, (u64)data_ptr); +				break; +			} +			if (!buffer->allow_user_free) { +				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", +					proc->pid, thread->pid, (u64)data_ptr); +				break; +			} +			binder_debug(BINDER_DEBUG_FREE_BUFFER, +				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", +				     proc->pid, thread->pid, (u64)data_ptr, +				     buffer->debug_id, +				     buffer->transaction ? "active" : "finished"); + +			if (buffer->transaction) { +				buffer->transaction->buffer = NULL; +				buffer->transaction = NULL; +			} +			if (buffer->async_transaction && buffer->target_node) { +				BUG_ON(!buffer->target_node->has_async_transaction); +				if (list_empty(&buffer->target_node->async_todo)) +					buffer->target_node->has_async_transaction = 0; +				else +					list_move_tail(buffer->target_node->async_todo.next, &thread->todo); +			} +			trace_binder_transaction_buffer_release(buffer); +			binder_transaction_buffer_release(proc, buffer, NULL); +			binder_free_buf(proc, buffer); +			break; +		} + +		case BC_TRANSACTION: +		case BC_REPLY: { +			struct binder_transaction_data tr; + +			if (copy_from_user(&tr, ptr, sizeof(tr))) +				return -EFAULT; +			ptr += sizeof(tr); +			binder_transaction(proc, thread, &tr, cmd == BC_REPLY); +			break; +		} + +		case BC_REGISTER_LOOPER: +			binder_debug(BINDER_DEBUG_THREADS, +				     "%d:%d BC_REGISTER_LOOPER\n", +				     proc->pid, thread->pid); +			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { +				thread->looper |= BINDER_LOOPER_STATE_INVALID; +				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", +					proc->pid, thread->pid); +			} else if (proc->requested_threads == 0) { +				thread->looper |= BINDER_LOOPER_STATE_INVALID; +				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", +					proc->pid, thread->pid); +			} else { +				proc->requested_threads--; +				proc->requested_threads_started++; +			} +			thread->looper |= BINDER_LOOPER_STATE_REGISTERED; +			break; +		case BC_ENTER_LOOPER: +			binder_debug(BINDER_DEBUG_THREADS, +				     "%d:%d BC_ENTER_LOOPER\n", +				     proc->pid, thread->pid); +			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { +				thread->looper |= BINDER_LOOPER_STATE_INVALID; +				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", +					proc->pid, thread->pid); +			} +			thread->looper |= BINDER_LOOPER_STATE_ENTERED; +			break; +		case BC_EXIT_LOOPER: +			binder_debug(BINDER_DEBUG_THREADS, +				     "%d:%d BC_EXIT_LOOPER\n", +				     proc->pid, thread->pid); +			thread->looper |= BINDER_LOOPER_STATE_EXITED; +			break; + +		case BC_REQUEST_DEATH_NOTIFICATION: +		case BC_CLEAR_DEATH_NOTIFICATION: { +			uint32_t target; +			binder_uintptr_t cookie; +			struct 
binder_ref *ref; +			struct binder_ref_death *death; + +			if (get_user(target, (uint32_t __user *)ptr)) +				return -EFAULT; +			ptr += sizeof(uint32_t); +			if (get_user(cookie, (binder_uintptr_t __user *)ptr)) +				return -EFAULT; +			ptr += sizeof(binder_uintptr_t); +			ref = binder_get_ref(proc, target); +			if (ref == NULL) { +				binder_user_error("%d:%d %s invalid ref %d\n", +					proc->pid, thread->pid, +					cmd == BC_REQUEST_DEATH_NOTIFICATION ? +					"BC_REQUEST_DEATH_NOTIFICATION" : +					"BC_CLEAR_DEATH_NOTIFICATION", +					target); +				break; +			} + +			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, +				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", +				     proc->pid, thread->pid, +				     cmd == BC_REQUEST_DEATH_NOTIFICATION ? +				     "BC_REQUEST_DEATH_NOTIFICATION" : +				     "BC_CLEAR_DEATH_NOTIFICATION", +				     (u64)cookie, ref->debug_id, ref->desc, +				     ref->strong, ref->weak, ref->node->debug_id); + +			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { +				if (ref->death) { +					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", +						proc->pid, thread->pid); +					break; +				} +				death = kzalloc(sizeof(*death), GFP_KERNEL); +				if (death == NULL) { +					thread->return_error = BR_ERROR; +					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, +						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", +						     proc->pid, thread->pid); +					break; +				} +				binder_stats_created(BINDER_STAT_DEATH); +				INIT_LIST_HEAD(&death->work.entry); +				death->cookie = cookie; +				ref->death = death; +				if (ref->node->proc == NULL) { +					ref->death->work.type = BINDER_WORK_DEAD_BINDER; +					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { +						list_add_tail(&ref->death->work.entry, &thread->todo); +					} else { +						list_add_tail(&ref->death->work.entry, &proc->todo); +						wake_up_interruptible(&proc->wait); +					} +				} +			} else { +				if (ref->death == NULL) { +					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", +						proc->pid, thread->pid); +					break; +				} +				death = ref->death; +				if (death->cookie != cookie) { +					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", +						proc->pid, thread->pid, +						(u64)death->cookie, +						(u64)cookie); +					break; +				} +				ref->death = NULL; +				if (list_empty(&death->work.entry)) { +					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; +					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { +						list_add_tail(&death->work.entry, &thread->todo); +					} else { +						list_add_tail(&death->work.entry, &proc->todo); +						wake_up_interruptible(&proc->wait); +					} +				} else { +					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); +					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; +				} +			} +		} break; +		case BC_DEAD_BINDER_DONE: { +			struct binder_work *w; +			binder_uintptr_t cookie; +			struct binder_ref_death *death = NULL; + +			if (get_user(cookie, (binder_uintptr_t __user *)ptr)) +				return -EFAULT; + +			ptr += sizeof(void *); +			list_for_each_entry(w, &proc->delivered_death, entry) { +				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); + +				if (tmp_death->cookie == cookie) { +					death = tmp_death; +					break; +				} +			} +			binder_debug(BINDER_DEBUG_DEAD_BINDER, +				     
"%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", +				     proc->pid, thread->pid, (u64)cookie, +				     death); +			if (death == NULL) { +				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", +					proc->pid, thread->pid, (u64)cookie); +				break; +			} + +			list_del_init(&death->work.entry); +			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { +				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; +				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { +					list_add_tail(&death->work.entry, &thread->todo); +				} else { +					list_add_tail(&death->work.entry, &proc->todo); +					wake_up_interruptible(&proc->wait); +				} +			} +		} break; + +		default: +			pr_err("%d:%d unknown command %d\n", +			       proc->pid, thread->pid, cmd); +			return -EINVAL; +		} +		*consumed = ptr - buffer; +	} +	return 0; +} + +static void binder_stat_br(struct binder_proc *proc, +			   struct binder_thread *thread, uint32_t cmd) +{ +	trace_binder_return(cmd); +	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { +		binder_stats.br[_IOC_NR(cmd)]++; +		proc->stats.br[_IOC_NR(cmd)]++; +		thread->stats.br[_IOC_NR(cmd)]++; +	} +} + +static int binder_has_proc_work(struct binder_proc *proc, +				struct binder_thread *thread) +{ +	return !list_empty(&proc->todo) || +		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); +} + +static int binder_has_thread_work(struct binder_thread *thread) +{ +	return !list_empty(&thread->todo) || thread->return_error != BR_OK || +		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); +} + +static int binder_thread_read(struct binder_proc *proc, +			      struct binder_thread *thread, +			      binder_uintptr_t binder_buffer, size_t size, +			      binder_size_t *consumed, int non_block) +{ +	void __user *buffer = (void __user *)(uintptr_t)binder_buffer; +	void __user *ptr = buffer + *consumed; +	void __user *end = buffer + size; + +	int ret = 0; +	int wait_for_proc_work; + +	if (*consumed == 0) { +		if (put_user(BR_NOOP, (uint32_t __user *)ptr)) +			return -EFAULT; +		ptr += sizeof(uint32_t); +	} + +retry: +	wait_for_proc_work = thread->transaction_stack == NULL && +				list_empty(&thread->todo); + +	if (thread->return_error != BR_OK && ptr < end) { +		if (thread->return_error2 != BR_OK) { +			if (put_user(thread->return_error2, (uint32_t __user *)ptr)) +				return -EFAULT; +			ptr += sizeof(uint32_t); +			binder_stat_br(proc, thread, thread->return_error2); +			if (ptr == end) +				goto done; +			thread->return_error2 = BR_OK; +		} +		if (put_user(thread->return_error, (uint32_t __user *)ptr)) +			return -EFAULT; +		ptr += sizeof(uint32_t); +		binder_stat_br(proc, thread, thread->return_error); +		thread->return_error = BR_OK; +		goto done; +	} + + +	thread->looper |= BINDER_LOOPER_STATE_WAITING; +	if (wait_for_proc_work) +		proc->ready_threads++; + +	binder_unlock(__func__); + +	trace_binder_wait_for_work(wait_for_proc_work, +				   !!thread->transaction_stack, +				   !list_empty(&thread->todo)); +	if (wait_for_proc_work) { +		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | +					BINDER_LOOPER_STATE_ENTERED))) { +			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", +				proc->pid, thread->pid, thread->looper); +			wait_event_interruptible(binder_user_error_wait, +						 binder_stop_on_user_error < 2); +		} +		binder_set_nice(proc->default_priority); +		if (non_block) { +			if (!binder_has_proc_work(proc, thread)) +				
ret = -EAGAIN; +		} else +			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread)); +	} else { +		if (non_block) { +			if (!binder_has_thread_work(thread)) +				ret = -EAGAIN; +		} else +			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread)); +	} + +	binder_lock(__func__); + +	if (wait_for_proc_work) +		proc->ready_threads--; +	thread->looper &= ~BINDER_LOOPER_STATE_WAITING; + +	if (ret) +		return ret; + +	while (1) { +		uint32_t cmd; +		struct binder_transaction_data tr; +		struct binder_work *w; +		struct binder_transaction *t = NULL; + +		if (!list_empty(&thread->todo)) +			w = list_first_entry(&thread->todo, struct binder_work, entry); +		else if (!list_empty(&proc->todo) && wait_for_proc_work) +			w = list_first_entry(&proc->todo, struct binder_work, entry); +		else { +			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */ +				goto retry; +			break; +		} + +		if (end - ptr < sizeof(tr) + 4) +			break; + +		switch (w->type) { +		case BINDER_WORK_TRANSACTION: { +			t = container_of(w, struct binder_transaction, work); +		} break; +		case BINDER_WORK_TRANSACTION_COMPLETE: { +			cmd = BR_TRANSACTION_COMPLETE; +			if (put_user(cmd, (uint32_t __user *)ptr)) +				return -EFAULT; +			ptr += sizeof(uint32_t); + +			binder_stat_br(proc, thread, cmd); +			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, +				     "%d:%d BR_TRANSACTION_COMPLETE\n", +				     proc->pid, thread->pid); + +			list_del(&w->entry); +			kfree(w); +			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); +		} break; +		case BINDER_WORK_NODE: { +			struct binder_node *node = container_of(w, struct binder_node, work); +			uint32_t cmd = BR_NOOP; +			const char *cmd_name; +			int strong = node->internal_strong_refs || node->local_strong_refs; +			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; + +			if (weak && !node->has_weak_ref) { +				cmd = BR_INCREFS; +				cmd_name = "BR_INCREFS"; +				node->has_weak_ref = 1; +				node->pending_weak_ref = 1; +				node->local_weak_refs++; +			} else if (strong && !node->has_strong_ref) { +				cmd = BR_ACQUIRE; +				cmd_name = "BR_ACQUIRE"; +				node->has_strong_ref = 1; +				node->pending_strong_ref = 1; +				node->local_strong_refs++; +			} else if (!strong && node->has_strong_ref) { +				cmd = BR_RELEASE; +				cmd_name = "BR_RELEASE"; +				node->has_strong_ref = 0; +			} else if (!weak && node->has_weak_ref) { +				cmd = BR_DECREFS; +				cmd_name = "BR_DECREFS"; +				node->has_weak_ref = 0; +			} +			if (cmd != BR_NOOP) { +				if (put_user(cmd, (uint32_t __user *)ptr)) +					return -EFAULT; +				ptr += sizeof(uint32_t); +				if (put_user(node->ptr, +					     (binder_uintptr_t __user *)ptr)) +					return -EFAULT; +				ptr += sizeof(binder_uintptr_t); +				if (put_user(node->cookie, +					     (binder_uintptr_t __user *)ptr)) +					return -EFAULT; +				ptr += sizeof(binder_uintptr_t); + +				binder_stat_br(proc, thread, cmd); +				binder_debug(BINDER_DEBUG_USER_REFS, +					     "%d:%d %s %d u%016llx c%016llx\n", +					     proc->pid, thread->pid, cmd_name, +					     node->debug_id, +					     (u64)node->ptr, (u64)node->cookie); +			} else { +				list_del_init(&w->entry); +				if (!weak && !strong) { +					binder_debug(BINDER_DEBUG_INTERNAL_REFS, +						     "%d:%d node %d u%016llx c%016llx deleted\n", +						     proc->pid, thread->pid, +						     node->debug_id, +						     (u64)node->ptr, +						     (u64)node->cookie); +					rb_erase(&node->rb_node, 
&proc->nodes); +					kfree(node); +					binder_stats_deleted(BINDER_STAT_NODE); +				} else { +					binder_debug(BINDER_DEBUG_INTERNAL_REFS, +						     "%d:%d node %d u%016llx c%016llx state unchanged\n", +						     proc->pid, thread->pid, +						     node->debug_id, +						     (u64)node->ptr, +						     (u64)node->cookie); +				} +			} +		} break; +		case BINDER_WORK_DEAD_BINDER: +		case BINDER_WORK_DEAD_BINDER_AND_CLEAR: +		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { +			struct binder_ref_death *death; +			uint32_t cmd; + +			death = container_of(w, struct binder_ref_death, work); +			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) +				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; +			else +				cmd = BR_DEAD_BINDER; +			if (put_user(cmd, (uint32_t __user *)ptr)) +				return -EFAULT; +			ptr += sizeof(uint32_t); +			if (put_user(death->cookie, +				     (binder_uintptr_t __user *)ptr)) +				return -EFAULT; +			ptr += sizeof(binder_uintptr_t); +			binder_stat_br(proc, thread, cmd); +			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, +				     "%d:%d %s %016llx\n", +				      proc->pid, thread->pid, +				      cmd == BR_DEAD_BINDER ? +				      "BR_DEAD_BINDER" : +				      "BR_CLEAR_DEATH_NOTIFICATION_DONE", +				      (u64)death->cookie); + +			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { +				list_del(&w->entry); +				kfree(death); +				binder_stats_deleted(BINDER_STAT_DEATH); +			} else +				list_move(&w->entry, &proc->delivered_death); +			if (cmd == BR_DEAD_BINDER) +				goto done; /* DEAD_BINDER notifications can cause transactions */ +		} break; +		} + +		if (!t) +			continue; + +		BUG_ON(t->buffer == NULL); +		if (t->buffer->target_node) { +			struct binder_node *target_node = t->buffer->target_node; + +			tr.target.ptr = target_node->ptr; +			tr.cookie =  target_node->cookie; +			t->saved_priority = task_nice(current); +			if (t->priority < target_node->min_priority && +			    !(t->flags & TF_ONE_WAY)) +				binder_set_nice(t->priority); +			else if (!(t->flags & TF_ONE_WAY) || +				 t->saved_priority > target_node->min_priority) +				binder_set_nice(target_node->min_priority); +			cmd = BR_TRANSACTION; +		} else { +			tr.target.ptr = 0; +			tr.cookie = 0; +			cmd = BR_REPLY; +		} +		tr.code = t->code; +		tr.flags = t->flags; +		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); + +		if (t->from) { +			struct task_struct *sender = t->from->proc->tsk; + +			tr.sender_pid = task_tgid_nr_ns(sender, +							task_active_pid_ns(current)); +		} else { +			tr.sender_pid = 0; +		} + +		tr.data_size = t->buffer->data_size; +		tr.offsets_size = t->buffer->offsets_size; +		tr.data.ptr.buffer = (binder_uintptr_t)( +					(uintptr_t)t->buffer->data + +					proc->user_buffer_offset); +		tr.data.ptr.offsets = tr.data.ptr.buffer + +					ALIGN(t->buffer->data_size, +					    sizeof(void *)); + +		if (put_user(cmd, (uint32_t __user *)ptr)) +			return -EFAULT; +		ptr += sizeof(uint32_t); +		if (copy_to_user(ptr, &tr, sizeof(tr))) +			return -EFAULT; +		ptr += sizeof(tr); + +		trace_binder_transaction_received(t); +		binder_stat_br(proc, thread, cmd); +		binder_debug(BINDER_DEBUG_TRANSACTION, +			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", +			     proc->pid, thread->pid, +			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : +			     "BR_REPLY", +			     t->debug_id, t->from ? t->from->proc->pid : 0, +			     t->from ? 
t->from->pid : 0, cmd, +			     t->buffer->data_size, t->buffer->offsets_size, +			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); + +		list_del(&t->work.entry); +		t->buffer->allow_user_free = 1; +		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { +			t->to_parent = thread->transaction_stack; +			t->to_thread = thread; +			thread->transaction_stack = t; +		} else { +			t->buffer->transaction = NULL; +			kfree(t); +			binder_stats_deleted(BINDER_STAT_TRANSACTION); +		} +		break; +	} + +done: + +	*consumed = ptr - buffer; +	if (proc->requested_threads + proc->ready_threads == 0 && +	    proc->requested_threads_started < proc->max_threads && +	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | +	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ +	     /*spawn a new thread if we leave this out */) { +		proc->requested_threads++; +		binder_debug(BINDER_DEBUG_THREADS, +			     "%d:%d BR_SPAWN_LOOPER\n", +			     proc->pid, thread->pid); +		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) +			return -EFAULT; +		binder_stat_br(proc, thread, BR_SPAWN_LOOPER); +	} +	return 0; +} + +static void binder_release_work(struct list_head *list) +{ +	struct binder_work *w; + +	while (!list_empty(list)) { +		w = list_first_entry(list, struct binder_work, entry); +		list_del_init(&w->entry); +		switch (w->type) { +		case BINDER_WORK_TRANSACTION: { +			struct binder_transaction *t; + +			t = container_of(w, struct binder_transaction, work); +			if (t->buffer->target_node && +			    !(t->flags & TF_ONE_WAY)) { +				binder_send_failed_reply(t, BR_DEAD_REPLY); +			} else { +				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, +					"undelivered transaction %d\n", +					t->debug_id); +				t->buffer->transaction = NULL; +				kfree(t); +				binder_stats_deleted(BINDER_STAT_TRANSACTION); +			} +		} break; +		case BINDER_WORK_TRANSACTION_COMPLETE: { +			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, +				"undelivered TRANSACTION_COMPLETE\n"); +			kfree(w); +			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); +		} break; +		case BINDER_WORK_DEAD_BINDER_AND_CLEAR: +		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { +			struct binder_ref_death *death; + +			death = container_of(w, struct binder_ref_death, work); +			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, +				"undelivered death notification, %016llx\n", +				(u64)death->cookie); +			kfree(death); +			binder_stats_deleted(BINDER_STAT_DEATH); +		} break; +		default: +			pr_err("unexpected work type, %d, not freed\n", +			       w->type); +			break; +		} +	} + +} + +static struct binder_thread *binder_get_thread(struct binder_proc *proc) +{ +	struct binder_thread *thread = NULL; +	struct rb_node *parent = NULL; +	struct rb_node **p = &proc->threads.rb_node; + +	while (*p) { +		parent = *p; +		thread = rb_entry(parent, struct binder_thread, rb_node); + +		if (current->pid < thread->pid) +			p = &(*p)->rb_left; +		else if (current->pid > thread->pid) +			p = &(*p)->rb_right; +		else +			break; +	} +	if (*p == NULL) { +		thread = kzalloc(sizeof(*thread), GFP_KERNEL); +		if (thread == NULL) +			return NULL; +		binder_stats_created(BINDER_STAT_THREAD); +		thread->proc = proc; +		thread->pid = current->pid; +		init_waitqueue_head(&thread->wait); +		INIT_LIST_HEAD(&thread->todo); +		rb_link_node(&thread->rb_node, parent, p); +		rb_insert_color(&thread->rb_node, &proc->threads); +		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; +		thread->return_error = BR_OK; +		thread->return_error2 = BR_OK; +	} +	return thread; +} + +static 
int binder_free_thread(struct binder_proc *proc, +			      struct binder_thread *thread) +{ +	struct binder_transaction *t; +	struct binder_transaction *send_reply = NULL; +	int active_transactions = 0; + +	rb_erase(&thread->rb_node, &proc->threads); +	t = thread->transaction_stack; +	if (t && t->to_thread == thread) +		send_reply = t; +	while (t) { +		active_transactions++; +		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, +			     "release %d:%d transaction %d %s, still active\n", +			      proc->pid, thread->pid, +			     t->debug_id, +			     (t->to_thread == thread) ? "in" : "out"); + +		if (t->to_thread == thread) { +			t->to_proc = NULL; +			t->to_thread = NULL; +			if (t->buffer) { +				t->buffer->transaction = NULL; +				t->buffer = NULL; +			} +			t = t->to_parent; +		} else if (t->from == thread) { +			t->from = NULL; +			t = t->from_parent; +		} else +			BUG(); +	} +	if (send_reply) +		binder_send_failed_reply(send_reply, BR_DEAD_REPLY); +	binder_release_work(&thread->todo); +	kfree(thread); +	binder_stats_deleted(BINDER_STAT_THREAD); +	return active_transactions; +} + +static unsigned int binder_poll(struct file *filp, +				struct poll_table_struct *wait) +{ +	struct binder_proc *proc = filp->private_data; +	struct binder_thread *thread = NULL; +	int wait_for_proc_work; + +	binder_lock(__func__); + +	thread = binder_get_thread(proc); + +	wait_for_proc_work = thread->transaction_stack == NULL && +		list_empty(&thread->todo) && thread->return_error == BR_OK; + +	binder_unlock(__func__); + +	if (wait_for_proc_work) { +		if (binder_has_proc_work(proc, thread)) +			return POLLIN; +		poll_wait(filp, &proc->wait, wait); +		if (binder_has_proc_work(proc, thread)) +			return POLLIN; +	} else { +		if (binder_has_thread_work(thread)) +			return POLLIN; +		poll_wait(filp, &thread->wait, wait); +		if (binder_has_thread_work(thread)) +			return POLLIN; +	} +	return 0; +} + +static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ +	int ret; +	struct binder_proc *proc = filp->private_data; +	struct binder_thread *thread; +	unsigned int size = _IOC_SIZE(cmd); +	void __user *ubuf = (void __user *)arg; +	kuid_t curr_euid = current_euid(); + +	/*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ + +	trace_binder_ioctl(cmd, arg); + +	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); +	if (ret) +		goto err_unlocked; + +	binder_lock(__func__); +	thread = binder_get_thread(proc); +	if (thread == NULL) { +		ret = -ENOMEM; +		goto err; +	} + +	switch (cmd) { +	case BINDER_WRITE_READ: { +		struct binder_write_read bwr; + +		if (size != sizeof(struct binder_write_read)) { +			ret = -EINVAL; +			goto err; +		} +		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { +			ret = -EFAULT; +			goto err; +		} +		binder_debug(BINDER_DEBUG_READ_WRITE, +			     "%d:%d write %lld at %016llx, read %lld at %016llx\n", +			     proc->pid, thread->pid, +			     (u64)bwr.write_size, (u64)bwr.write_buffer, +			     (u64)bwr.read_size, (u64)bwr.read_buffer); + +		if (bwr.write_size > 0) { +			ret = binder_thread_write(proc, thread, +						  bwr.write_buffer, +						  bwr.write_size, +						  &bwr.write_consumed); +			trace_binder_write_done(ret); +			if (ret < 0) { +				bwr.read_consumed = 0; +				if (copy_to_user(ubuf, &bwr, sizeof(bwr))) +					ret = -EFAULT; +				goto err; +			} +		} +		if (bwr.read_size > 0) { +			ret = binder_thread_read(proc, thread, bwr.read_buffer, +						 bwr.read_size, +						 &bwr.read_consumed, +						 
filp->f_flags & O_NONBLOCK); +			trace_binder_read_done(ret); +			if (!list_empty(&proc->todo)) +				wake_up_interruptible(&proc->wait); +			if (ret < 0) { +				if (copy_to_user(ubuf, &bwr, sizeof(bwr))) +					ret = -EFAULT; +				goto err; +			} +		} +		binder_debug(BINDER_DEBUG_READ_WRITE, +			     "%d:%d wrote %lld of %lld, read return %lld of %lld\n", +			     proc->pid, thread->pid, +			     (u64)bwr.write_consumed, (u64)bwr.write_size, +			     (u64)bwr.read_consumed, (u64)bwr.read_size); +		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { +			ret = -EFAULT; +			goto err; +		} +		break; +	} +	case BINDER_SET_MAX_THREADS: +		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { +			ret = -EINVAL; +			goto err; +		} +		break; +	case BINDER_SET_CONTEXT_MGR: +		if (binder_context_mgr_node != NULL) { +			pr_err("BINDER_SET_CONTEXT_MGR already set\n"); +			ret = -EBUSY; +			goto err; +		} +		if (uid_valid(binder_context_mgr_uid)) { +			if (!uid_eq(binder_context_mgr_uid, curr_euid)) { +				pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", +				       from_kuid(&init_user_ns, curr_euid), +				       from_kuid(&init_user_ns, binder_context_mgr_uid)); +				ret = -EPERM; +				goto err; +			} +		} else { +			binder_context_mgr_uid = curr_euid; +		} +		binder_context_mgr_node = binder_new_node(proc, 0, 0); +		if (binder_context_mgr_node == NULL) { +			ret = -ENOMEM; +			goto err; +		} +		binder_context_mgr_node->local_weak_refs++; +		binder_context_mgr_node->local_strong_refs++; +		binder_context_mgr_node->has_strong_ref = 1; +		binder_context_mgr_node->has_weak_ref = 1; +		break; +	case BINDER_THREAD_EXIT: +		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", +			     proc->pid, thread->pid); +		binder_free_thread(proc, thread); +		thread = NULL; +		break; +	case BINDER_VERSION: { +		struct binder_version __user *ver = ubuf; + +		if (size != sizeof(struct binder_version)) { +			ret = -EINVAL; +			goto err; +		} +		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, +			     &ver->protocol_version)) { +			ret = -EINVAL; +			goto err; +		} +		break; +	} +	default: +		ret = -EINVAL; +		goto err; +	} +	ret = 0; +err: +	if (thread) +		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; +	binder_unlock(__func__); +	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); +	if (ret && ret != -ERESTARTSYS) +		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); +err_unlocked: +	trace_binder_ioctl_done(ret); +	return ret; +} + +static void binder_vma_open(struct vm_area_struct *vma) +{ +	struct binder_proc *proc = vma->vm_private_data; + +	binder_debug(BINDER_DEBUG_OPEN_CLOSE, +		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", +		     proc->pid, vma->vm_start, vma->vm_end, +		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, +		     (unsigned long)pgprot_val(vma->vm_page_prot)); +} + +static void binder_vma_close(struct vm_area_struct *vma) +{ +	struct binder_proc *proc = vma->vm_private_data; + +	binder_debug(BINDER_DEBUG_OPEN_CLOSE, +		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", +		     proc->pid, vma->vm_start, vma->vm_end, +		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, +		     (unsigned long)pgprot_val(vma->vm_page_prot)); +	proc->vma = NULL; +	proc->vma_vm_mm = NULL; +	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); +} + +static struct vm_operations_struct binder_vm_ops = { +	.open = binder_vma_open, +	.close = binder_vma_close, +}; + +static int binder_mmap(struct file 
*filp, struct vm_area_struct *vma) +{ +	int ret; +	struct vm_struct *area; +	struct binder_proc *proc = filp->private_data; +	const char *failure_string; +	struct binder_buffer *buffer; + +	if (proc->tsk != current) +		return -EINVAL; + +	if ((vma->vm_end - vma->vm_start) > SZ_4M) +		vma->vm_end = vma->vm_start + SZ_4M; + +	binder_debug(BINDER_DEBUG_OPEN_CLOSE, +		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", +		     proc->pid, vma->vm_start, vma->vm_end, +		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, +		     (unsigned long)pgprot_val(vma->vm_page_prot)); + +	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { +		ret = -EPERM; +		failure_string = "bad vm_flags"; +		goto err_bad_arg; +	} +	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; + +	mutex_lock(&binder_mmap_lock); +	if (proc->buffer) { +		ret = -EBUSY; +		failure_string = "already mapped"; +		goto err_already_mapped; +	} + +	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); +	if (area == NULL) { +		ret = -ENOMEM; +		failure_string = "get_vm_area"; +		goto err_get_vm_area_failed; +	} +	proc->buffer = area->addr; +	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; +	mutex_unlock(&binder_mmap_lock); + +#ifdef CONFIG_CPU_CACHE_VIPT +	if (cache_is_vipt_aliasing()) { +		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { +			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); +			vma->vm_start += PAGE_SIZE; +		} +	} +#endif +	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); +	if (proc->pages == NULL) { +		ret = -ENOMEM; +		failure_string = "alloc page array"; +		goto err_alloc_pages_failed; +	} +	proc->buffer_size = vma->vm_end - vma->vm_start; + +	vma->vm_ops = &binder_vm_ops; +	vma->vm_private_data = proc; + +	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { +		ret = -ENOMEM; +		failure_string = "alloc small buf"; +		goto err_alloc_small_buf_failed; +	} +	buffer = proc->buffer; +	INIT_LIST_HEAD(&proc->buffers); +	list_add(&buffer->entry, &proc->buffers); +	buffer->free = 1; +	binder_insert_free_buffer(proc, buffer); +	proc->free_async_space = proc->buffer_size / 2; +	barrier(); +	proc->files = get_files_struct(current); +	proc->vma = vma; +	proc->vma_vm_mm = vma->vm_mm; + +	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n", +		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ +	return 0; + +err_alloc_small_buf_failed: +	kfree(proc->pages); +	proc->pages = NULL; +err_alloc_pages_failed: +	mutex_lock(&binder_mmap_lock); +	vfree(proc->buffer); +	proc->buffer = NULL; +err_get_vm_area_failed: +err_already_mapped: +	mutex_unlock(&binder_mmap_lock); +err_bad_arg: +	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", +	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); +	return ret; +} + +static int binder_open(struct inode *nodp, struct file *filp) +{ +	struct binder_proc *proc; + +	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", +		     current->group_leader->pid, current->pid); + +	proc = kzalloc(sizeof(*proc), GFP_KERNEL); +	if (proc == NULL) +		return -ENOMEM; +	get_task_struct(current); +	proc->tsk = current; +	INIT_LIST_HEAD(&proc->todo); +	init_waitqueue_head(&proc->wait); +	proc->default_priority = task_nice(current); + +	binder_lock(__func__); + +	binder_stats_created(BINDER_STAT_PROC); +	hlist_add_head(&proc->proc_node, &binder_procs); +	proc->pid = current->group_leader->pid; 
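+	/*
+	 * The binder_proc created by this open() is identified by the
+	 * thread-group leader's pid, while per-thread state is tracked in
+	 * binder_thread entries keyed by current->pid (see
+	 * binder_get_thread()).  The same pid names the debugfs entry
+	 * created below.
+	 */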
+	INIT_LIST_HEAD(&proc->delivered_death); +	filp->private_data = proc; + +	binder_unlock(__func__); + +	if (binder_debugfs_dir_entry_proc) { +		char strbuf[11]; + +		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); +		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, +			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); +	} + +	return 0; +} + +static int binder_flush(struct file *filp, fl_owner_t id) +{ +	struct binder_proc *proc = filp->private_data; + +	binder_defer_work(proc, BINDER_DEFERRED_FLUSH); + +	return 0; +} + +static void binder_deferred_flush(struct binder_proc *proc) +{ +	struct rb_node *n; +	int wake_count = 0; + +	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { +		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); + +		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; +		if (thread->looper & BINDER_LOOPER_STATE_WAITING) { +			wake_up_interruptible(&thread->wait); +			wake_count++; +		} +	} +	wake_up_interruptible_all(&proc->wait); + +	binder_debug(BINDER_DEBUG_OPEN_CLOSE, +		     "binder_flush: %d woke %d threads\n", proc->pid, +		     wake_count); +} + +static int binder_release(struct inode *nodp, struct file *filp) +{ +	struct binder_proc *proc = filp->private_data; + +	debugfs_remove(proc->debugfs_entry); +	binder_defer_work(proc, BINDER_DEFERRED_RELEASE); + +	return 0; +} + +static int binder_node_release(struct binder_node *node, int refs) +{ +	struct binder_ref *ref; +	int death = 0; + +	list_del_init(&node->work.entry); +	binder_release_work(&node->async_todo); + +	if (hlist_empty(&node->refs)) { +		kfree(node); +		binder_stats_deleted(BINDER_STAT_NODE); + +		return refs; +	} + +	node->proc = NULL; +	node->local_strong_refs = 0; +	node->local_weak_refs = 0; +	hlist_add_head(&node->dead_node, &binder_dead_nodes); + +	hlist_for_each_entry(ref, &node->refs, node_entry) { +		refs++; + +		if (!ref->death) +			continue; + +		death++; + +		if (list_empty(&ref->death->work.entry)) { +			ref->death->work.type = BINDER_WORK_DEAD_BINDER; +			list_add_tail(&ref->death->work.entry, +				      &ref->proc->todo); +			wake_up_interruptible(&ref->proc->wait); +		} else +			BUG(); +	} + +	binder_debug(BINDER_DEBUG_DEAD_BINDER, +		     "node %d now dead, refs %d, death %d\n", +		     node->debug_id, refs, death); + +	return refs; +} + +static void binder_deferred_release(struct binder_proc *proc) +{ +	struct binder_transaction *t; +	struct rb_node *n; +	int threads, nodes, incoming_refs, outgoing_refs, buffers, +		active_transactions, page_count; + +	BUG_ON(proc->vma); +	BUG_ON(proc->files); + +	hlist_del(&proc->proc_node); + +	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { +		binder_debug(BINDER_DEBUG_DEAD_BINDER, +			     "%s: %d context_mgr_node gone\n", +			     __func__, proc->pid); +		binder_context_mgr_node = NULL; +	} + +	threads = 0; +	active_transactions = 0; +	while ((n = rb_first(&proc->threads))) { +		struct binder_thread *thread; + +		thread = rb_entry(n, struct binder_thread, rb_node); +		threads++; +		active_transactions += binder_free_thread(proc, thread); +	} + +	nodes = 0; +	incoming_refs = 0; +	while ((n = rb_first(&proc->nodes))) { +		struct binder_node *node; + +		node = rb_entry(n, struct binder_node, rb_node); +		nodes++; +		rb_erase(&node->rb_node, &proc->nodes); +		incoming_refs = binder_node_release(node, incoming_refs); +	} + +	outgoing_refs = 0; +	while ((n = rb_first(&proc->refs_by_desc))) { +		struct binder_ref *ref; + +		ref = rb_entry(n, struct binder_ref, 
rb_node_desc); +		outgoing_refs++; +		binder_delete_ref(ref); +	} + +	binder_release_work(&proc->todo); +	binder_release_work(&proc->delivered_death); + +	buffers = 0; +	while ((n = rb_first(&proc->allocated_buffers))) { +		struct binder_buffer *buffer; + +		buffer = rb_entry(n, struct binder_buffer, rb_node); + +		t = buffer->transaction; +		if (t) { +			t->buffer = NULL; +			buffer->transaction = NULL; +			pr_err("release proc %d, transaction %d, not freed\n", +			       proc->pid, t->debug_id); +			/*BUG();*/ +		} + +		binder_free_buf(proc, buffer); +		buffers++; +	} + +	binder_stats_deleted(BINDER_STAT_PROC); + +	page_count = 0; +	if (proc->pages) { +		int i; + +		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { +			void *page_addr; + +			if (!proc->pages[i]) +				continue; + +			page_addr = proc->buffer + i * PAGE_SIZE; +			binder_debug(BINDER_DEBUG_BUFFER_ALLOC, +				     "%s: %d: page %d at %p not freed\n", +				     __func__, proc->pid, i, page_addr); +			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); +			__free_page(proc->pages[i]); +			page_count++; +		} +		kfree(proc->pages); +		vfree(proc->buffer); +	} + +	put_task_struct(proc->tsk); + +	binder_debug(BINDER_DEBUG_OPEN_CLOSE, +		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n", +		     __func__, proc->pid, threads, nodes, incoming_refs, +		     outgoing_refs, active_transactions, buffers, page_count); + +	kfree(proc); +} + +static void binder_deferred_func(struct work_struct *work) +{ +	struct binder_proc *proc; +	struct files_struct *files; + +	int defer; + +	do { +		binder_lock(__func__); +		mutex_lock(&binder_deferred_lock); +		if (!hlist_empty(&binder_deferred_list)) { +			proc = hlist_entry(binder_deferred_list.first, +					struct binder_proc, deferred_work_node); +			hlist_del_init(&proc->deferred_work_node); +			defer = proc->deferred_work; +			proc->deferred_work = 0; +		} else { +			proc = NULL; +			defer = 0; +		} +		mutex_unlock(&binder_deferred_lock); + +		files = NULL; +		if (defer & BINDER_DEFERRED_PUT_FILES) { +			files = proc->files; +			if (files) +				proc->files = NULL; +		} + +		if (defer & BINDER_DEFERRED_FLUSH) +			binder_deferred_flush(proc); + +		if (defer & BINDER_DEFERRED_RELEASE) +			binder_deferred_release(proc); /* frees proc */ + +		binder_unlock(__func__); +		if (files) +			put_files_struct(files); +	} while (proc); +} +static DECLARE_WORK(binder_deferred_work, binder_deferred_func); + +static void +binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) +{ +	mutex_lock(&binder_deferred_lock); +	proc->deferred_work |= defer; +	if (hlist_unhashed(&proc->deferred_work_node)) { +		hlist_add_head(&proc->deferred_work_node, +				&binder_deferred_list); +		queue_work(binder_deferred_workqueue, &binder_deferred_work); +	} +	mutex_unlock(&binder_deferred_lock); +} + +static void print_binder_transaction(struct seq_file *m, const char *prefix, +				     struct binder_transaction *t) +{ +	seq_printf(m, +		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", +		   prefix, t->debug_id, t, +		   t->from ? t->from->proc->pid : 0, +		   t->from ? t->from->pid : 0, +		   t->to_proc ? t->to_proc->pid : 0, +		   t->to_thread ? 
t->to_thread->pid : 0, +		   t->code, t->flags, t->priority, t->need_reply); +	if (t->buffer == NULL) { +		seq_puts(m, " buffer free\n"); +		return; +	} +	if (t->buffer->target_node) +		seq_printf(m, " node %d", +			   t->buffer->target_node->debug_id); +	seq_printf(m, " size %zd:%zd data %p\n", +		   t->buffer->data_size, t->buffer->offsets_size, +		   t->buffer->data); +} + +static void print_binder_buffer(struct seq_file *m, const char *prefix, +				struct binder_buffer *buffer) +{ +	seq_printf(m, "%s %d: %p size %zd:%zd %s\n", +		   prefix, buffer->debug_id, buffer->data, +		   buffer->data_size, buffer->offsets_size, +		   buffer->transaction ? "active" : "delivered"); +} + +static void print_binder_work(struct seq_file *m, const char *prefix, +			      const char *transaction_prefix, +			      struct binder_work *w) +{ +	struct binder_node *node; +	struct binder_transaction *t; + +	switch (w->type) { +	case BINDER_WORK_TRANSACTION: +		t = container_of(w, struct binder_transaction, work); +		print_binder_transaction(m, transaction_prefix, t); +		break; +	case BINDER_WORK_TRANSACTION_COMPLETE: +		seq_printf(m, "%stransaction complete\n", prefix); +		break; +	case BINDER_WORK_NODE: +		node = container_of(w, struct binder_node, work); +		seq_printf(m, "%snode work %d: u%016llx c%016llx\n", +			   prefix, node->debug_id, +			   (u64)node->ptr, (u64)node->cookie); +		break; +	case BINDER_WORK_DEAD_BINDER: +		seq_printf(m, "%shas dead binder\n", prefix); +		break; +	case BINDER_WORK_DEAD_BINDER_AND_CLEAR: +		seq_printf(m, "%shas cleared dead binder\n", prefix); +		break; +	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: +		seq_printf(m, "%shas cleared death notification\n", prefix); +		break; +	default: +		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); +		break; +	} +} + +static void print_binder_thread(struct seq_file *m, +				struct binder_thread *thread, +				int print_always) +{ +	struct binder_transaction *t; +	struct binder_work *w; +	size_t start_pos = m->count; +	size_t header_pos; + +	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper); +	header_pos = m->count; +	t = thread->transaction_stack; +	while (t) { +		if (t->from == thread) { +			print_binder_transaction(m, +						 "    outgoing transaction", t); +			t = t->from_parent; +		} else if (t->to_thread == thread) { +			print_binder_transaction(m, +						 "    incoming transaction", t); +			t = t->to_parent; +		} else { +			print_binder_transaction(m, "    bad transaction", t); +			t = NULL; +		} +	} +	list_for_each_entry(w, &thread->todo, entry) { +		print_binder_work(m, "    ", "    pending transaction", w); +	} +	if (!print_always && m->count == header_pos) +		m->count = start_pos; +} + +static void print_binder_node(struct seq_file *m, struct binder_node *node) +{ +	struct binder_ref *ref; +	struct binder_work *w; +	int count; + +	count = 0; +	hlist_for_each_entry(ref, &node->refs, node_entry) +		count++; + +	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d", +		   node->debug_id, (u64)node->ptr, (u64)node->cookie, +		   node->has_strong_ref, node->has_weak_ref, +		   node->local_strong_refs, node->local_weak_refs, +		   node->internal_strong_refs, count); +	if (count) { +		seq_puts(m, " proc"); +		hlist_for_each_entry(ref, &node->refs, node_entry) +			seq_printf(m, " %d", ref->proc->pid); +	} +	seq_puts(m, "\n"); +	list_for_each_entry(w, &node->async_todo, entry) +		print_binder_work(m, "    ", +				  "    pending async transaction", w); +} + +static void 
print_binder_ref(struct seq_file *m, struct binder_ref *ref) +{ +	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n", +		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ", +		   ref->node->debug_id, ref->strong, ref->weak, ref->death); +} + +static void print_binder_proc(struct seq_file *m, +			      struct binder_proc *proc, int print_all) +{ +	struct binder_work *w; +	struct rb_node *n; +	size_t start_pos = m->count; +	size_t header_pos; + +	seq_printf(m, "proc %d\n", proc->pid); +	header_pos = m->count; + +	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) +		print_binder_thread(m, rb_entry(n, struct binder_thread, +						rb_node), print_all); +	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { +		struct binder_node *node = rb_entry(n, struct binder_node, +						    rb_node); +		if (print_all || node->has_async_transaction) +			print_binder_node(m, node); +	} +	if (print_all) { +		for (n = rb_first(&proc->refs_by_desc); +		     n != NULL; +		     n = rb_next(n)) +			print_binder_ref(m, rb_entry(n, struct binder_ref, +						     rb_node_desc)); +	} +	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) +		print_binder_buffer(m, "  buffer", +				    rb_entry(n, struct binder_buffer, rb_node)); +	list_for_each_entry(w, &proc->todo, entry) +		print_binder_work(m, "  ", "  pending transaction", w); +	list_for_each_entry(w, &proc->delivered_death, entry) { +		seq_puts(m, "  has delivered dead binder\n"); +		break; +	} +	if (!print_all && m->count == header_pos) +		m->count = start_pos; +} + +static const char * const binder_return_strings[] = { +	"BR_ERROR", +	"BR_OK", +	"BR_TRANSACTION", +	"BR_REPLY", +	"BR_ACQUIRE_RESULT", +	"BR_DEAD_REPLY", +	"BR_TRANSACTION_COMPLETE", +	"BR_INCREFS", +	"BR_ACQUIRE", +	"BR_RELEASE", +	"BR_DECREFS", +	"BR_ATTEMPT_ACQUIRE", +	"BR_NOOP", +	"BR_SPAWN_LOOPER", +	"BR_FINISHED", +	"BR_DEAD_BINDER", +	"BR_CLEAR_DEATH_NOTIFICATION_DONE", +	"BR_FAILED_REPLY" +}; + +static const char * const binder_command_strings[] = { +	"BC_TRANSACTION", +	"BC_REPLY", +	"BC_ACQUIRE_RESULT", +	"BC_FREE_BUFFER", +	"BC_INCREFS", +	"BC_ACQUIRE", +	"BC_RELEASE", +	"BC_DECREFS", +	"BC_INCREFS_DONE", +	"BC_ACQUIRE_DONE", +	"BC_ATTEMPT_ACQUIRE", +	"BC_REGISTER_LOOPER", +	"BC_ENTER_LOOPER", +	"BC_EXIT_LOOPER", +	"BC_REQUEST_DEATH_NOTIFICATION", +	"BC_CLEAR_DEATH_NOTIFICATION", +	"BC_DEAD_BINDER_DONE" +}; + +static const char * const binder_objstat_strings[] = { +	"proc", +	"thread", +	"node", +	"ref", +	"death", +	"transaction", +	"transaction_complete" +}; + +static void print_binder_stats(struct seq_file *m, const char *prefix, +			       struct binder_stats *stats) +{ +	int i; + +	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != +		     ARRAY_SIZE(binder_command_strings)); +	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { +		if (stats->bc[i]) +			seq_printf(m, "%s%s: %d\n", prefix, +				   binder_command_strings[i], stats->bc[i]); +	} + +	BUILD_BUG_ON(ARRAY_SIZE(stats->br) != +		     ARRAY_SIZE(binder_return_strings)); +	for (i = 0; i < ARRAY_SIZE(stats->br); i++) { +		if (stats->br[i]) +			seq_printf(m, "%s%s: %d\n", prefix, +				   binder_return_strings[i], stats->br[i]); +	} + +	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != +		     ARRAY_SIZE(binder_objstat_strings)); +	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != +		     ARRAY_SIZE(stats->obj_deleted)); +	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { +		if (stats->obj_created[i] || stats->obj_deleted[i]) +			seq_printf(m, "%s%s: active %d total %d\n", prefix, +				
binder_objstat_strings[i], +				stats->obj_created[i] - stats->obj_deleted[i], +				stats->obj_created[i]); +	} +} + +static void print_binder_proc_stats(struct seq_file *m, +				    struct binder_proc *proc) +{ +	struct binder_work *w; +	struct rb_node *n; +	int count, strong, weak; + +	seq_printf(m, "proc %d\n", proc->pid); +	count = 0; +	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) +		count++; +	seq_printf(m, "  threads: %d\n", count); +	seq_printf(m, "  requested threads: %d+%d/%d\n" +			"  ready threads %d\n" +			"  free async space %zd\n", proc->requested_threads, +			proc->requested_threads_started, proc->max_threads, +			proc->ready_threads, proc->free_async_space); +	count = 0; +	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) +		count++; +	seq_printf(m, "  nodes: %d\n", count); +	count = 0; +	strong = 0; +	weak = 0; +	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { +		struct binder_ref *ref = rb_entry(n, struct binder_ref, +						  rb_node_desc); +		count++; +		strong += ref->strong; +		weak += ref->weak; +	} +	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak); + +	count = 0; +	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) +		count++; +	seq_printf(m, "  buffers: %d\n", count); + +	count = 0; +	list_for_each_entry(w, &proc->todo, entry) { +		switch (w->type) { +		case BINDER_WORK_TRANSACTION: +			count++; +			break; +		default: +			break; +		} +	} +	seq_printf(m, "  pending transactions: %d\n", count); + +	print_binder_stats(m, "  ", &proc->stats); +} + + +static int binder_state_show(struct seq_file *m, void *unused) +{ +	struct binder_proc *proc; +	struct binder_node *node; +	int do_lock = !binder_debug_no_lock; + +	if (do_lock) +		binder_lock(__func__); + +	seq_puts(m, "binder state:\n"); + +	if (!hlist_empty(&binder_dead_nodes)) +		seq_puts(m, "dead nodes:\n"); +	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) +		print_binder_node(m, node); + +	hlist_for_each_entry(proc, &binder_procs, proc_node) +		print_binder_proc(m, proc, 1); +	if (do_lock) +		binder_unlock(__func__); +	return 0; +} + +static int binder_stats_show(struct seq_file *m, void *unused) +{ +	struct binder_proc *proc; +	int do_lock = !binder_debug_no_lock; + +	if (do_lock) +		binder_lock(__func__); + +	seq_puts(m, "binder stats:\n"); + +	print_binder_stats(m, "", &binder_stats); + +	hlist_for_each_entry(proc, &binder_procs, proc_node) +		print_binder_proc_stats(m, proc); +	if (do_lock) +		binder_unlock(__func__); +	return 0; +} + +static int binder_transactions_show(struct seq_file *m, void *unused) +{ +	struct binder_proc *proc; +	int do_lock = !binder_debug_no_lock; + +	if (do_lock) +		binder_lock(__func__); + +	seq_puts(m, "binder transactions:\n"); +	hlist_for_each_entry(proc, &binder_procs, proc_node) +		print_binder_proc(m, proc, 0); +	if (do_lock) +		binder_unlock(__func__); +	return 0; +} + +static int binder_proc_show(struct seq_file *m, void *unused) +{ +	struct binder_proc *proc = m->private; +	int do_lock = !binder_debug_no_lock; + +	if (do_lock) +		binder_lock(__func__); +	seq_puts(m, "binder proc state:\n"); +	print_binder_proc(m, proc, 1); +	if (do_lock) +		binder_unlock(__func__); +	return 0; +} + +static void print_binder_transaction_log_entry(struct seq_file *m, +					struct binder_transaction_log_entry *e) +{ +	seq_printf(m, +		   "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", +		   e->debug_id, (e->call_type == 2) ? "reply" : +		   ((e->call_type == 1) ? 
"async" : "call "), e->from_proc, +		   e->from_thread, e->to_proc, e->to_thread, e->to_node, +		   e->target_handle, e->data_size, e->offsets_size); +} + +static int binder_transaction_log_show(struct seq_file *m, void *unused) +{ +	struct binder_transaction_log *log = m->private; +	int i; + +	if (log->full) { +		for (i = log->next; i < ARRAY_SIZE(log->entry); i++) +			print_binder_transaction_log_entry(m, &log->entry[i]); +	} +	for (i = 0; i < log->next; i++) +		print_binder_transaction_log_entry(m, &log->entry[i]); +	return 0; +} + +static const struct file_operations binder_fops = { +	.owner = THIS_MODULE, +	.poll = binder_poll, +	.unlocked_ioctl = binder_ioctl, +	.compat_ioctl = binder_ioctl, +	.mmap = binder_mmap, +	.open = binder_open, +	.flush = binder_flush, +	.release = binder_release, +}; + +static struct miscdevice binder_miscdev = { +	.minor = MISC_DYNAMIC_MINOR, +	.name = "binder", +	.fops = &binder_fops +}; + +BINDER_DEBUG_ENTRY(state); +BINDER_DEBUG_ENTRY(stats); +BINDER_DEBUG_ENTRY(transactions); +BINDER_DEBUG_ENTRY(transaction_log); + +static int __init binder_init(void) +{ +	int ret; + +	binder_deferred_workqueue = create_singlethread_workqueue("binder"); +	if (!binder_deferred_workqueue) +		return -ENOMEM; + +	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); +	if (binder_debugfs_dir_entry_root) +		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", +						 binder_debugfs_dir_entry_root); +	ret = misc_register(&binder_miscdev); +	if (binder_debugfs_dir_entry_root) { +		debugfs_create_file("state", +				    S_IRUGO, +				    binder_debugfs_dir_entry_root, +				    NULL, +				    &binder_state_fops); +		debugfs_create_file("stats", +				    S_IRUGO, +				    binder_debugfs_dir_entry_root, +				    NULL, +				    &binder_stats_fops); +		debugfs_create_file("transactions", +				    S_IRUGO, +				    binder_debugfs_dir_entry_root, +				    NULL, +				    &binder_transactions_fops); +		debugfs_create_file("transaction_log", +				    S_IRUGO, +				    binder_debugfs_dir_entry_root, +				    &binder_transaction_log, +				    &binder_transaction_log_fops); +		debugfs_create_file("failed_transaction_log", +				    S_IRUGO, +				    binder_debugfs_dir_entry_root, +				    &binder_transaction_log_failed, +				    &binder_transaction_log_fops); +	} +	return ret; +} + +device_initcall(binder_init); + +#define CREATE_TRACE_POINTS +#include "binder_trace.h" + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h new file mode 100644 index 00000000000..eb0834656df --- /dev/null +++ b/drivers/staging/android/binder.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2008 Google, Inc. + * + * Based on, but no longer compatible with, the original + * OpenBinder.org binder driver interface, which is: + * + * Copyright (c) 2005 Palmsource, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_BINDER_H +#define _LINUX_BINDER_H + +#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT +#define BINDER_IPC_32BIT 1 +#endif + +#include "uapi/binder.h" + +#endif /* _LINUX_BINDER_H */ + diff --git a/drivers/staging/android/binder_trace.h b/drivers/staging/android/binder_trace.h new file mode 100644 index 00000000000..7f20f3dc836 --- /dev/null +++ b/drivers/staging/android/binder_trace.h @@ -0,0 +1,329 @@ +/* + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM binder + +#if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _BINDER_TRACE_H + +#include <linux/tracepoint.h> + +struct binder_buffer; +struct binder_node; +struct binder_proc; +struct binder_ref; +struct binder_thread; +struct binder_transaction; + +TRACE_EVENT(binder_ioctl, +	TP_PROTO(unsigned int cmd, unsigned long arg), +	TP_ARGS(cmd, arg), + +	TP_STRUCT__entry( +		__field(unsigned int, cmd) +		__field(unsigned long, arg) +	), +	TP_fast_assign( +		__entry->cmd = cmd; +		__entry->arg = arg; +	), +	TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg) +); + +DECLARE_EVENT_CLASS(binder_lock_class, +	TP_PROTO(const char *tag), +	TP_ARGS(tag), +	TP_STRUCT__entry( +		__field(const char *, tag) +	), +	TP_fast_assign( +		__entry->tag = tag; +	), +	TP_printk("tag=%s", __entry->tag) +); + +#define DEFINE_BINDER_LOCK_EVENT(name)	\ +DEFINE_EVENT(binder_lock_class, name,	\ +	TP_PROTO(const char *func), \ +	TP_ARGS(func)) + +DEFINE_BINDER_LOCK_EVENT(binder_lock); +DEFINE_BINDER_LOCK_EVENT(binder_locked); +DEFINE_BINDER_LOCK_EVENT(binder_unlock); + +DECLARE_EVENT_CLASS(binder_function_return_class, +	TP_PROTO(int ret), +	TP_ARGS(ret), +	TP_STRUCT__entry( +		__field(int, ret) +	), +	TP_fast_assign( +		__entry->ret = ret; +	), +	TP_printk("ret=%d", __entry->ret) +); + +#define DEFINE_BINDER_FUNCTION_RETURN_EVENT(name)	\ +DEFINE_EVENT(binder_function_return_class, name,	\ +	TP_PROTO(int ret), \ +	TP_ARGS(ret)) + +DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done); +DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done); +DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done); + +TRACE_EVENT(binder_wait_for_work, +	TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo), +	TP_ARGS(proc_work, transaction_stack, thread_todo), + +	TP_STRUCT__entry( +		__field(bool, proc_work) +		__field(bool, transaction_stack) +		__field(bool, thread_todo) +	), +	TP_fast_assign( +		__entry->proc_work = proc_work; +		__entry->transaction_stack = transaction_stack; +		__entry->thread_todo = thread_todo; +	), +	TP_printk("proc_work=%d transaction_stack=%d thread_todo=%d", +		  __entry->proc_work, __entry->transaction_stack, +		  __entry->thread_todo) +); + +TRACE_EVENT(binder_transaction, +	TP_PROTO(bool reply, struct binder_transaction *t, +		 struct binder_node *target_node), +	TP_ARGS(reply, t, target_node), +	TP_STRUCT__entry( +		__field(int, debug_id) +		__field(int, target_node) +		__field(int, to_proc) +		__field(int, to_thread) +		__field(int, reply) +		__field(unsigned int, code) +		__field(unsigned int, 
flags) +	), +	TP_fast_assign( +		__entry->debug_id = t->debug_id; +		__entry->target_node = target_node ? target_node->debug_id : 0; +		__entry->to_proc = t->to_proc->pid; +		__entry->to_thread = t->to_thread ? t->to_thread->pid : 0; +		__entry->reply = reply; +		__entry->code = t->code; +		__entry->flags = t->flags; +	), +	TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x", +		  __entry->debug_id, __entry->target_node, +		  __entry->to_proc, __entry->to_thread, +		  __entry->reply, __entry->flags, __entry->code) +); + +TRACE_EVENT(binder_transaction_received, +	TP_PROTO(struct binder_transaction *t), +	TP_ARGS(t), + +	TP_STRUCT__entry( +		__field(int, debug_id) +	), +	TP_fast_assign( +		__entry->debug_id = t->debug_id; +	), +	TP_printk("transaction=%d", __entry->debug_id) +); + +TRACE_EVENT(binder_transaction_node_to_ref, +	TP_PROTO(struct binder_transaction *t, struct binder_node *node, +		 struct binder_ref *ref), +	TP_ARGS(t, node, ref), + +	TP_STRUCT__entry( +		__field(int, debug_id) +		__field(int, node_debug_id) +		__field(binder_uintptr_t, node_ptr) +		__field(int, ref_debug_id) +		__field(uint32_t, ref_desc) +	), +	TP_fast_assign( +		__entry->debug_id = t->debug_id; +		__entry->node_debug_id = node->debug_id; +		__entry->node_ptr = node->ptr; +		__entry->ref_debug_id = ref->debug_id; +		__entry->ref_desc = ref->desc; +	), +	TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d", +		  __entry->debug_id, __entry->node_debug_id, +		  (u64)__entry->node_ptr, +		  __entry->ref_debug_id, __entry->ref_desc) +); + +TRACE_EVENT(binder_transaction_ref_to_node, +	TP_PROTO(struct binder_transaction *t, struct binder_ref *ref), +	TP_ARGS(t, ref), + +	TP_STRUCT__entry( +		__field(int, debug_id) +		__field(int, ref_debug_id) +		__field(uint32_t, ref_desc) +		__field(int, node_debug_id) +		__field(binder_uintptr_t, node_ptr) +	), +	TP_fast_assign( +		__entry->debug_id = t->debug_id; +		__entry->ref_debug_id = ref->debug_id; +		__entry->ref_desc = ref->desc; +		__entry->node_debug_id = ref->node->debug_id; +		__entry->node_ptr = ref->node->ptr; +	), +	TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx", +		  __entry->debug_id, __entry->node_debug_id, +		  __entry->ref_debug_id, __entry->ref_desc, +		  (u64)__entry->node_ptr) +); + +TRACE_EVENT(binder_transaction_ref_to_ref, +	TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref, +		 struct binder_ref *dest_ref), +	TP_ARGS(t, src_ref, dest_ref), + +	TP_STRUCT__entry( +		__field(int, debug_id) +		__field(int, node_debug_id) +		__field(int, src_ref_debug_id) +		__field(uint32_t, src_ref_desc) +		__field(int, dest_ref_debug_id) +		__field(uint32_t, dest_ref_desc) +	), +	TP_fast_assign( +		__entry->debug_id = t->debug_id; +		__entry->node_debug_id = src_ref->node->debug_id; +		__entry->src_ref_debug_id = src_ref->debug_id; +		__entry->src_ref_desc = src_ref->desc; +		__entry->dest_ref_debug_id = dest_ref->debug_id; +		__entry->dest_ref_desc = dest_ref->desc; +	), +	TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ref=%d dest_desc=%d", +		  __entry->debug_id, __entry->node_debug_id, +		  __entry->src_ref_debug_id, __entry->src_ref_desc, +		  __entry->dest_ref_debug_id, __entry->dest_ref_desc) +); + +TRACE_EVENT(binder_transaction_fd, +	TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd), +	TP_ARGS(t, src_fd, dest_fd), + +	TP_STRUCT__entry( +		__field(int, debug_id) +		__field(int, src_fd) +		__field(int, 
dest_fd) +	), +	TP_fast_assign( +		__entry->debug_id = t->debug_id; +		__entry->src_fd = src_fd; +		__entry->dest_fd = dest_fd; +	), +	TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d", +		  __entry->debug_id, __entry->src_fd, __entry->dest_fd) +); + +DECLARE_EVENT_CLASS(binder_buffer_class, +	TP_PROTO(struct binder_buffer *buf), +	TP_ARGS(buf), +	TP_STRUCT__entry( +		__field(int, debug_id) +		__field(size_t, data_size) +		__field(size_t, offsets_size) +	), +	TP_fast_assign( +		__entry->debug_id = buf->debug_id; +		__entry->data_size = buf->data_size; +		__entry->offsets_size = buf->offsets_size; +	), +	TP_printk("transaction=%d data_size=%zd offsets_size=%zd", +		  __entry->debug_id, __entry->data_size, __entry->offsets_size) +); + +DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf, +	TP_PROTO(struct binder_buffer *buffer), +	TP_ARGS(buffer)); + +DEFINE_EVENT(binder_buffer_class, binder_transaction_buffer_release, +	TP_PROTO(struct binder_buffer *buffer), +	TP_ARGS(buffer)); + +DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release, +	TP_PROTO(struct binder_buffer *buffer), +	TP_ARGS(buffer)); + +TRACE_EVENT(binder_update_page_range, +	TP_PROTO(struct binder_proc *proc, bool allocate, +		 void *start, void *end), +	TP_ARGS(proc, allocate, start, end), +	TP_STRUCT__entry( +		__field(int, proc) +		__field(bool, allocate) +		__field(size_t, offset) +		__field(size_t, size) +	), +	TP_fast_assign( +		__entry->proc = proc->pid; +		__entry->allocate = allocate; +		__entry->offset = start - proc->buffer; +		__entry->size = end - start; +	), +	TP_printk("proc=%d allocate=%d offset=%zu size=%zu", +		  __entry->proc, __entry->allocate, +		  __entry->offset, __entry->size) +); + +TRACE_EVENT(binder_command, +	TP_PROTO(uint32_t cmd), +	TP_ARGS(cmd), +	TP_STRUCT__entry( +		__field(uint32_t, cmd) +	), +	TP_fast_assign( +		__entry->cmd = cmd; +	), +	TP_printk("cmd=0x%x %s", +		  __entry->cmd, +		  _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_command_strings) ? +			  binder_command_strings[_IOC_NR(__entry->cmd)] : +			  "unknown") +); + +TRACE_EVENT(binder_return, +	TP_PROTO(uint32_t cmd), +	TP_ARGS(cmd), +	TP_STRUCT__entry( +		__field(uint32_t, cmd) +	), +	TP_fast_assign( +		__entry->cmd = cmd; +	), +	TP_printk("cmd=0x%x %s", +		  __entry->cmd, +		  _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_return_strings) ? +			  binder_return_strings[_IOC_NR(__entry->cmd)] : +			  "unknown") +); + +#endif /* _BINDER_TRACE_H */ + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE binder_trace +#include <trace/define_trace.h> diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig new file mode 100644 index 00000000000..0f8fec1f84e --- /dev/null +++ b/drivers/staging/android/ion/Kconfig @@ -0,0 +1,35 @@ +menuconfig ION +	bool "Ion Memory Manager" +	depends on HAVE_MEMBLOCK +	select GENERIC_ALLOCATOR +	select DMA_SHARED_BUFFER +	---help--- +	  Chose this option to enable the ION Memory Manager, +	  used by Android to efficiently allocate buffers +	  from userspace that can be shared between drivers. +	  If you're not using Android its probably safe to +	  say N here. + +config ION_TEST +	tristate "Ion Test Device" +	depends on ION +	help +	  Choose this option to create a device that can be used to test the +	  kernel and device side ION functions. 
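+# Illustrative example only: a defconfig fragment that enables the ION core
+# together with the test and dummy drivers could look like
+#
+#   CONFIG_ION=y
+#   CONFIG_ION_TEST=m
+#   CONFIG_ION_DUMMY=y
+#
+# ION and ION_DUMMY are bool options and are always built in when enabled;
+# ION_TEST is tristate and may be built as a module.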
+ +config ION_DUMMY +	bool "Dummy Ion driver" +	depends on ION +	help +	  Provides a dummy ION driver that registers the +	  /dev/ion device and some basic heaps. This can +	  be used for testing the ION infrastructure if +	  one doesn't have access to hardware drivers that +	  use ION. + +config ION_TEGRA +	tristate "Ion for Tegra" +	depends on ARCH_TEGRA && ION +	help +	  Choose this option if you wish to use ion on an nVidia Tegra. + diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile new file mode 100644 index 00000000000..b56fd2bf2b4 --- /dev/null +++ b/drivers/staging/android/ion/Makefile @@ -0,0 +1,10 @@ +obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \ +			ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o +obj-$(CONFIG_ION_TEST) += ion_test.o +ifdef CONFIG_COMPAT +obj-$(CONFIG_ION) += compat_ion.o +endif + +obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o +obj-$(CONFIG_ION_TEGRA) += tegra/ + diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c new file mode 100644 index 00000000000..ee3a7380e53 --- /dev/null +++ b/drivers/staging/android/ion/compat_ion.c @@ -0,0 +1,195 @@ +/* + * drivers/staging/android/ion/compat_ion.c + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/compat.h> +#include <linux/fs.h> +#include <linux/uaccess.h> + +#include "ion.h" +#include "compat_ion.h" + +/* See drivers/staging/android/uapi/ion.h for the definition of these structs */ +struct compat_ion_allocation_data { +	compat_size_t len; +	compat_size_t align; +	compat_uint_t heap_id_mask; +	compat_uint_t flags; +	compat_int_t handle; +}; + +struct compat_ion_custom_data { +	compat_uint_t cmd; +	compat_ulong_t arg; +}; + +struct compat_ion_handle_data { +	compat_int_t handle; +}; + +#define COMPAT_ION_IOC_ALLOC	_IOWR(ION_IOC_MAGIC, 0, \ +				      struct compat_ion_allocation_data) +#define COMPAT_ION_IOC_FREE	_IOWR(ION_IOC_MAGIC, 1, \ +				      struct compat_ion_handle_data) +#define COMPAT_ION_IOC_CUSTOM	_IOWR(ION_IOC_MAGIC, 6, \ +				      struct compat_ion_custom_data) + +static int compat_get_ion_allocation_data( +			struct compat_ion_allocation_data __user *data32, +			struct ion_allocation_data __user *data) +{ +	compat_size_t s; +	compat_uint_t u; +	compat_int_t i; +	int err; + +	err = get_user(s, &data32->len); +	err |= put_user(s, &data->len); +	err |= get_user(s, &data32->align); +	err |= put_user(s, &data->align); +	err |= get_user(u, &data32->heap_id_mask); +	err |= put_user(u, &data->heap_id_mask); +	err |= get_user(u, &data32->flags); +	err |= put_user(u, &data->flags); +	err |= get_user(i, &data32->handle); +	err |= put_user(i, &data->handle); + +	return err; +} + +static int compat_get_ion_handle_data( +			struct compat_ion_handle_data __user *data32, +			struct ion_handle_data __user *data) +{ +	compat_int_t i; +	int err; + +	err = get_user(i, &data32->handle); +	err |= put_user(i, &data->handle); + +	return err; +} + +static int compat_put_ion_allocation_data( +			struct compat_ion_allocation_data __user 
*data32, +			struct ion_allocation_data __user *data) +{ +	compat_size_t s; +	compat_uint_t u; +	compat_int_t i; +	int err; + +	err = get_user(s, &data->len); +	err |= put_user(s, &data32->len); +	err |= get_user(s, &data->align); +	err |= put_user(s, &data32->align); +	err |= get_user(u, &data->heap_id_mask); +	err |= put_user(u, &data32->heap_id_mask); +	err |= get_user(u, &data->flags); +	err |= put_user(u, &data32->flags); +	err |= get_user(i, &data->handle); +	err |= put_user(i, &data32->handle); + +	return err; +} + +static int compat_get_ion_custom_data( +			struct compat_ion_custom_data __user *data32, +			struct ion_custom_data __user *data) +{ +	compat_uint_t cmd; +	compat_ulong_t arg; +	int err; + +	err = get_user(cmd, &data32->cmd); +	err |= put_user(cmd, &data->cmd); +	err |= get_user(arg, &data32->arg); +	err |= put_user(arg, &data->arg); + +	return err; +}; + +long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ +	long ret; + +	if (!filp->f_op || !filp->f_op->unlocked_ioctl) +		return -ENOTTY; + +	switch (cmd) { +	case COMPAT_ION_IOC_ALLOC: +	{ +		struct compat_ion_allocation_data __user *data32; +		struct ion_allocation_data __user *data; +		int err; + +		data32 = compat_ptr(arg); +		data = compat_alloc_user_space(sizeof(*data)); +		if (data == NULL) +			return -EFAULT; + +		err = compat_get_ion_allocation_data(data32, data); +		if (err) +			return err; +		ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC, +							(unsigned long)data); +		err = compat_put_ion_allocation_data(data32, data); +		return ret ? ret : err; +	} +	case COMPAT_ION_IOC_FREE: +	{ +		struct compat_ion_handle_data __user *data32; +		struct ion_handle_data __user *data; +		int err; + +		data32 = compat_ptr(arg); +		data = compat_alloc_user_space(sizeof(*data)); +		if (data == NULL) +			return -EFAULT; + +		err = compat_get_ion_handle_data(data32, data); +		if (err) +			return err; + +		return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE, +							(unsigned long)data); +	} +	case COMPAT_ION_IOC_CUSTOM: { +		struct compat_ion_custom_data __user *data32; +		struct ion_custom_data __user *data; +		int err; + +		data32 = compat_ptr(arg); +		data = compat_alloc_user_space(sizeof(*data)); +		if (data == NULL) +			return -EFAULT; + +		err = compat_get_ion_custom_data(data32, data); +		if (err) +			return err; + +		return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM, +							(unsigned long)data); +	} +	case ION_IOC_SHARE: +	case ION_IOC_MAP: +	case ION_IOC_IMPORT: +	case ION_IOC_SYNC: +		return filp->f_op->unlocked_ioctl(filp, cmd, +						(unsigned long)compat_ptr(arg)); +	default: +		return -ENOIOCTLCMD; +	} +} diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h new file mode 100644 index 00000000000..c2ad5893dfd --- /dev/null +++ b/drivers/staging/android/ion/compat_ion.h @@ -0,0 +1,30 @@ +/* + + * drivers/staging/android/ion/compat_ion.h + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_COMPAT_ION_H +#define _LINUX_COMPAT_ION_H + +#if IS_ENABLED(CONFIG_COMPAT) + +long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); + +#else + +#define compat_ion_ioctl  NULL + +#endif /* CONFIG_COMPAT */ +#endif /* _LINUX_COMPAT_ION_H */ diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c new file mode 100644 index 00000000000..389b8f67a2e --- /dev/null +++ b/drivers/staging/android/ion/ion.c @@ -0,0 +1,1645 @@ +/* + + * drivers/staging/android/ion/ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/file.h> +#include <linux/freezer.h> +#include <linux/fs.h> +#include <linux/anon_inodes.h> +#include <linux/kthread.h> +#include <linux/list.h> +#include <linux/memblock.h> +#include <linux/miscdevice.h> +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/mm_types.h> +#include <linux/rbtree.h> +#include <linux/slab.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> +#include <linux/debugfs.h> +#include <linux/dma-buf.h> +#include <linux/idr.h> + +#include "ion.h" +#include "ion_priv.h" +#include "compat_ion.h" + +/** + * struct ion_device - the metadata of the ion device node + * @dev:		the actual misc device + * @buffers:		an rb tree of all the existing buffers + * @buffer_lock:	lock protecting the tree of buffers + * @lock:		rwsem protecting the tree of heaps and clients + * @heaps:		list of all the heaps in the system + * @user_clients:	list of all the clients created from userspace + */ +struct ion_device { +	struct miscdevice dev; +	struct rb_root buffers; +	struct mutex buffer_lock; +	struct rw_semaphore lock; +	struct plist_head heaps; +	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd, +			     unsigned long arg); +	struct rb_root clients; +	struct dentry *debug_root; +	struct dentry *heaps_debug_root; +	struct dentry *clients_debug_root; +}; + +/** + * struct ion_client - a process/hw block local address space + * @node:		node in the tree of all clients + * @dev:		backpointer to ion device + * @handles:		an rb tree of all the handles in this client + * @idr:		an idr space for allocating handle ids + * @lock:		lock protecting the tree of handles + * @name:		used for debugging + * @display_name:	used for debugging (unique version of @name) + * @display_serial:	used for debugging (to make display_name unique) + * @task:		used for debugging + * + * A client represents a list of buffers this client may access. + * The mutex stored here is used to protect both handles tree + * as well as the handles themselves, and should be held while modifying either. 
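+ * The remaining fields, @pid and @debug_root, record the client task's pid
+ * and the client's debugfs entry; both are used for debugging output.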
+ */ +struct ion_client { +	struct rb_node node; +	struct ion_device *dev; +	struct rb_root handles; +	struct idr idr; +	struct mutex lock; +	const char *name; +	char *display_name; +	int display_serial; +	struct task_struct *task; +	pid_t pid; +	struct dentry *debug_root; +}; + +/** + * ion_handle - a client local reference to a buffer + * @ref:		reference count + * @client:		back pointer to the client the buffer resides in + * @buffer:		pointer to the buffer + * @node:		node in the client's handle rbtree + * @kmap_cnt:		count of times this client has mapped to kernel + * @id:			client-unique id allocated by client->idr + * + * Modifications to node, map_cnt or mapping should be protected by the + * lock in the client.  Other fields are never changed after initialization. + */ +struct ion_handle { +	struct kref ref; +	struct ion_client *client; +	struct ion_buffer *buffer; +	struct rb_node node; +	unsigned int kmap_cnt; +	int id; +}; + +bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer) +{ +	return (buffer->flags & ION_FLAG_CACHED) && +		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC); +} + +bool ion_buffer_cached(struct ion_buffer *buffer) +{ +	return !!(buffer->flags & ION_FLAG_CACHED); +} + +static inline struct page *ion_buffer_page(struct page *page) +{ +	return (struct page *)((unsigned long)page & ~(1UL)); +} + +static inline bool ion_buffer_page_is_dirty(struct page *page) +{ +	return !!((unsigned long)page & 1UL); +} + +static inline void ion_buffer_page_dirty(struct page **page) +{ +	*page = (struct page *)((unsigned long)(*page) | 1UL); +} + +static inline void ion_buffer_page_clean(struct page **page) +{ +	*page = (struct page *)((unsigned long)(*page) & ~(1UL)); +} + +/* this function should only be called while dev->lock is held */ +static void ion_buffer_add(struct ion_device *dev, +			   struct ion_buffer *buffer) +{ +	struct rb_node **p = &dev->buffers.rb_node; +	struct rb_node *parent = NULL; +	struct ion_buffer *entry; + +	while (*p) { +		parent = *p; +		entry = rb_entry(parent, struct ion_buffer, node); + +		if (buffer < entry) { +			p = &(*p)->rb_left; +		} else if (buffer > entry) { +			p = &(*p)->rb_right; +		} else { +			pr_err("%s: buffer already found.", __func__); +			BUG(); +		} +	} + +	rb_link_node(&buffer->node, parent, p); +	rb_insert_color(&buffer->node, &dev->buffers); +} + +/* this function should only be called while dev->lock is held */ +static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, +				     struct ion_device *dev, +				     unsigned long len, +				     unsigned long align, +				     unsigned long flags) +{ +	struct ion_buffer *buffer; +	struct sg_table *table; +	struct scatterlist *sg; +	int i, ret; + +	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); +	if (!buffer) +		return ERR_PTR(-ENOMEM); + +	buffer->heap = heap; +	buffer->flags = flags; +	kref_init(&buffer->ref); + +	ret = heap->ops->allocate(heap, buffer, len, align, flags); + +	if (ret) { +		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) +			goto err2; + +		ion_heap_freelist_drain(heap, 0); +		ret = heap->ops->allocate(heap, buffer, len, align, +					  flags); +		if (ret) +			goto err2; +	} + +	buffer->dev = dev; +	buffer->size = len; + +	table = heap->ops->map_dma(heap, buffer); +	if (WARN_ONCE(table == NULL, +			"heap->ops->map_dma should return ERR_PTR on error")) +		table = ERR_PTR(-EINVAL); +	if (IS_ERR(table)) { +		heap->ops->free(buffer); +		kfree(buffer); +		return ERR_CAST(table); +	} +	buffer->sg_table = table; +	if 
(ion_buffer_fault_user_mappings(buffer)) { +		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; +		struct scatterlist *sg; +		int i, j, k = 0; + +		buffer->pages = vmalloc(sizeof(struct page *) * num_pages); +		if (!buffer->pages) { +			ret = -ENOMEM; +			goto err1; +		} + +		for_each_sg(table->sgl, sg, table->nents, i) { +			struct page *page = sg_page(sg); + +			for (j = 0; j < sg->length / PAGE_SIZE; j++) +				buffer->pages[k++] = page++; +		} + +		if (ret) +			goto err; +	} + +	buffer->dev = dev; +	buffer->size = len; +	INIT_LIST_HEAD(&buffer->vmas); +	mutex_init(&buffer->lock); +	/* this will set up dma addresses for the sglist -- it is not +	   technically correct as per the dma api -- a specific +	   device isn't really taking ownership here.  However, in practice on +	   our systems the only dma_address space is physical addresses. +	   Additionally, we can't afford the overhead of invalidating every +	   allocation via dma_map_sg. The implicit contract here is that +	   memory comming from the heaps is ready for dma, ie if it has a +	   cached mapping that mapping has been invalidated */ +	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) +		sg_dma_address(sg) = sg_phys(sg); +	mutex_lock(&dev->buffer_lock); +	ion_buffer_add(dev, buffer); +	mutex_unlock(&dev->buffer_lock); +	return buffer; + +err: +	heap->ops->unmap_dma(heap, buffer); +	heap->ops->free(buffer); +err1: +	if (buffer->pages) +		vfree(buffer->pages); +err2: +	kfree(buffer); +	return ERR_PTR(ret); +} + +void ion_buffer_destroy(struct ion_buffer *buffer) +{ +	if (WARN_ON(buffer->kmap_cnt > 0)) +		buffer->heap->ops->unmap_kernel(buffer->heap, buffer); +	buffer->heap->ops->unmap_dma(buffer->heap, buffer); +	buffer->heap->ops->free(buffer); +	if (buffer->pages) +		vfree(buffer->pages); +	kfree(buffer); +} + +static void _ion_buffer_destroy(struct kref *kref) +{ +	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); +	struct ion_heap *heap = buffer->heap; +	struct ion_device *dev = buffer->dev; + +	mutex_lock(&dev->buffer_lock); +	rb_erase(&buffer->node, &dev->buffers); +	mutex_unlock(&dev->buffer_lock); + +	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) +		ion_heap_freelist_add(heap, buffer); +	else +		ion_buffer_destroy(buffer); +} + +static void ion_buffer_get(struct ion_buffer *buffer) +{ +	kref_get(&buffer->ref); +} + +static int ion_buffer_put(struct ion_buffer *buffer) +{ +	return kref_put(&buffer->ref, _ion_buffer_destroy); +} + +static void ion_buffer_add_to_handle(struct ion_buffer *buffer) +{ +	mutex_lock(&buffer->lock); +	buffer->handle_count++; +	mutex_unlock(&buffer->lock); +} + +static void ion_buffer_remove_from_handle(struct ion_buffer *buffer) +{ +	/* +	 * when a buffer is removed from a handle, if it is not in +	 * any other handles, copy the taskcomm and the pid of the +	 * process it's being removed from into the buffer.  At this +	 * point there will be no way to track what processes this buffer is +	 * being used by, it only exists as a dma_buf file descriptor. 
+	 * The taskcomm and pid can provide a debug hint as to where this fd +	 * is in the system +	 */ +	mutex_lock(&buffer->lock); +	buffer->handle_count--; +	BUG_ON(buffer->handle_count < 0); +	if (!buffer->handle_count) { +		struct task_struct *task; + +		task = current->group_leader; +		get_task_comm(buffer->task_comm, task); +		buffer->pid = task_pid_nr(task); +	} +	mutex_unlock(&buffer->lock); +} + +static struct ion_handle *ion_handle_create(struct ion_client *client, +				     struct ion_buffer *buffer) +{ +	struct ion_handle *handle; + +	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); +	if (!handle) +		return ERR_PTR(-ENOMEM); +	kref_init(&handle->ref); +	RB_CLEAR_NODE(&handle->node); +	handle->client = client; +	ion_buffer_get(buffer); +	ion_buffer_add_to_handle(buffer); +	handle->buffer = buffer; + +	return handle; +} + +static void ion_handle_kmap_put(struct ion_handle *); + +static void ion_handle_destroy(struct kref *kref) +{ +	struct ion_handle *handle = container_of(kref, struct ion_handle, ref); +	struct ion_client *client = handle->client; +	struct ion_buffer *buffer = handle->buffer; + +	mutex_lock(&buffer->lock); +	while (handle->kmap_cnt) +		ion_handle_kmap_put(handle); +	mutex_unlock(&buffer->lock); + +	idr_remove(&client->idr, handle->id); +	if (!RB_EMPTY_NODE(&handle->node)) +		rb_erase(&handle->node, &client->handles); + +	ion_buffer_remove_from_handle(buffer); +	ion_buffer_put(buffer); + +	kfree(handle); +} + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) +{ +	return handle->buffer; +} + +static void ion_handle_get(struct ion_handle *handle) +{ +	kref_get(&handle->ref); +} + +static int ion_handle_put(struct ion_handle *handle) +{ +	struct ion_client *client = handle->client; +	int ret; + +	mutex_lock(&client->lock); +	ret = kref_put(&handle->ref, ion_handle_destroy); +	mutex_unlock(&client->lock); + +	return ret; +} + +static struct ion_handle *ion_handle_lookup(struct ion_client *client, +					    struct ion_buffer *buffer) +{ +	struct rb_node *n = client->handles.rb_node; + +	while (n) { +		struct ion_handle *entry = rb_entry(n, struct ion_handle, node); + +		if (buffer < entry->buffer) +			n = n->rb_left; +		else if (buffer > entry->buffer) +			n = n->rb_right; +		else +			return entry; +	} +	return ERR_PTR(-EINVAL); +} + +static struct ion_handle *ion_handle_get_by_id(struct ion_client *client, +						int id) +{ +	struct ion_handle *handle; + +	mutex_lock(&client->lock); +	handle = idr_find(&client->idr, id); +	if (handle) +		ion_handle_get(handle); +	mutex_unlock(&client->lock); + +	return handle ? 
handle : ERR_PTR(-EINVAL); +} + +static bool ion_handle_validate(struct ion_client *client, +				struct ion_handle *handle) +{ +	WARN_ON(!mutex_is_locked(&client->lock)); +	return idr_find(&client->idr, handle->id) == handle; +} + +static int ion_handle_add(struct ion_client *client, struct ion_handle *handle) +{ +	int id; +	struct rb_node **p = &client->handles.rb_node; +	struct rb_node *parent = NULL; +	struct ion_handle *entry; + +	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL); +	if (id < 0) +		return id; + +	handle->id = id; + +	while (*p) { +		parent = *p; +		entry = rb_entry(parent, struct ion_handle, node); + +		if (handle->buffer < entry->buffer) +			p = &(*p)->rb_left; +		else if (handle->buffer > entry->buffer) +			p = &(*p)->rb_right; +		else +			WARN(1, "%s: buffer already found.", __func__); +	} + +	rb_link_node(&handle->node, parent, p); +	rb_insert_color(&handle->node, &client->handles); + +	return 0; +} + +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, +			     size_t align, unsigned int heap_id_mask, +			     unsigned int flags) +{ +	struct ion_handle *handle; +	struct ion_device *dev = client->dev; +	struct ion_buffer *buffer = NULL; +	struct ion_heap *heap; +	int ret; + +	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__, +		 len, align, heap_id_mask, flags); +	/* +	 * traverse the list of heaps available in this system in priority +	 * order.  If the heap type is supported by the client, and matches the +	 * request of the caller allocate from it.  Repeat until allocate has +	 * succeeded or all heaps have been tried +	 */ +	len = PAGE_ALIGN(len); + +	if (!len) +		return ERR_PTR(-EINVAL); + +	down_read(&dev->lock); +	plist_for_each_entry(heap, &dev->heaps, node) { +		/* if the caller didn't specify this heap id */ +		if (!((1 << heap->id) & heap_id_mask)) +			continue; +		buffer = ion_buffer_create(heap, dev, len, align, flags); +		if (!IS_ERR(buffer)) +			break; +	} +	up_read(&dev->lock); + +	if (buffer == NULL) +		return ERR_PTR(-ENODEV); + +	if (IS_ERR(buffer)) +		return ERR_CAST(buffer); + +	handle = ion_handle_create(client, buffer); + +	/* +	 * ion_buffer_create will create a buffer with a ref_cnt of 1, +	 * and ion_handle_create will take a second reference, drop one here +	 */ +	ion_buffer_put(buffer); + +	if (IS_ERR(handle)) +		return handle; + +	mutex_lock(&client->lock); +	ret = ion_handle_add(client, handle); +	mutex_unlock(&client->lock); +	if (ret) { +		ion_handle_put(handle); +		handle = ERR_PTR(ret); +	} + +	return handle; +} +EXPORT_SYMBOL(ion_alloc); + +void ion_free(struct ion_client *client, struct ion_handle *handle) +{ +	bool valid_handle; + +	BUG_ON(client != handle->client); + +	mutex_lock(&client->lock); +	valid_handle = ion_handle_validate(client, handle); + +	if (!valid_handle) { +		WARN(1, "%s: invalid handle passed to free.\n", __func__); +		mutex_unlock(&client->lock); +		return; +	} +	mutex_unlock(&client->lock); +	ion_handle_put(handle); +} +EXPORT_SYMBOL(ion_free); + +int ion_phys(struct ion_client *client, struct ion_handle *handle, +	     ion_phys_addr_t *addr, size_t *len) +{ +	struct ion_buffer *buffer; +	int ret; + +	mutex_lock(&client->lock); +	if (!ion_handle_validate(client, handle)) { +		mutex_unlock(&client->lock); +		return -EINVAL; +	} + +	buffer = handle->buffer; + +	if (!buffer->heap->ops->phys) { +		pr_err("%s: ion_phys is not implemented by this heap.\n", +		       __func__); +		mutex_unlock(&client->lock); +		return -ENODEV; +	} +	mutex_unlock(&client->lock); +	ret = 
buffer->heap->ops->phys(buffer->heap, buffer, addr, len); +	return ret; +} +EXPORT_SYMBOL(ion_phys); + +static void *ion_buffer_kmap_get(struct ion_buffer *buffer) +{ +	void *vaddr; + +	if (buffer->kmap_cnt) { +		buffer->kmap_cnt++; +		return buffer->vaddr; +	} +	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); +	if (WARN_ONCE(vaddr == NULL, +			"heap->ops->map_kernel should return ERR_PTR on error")) +		return ERR_PTR(-EINVAL); +	if (IS_ERR(vaddr)) +		return vaddr; +	buffer->vaddr = vaddr; +	buffer->kmap_cnt++; +	return vaddr; +} + +static void *ion_handle_kmap_get(struct ion_handle *handle) +{ +	struct ion_buffer *buffer = handle->buffer; +	void *vaddr; + +	if (handle->kmap_cnt) { +		handle->kmap_cnt++; +		return buffer->vaddr; +	} +	vaddr = ion_buffer_kmap_get(buffer); +	if (IS_ERR(vaddr)) +		return vaddr; +	handle->kmap_cnt++; +	return vaddr; +} + +static void ion_buffer_kmap_put(struct ion_buffer *buffer) +{ +	buffer->kmap_cnt--; +	if (!buffer->kmap_cnt) { +		buffer->heap->ops->unmap_kernel(buffer->heap, buffer); +		buffer->vaddr = NULL; +	} +} + +static void ion_handle_kmap_put(struct ion_handle *handle) +{ +	struct ion_buffer *buffer = handle->buffer; + +	if (!handle->kmap_cnt) { +		WARN(1, "%s: Double unmap detected! bailing...\n", __func__); +		return; +	} +	handle->kmap_cnt--; +	if (!handle->kmap_cnt) +		ion_buffer_kmap_put(buffer); +} + +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) +{ +	struct ion_buffer *buffer; +	void *vaddr; + +	mutex_lock(&client->lock); +	if (!ion_handle_validate(client, handle)) { +		pr_err("%s: invalid handle passed to map_kernel.\n", +		       __func__); +		mutex_unlock(&client->lock); +		return ERR_PTR(-EINVAL); +	} + +	buffer = handle->buffer; + +	if (!handle->buffer->heap->ops->map_kernel) { +		pr_err("%s: map_kernel is not implemented by this heap.\n", +		       __func__); +		mutex_unlock(&client->lock); +		return ERR_PTR(-ENODEV); +	} + +	mutex_lock(&buffer->lock); +	vaddr = ion_handle_kmap_get(handle); +	mutex_unlock(&buffer->lock); +	mutex_unlock(&client->lock); +	return vaddr; +} +EXPORT_SYMBOL(ion_map_kernel); + +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) +{ +	struct ion_buffer *buffer; + +	mutex_lock(&client->lock); +	buffer = handle->buffer; +	mutex_lock(&buffer->lock); +	ion_handle_kmap_put(handle); +	mutex_unlock(&buffer->lock); +	mutex_unlock(&client->lock); +} +EXPORT_SYMBOL(ion_unmap_kernel); + +static int ion_debug_client_show(struct seq_file *s, void *unused) +{ +	struct ion_client *client = s->private; +	struct rb_node *n; +	size_t sizes[ION_NUM_HEAP_IDS] = {0}; +	const char *names[ION_NUM_HEAP_IDS] = {NULL}; +	int i; + +	mutex_lock(&client->lock); +	for (n = rb_first(&client->handles); n; n = rb_next(n)) { +		struct ion_handle *handle = rb_entry(n, struct ion_handle, +						     node); +		unsigned int id = handle->buffer->heap->id; + +		if (!names[id]) +			names[id] = handle->buffer->heap->name; +		sizes[id] += handle->buffer->size; +	} +	mutex_unlock(&client->lock); + +	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); +	for (i = 0; i < ION_NUM_HEAP_IDS; i++) { +		if (!names[i]) +			continue; +		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]); +	} +	return 0; +} + +static int ion_debug_client_open(struct inode *inode, struct file *file) +{ +	return single_open(file, ion_debug_client_show, inode->i_private); +} + +static const struct file_operations debug_client_fops = { +	.open = ion_debug_client_open, +	.read = seq_read, +	.llseek = 
seq_lseek, +	.release = single_release, +}; + +static int ion_get_client_serial(const struct rb_root *root, +					const unsigned char *name) +{ +	int serial = -1; +	struct rb_node *node; + +	for (node = rb_first(root); node; node = rb_next(node)) { +		struct ion_client *client = rb_entry(node, struct ion_client, +						node); + +		if (strcmp(client->name, name)) +			continue; +		serial = max(serial, client->display_serial); +	} +	return serial + 1; +} + +struct ion_client *ion_client_create(struct ion_device *dev, +				     const char *name) +{ +	struct ion_client *client; +	struct task_struct *task; +	struct rb_node **p; +	struct rb_node *parent = NULL; +	struct ion_client *entry; +	pid_t pid; + +	if (!name) { +		pr_err("%s: Name cannot be null\n", __func__); +		return ERR_PTR(-EINVAL); +	} + +	get_task_struct(current->group_leader); +	task_lock(current->group_leader); +	pid = task_pid_nr(current->group_leader); +	/* don't bother to store task struct for kernel threads, +	   they can't be killed anyway */ +	if (current->group_leader->flags & PF_KTHREAD) { +		put_task_struct(current->group_leader); +		task = NULL; +	} else { +		task = current->group_leader; +	} +	task_unlock(current->group_leader); + +	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); +	if (!client) +		goto err_put_task_struct; + +	client->dev = dev; +	client->handles = RB_ROOT; +	idr_init(&client->idr); +	mutex_init(&client->lock); +	client->task = task; +	client->pid = pid; +	client->name = kstrdup(name, GFP_KERNEL); +	if (!client->name) +		goto err_free_client; + +	down_write(&dev->lock); +	client->display_serial = ion_get_client_serial(&dev->clients, name); +	client->display_name = kasprintf( +		GFP_KERNEL, "%s-%d", name, client->display_serial); +	if (!client->display_name) { +		up_write(&dev->lock); +		goto err_free_client_name; +	} +	p = &dev->clients.rb_node; +	while (*p) { +		parent = *p; +		entry = rb_entry(parent, struct ion_client, node); + +		if (client < entry) +			p = &(*p)->rb_left; +		else if (client > entry) +			p = &(*p)->rb_right; +	} +	rb_link_node(&client->node, parent, p); +	rb_insert_color(&client->node, &dev->clients); + +	client->debug_root = debugfs_create_file(client->display_name, 0664, +						dev->clients_debug_root, +						client, &debug_client_fops); +	if (!client->debug_root) { +		char buf[256], *path; +		path = dentry_path(dev->clients_debug_root, buf, 256); +		pr_err("Failed to create client debugfs at %s/%s\n", +			path, client->display_name); +	} + +	up_write(&dev->lock); + +	return client; + +err_free_client_name: +	kfree(client->name); +err_free_client: +	kfree(client); +err_put_task_struct: +	if (task) +		put_task_struct(current->group_leader); +	return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL(ion_client_create); + +void ion_client_destroy(struct ion_client *client) +{ +	struct ion_device *dev = client->dev; +	struct rb_node *n; + +	pr_debug("%s: %d\n", __func__, __LINE__); +	while ((n = rb_first(&client->handles))) { +		struct ion_handle *handle = rb_entry(n, struct ion_handle, +						     node); +		ion_handle_destroy(&handle->ref); +	} + +	idr_destroy(&client->idr); + +	down_write(&dev->lock); +	if (client->task) +		put_task_struct(client->task); +	rb_erase(&client->node, &dev->clients); +	debugfs_remove_recursive(client->debug_root); +	up_write(&dev->lock); + +	kfree(client->display_name); +	kfree(client->name); +	kfree(client); +} +EXPORT_SYMBOL(ion_client_destroy); + +struct sg_table *ion_sg_table(struct ion_client *client, +			      struct ion_handle *handle) +{ +	struct 
ion_buffer *buffer;
+	struct sg_table *table;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_dma.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = handle->buffer;
+	table = buffer->sg_table;
+	mutex_unlock(&client->lock);
+	return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction direction);
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+					enum dma_data_direction direction)
+{
+	struct dma_buf *dmabuf = attachment->dmabuf;
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+	return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+			      struct sg_table *table,
+			      enum dma_data_direction direction)
+{
+}
+
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+		size_t size, enum dma_data_direction dir)
+{
+	struct scatterlist sg;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, page, size, 0);
+	/*
+	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+	 * for the targeted device, but this works on the currently targeted
+	 * hardware.
+	 */
+	sg_dma_address(&sg) = page_to_phys(page);
+	dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
+struct ion_vma_list {
+	struct list_head list;
+	struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction dir)
+{
+	struct ion_vma_list *vma_list;
+	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	int i;
+
+	pr_debug("%s: syncing for device %s\n", __func__,
+		 dev ?
dev_name(dev) : "null"); + +	if (!ion_buffer_fault_user_mappings(buffer)) +		return; + +	mutex_lock(&buffer->lock); +	for (i = 0; i < pages; i++) { +		struct page *page = buffer->pages[i]; + +		if (ion_buffer_page_is_dirty(page)) +			ion_pages_sync_for_device(dev, ion_buffer_page(page), +							PAGE_SIZE, dir); + +		ion_buffer_page_clean(buffer->pages + i); +	} +	list_for_each_entry(vma_list, &buffer->vmas, list) { +		struct vm_area_struct *vma = vma_list->vma; + +		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, +			       NULL); +	} +	mutex_unlock(&buffer->lock); +} + +static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ +	struct ion_buffer *buffer = vma->vm_private_data; +	unsigned long pfn; +	int ret; + +	mutex_lock(&buffer->lock); +	ion_buffer_page_dirty(buffer->pages + vmf->pgoff); +	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); + +	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff])); +	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); +	mutex_unlock(&buffer->lock); +	if (ret) +		return VM_FAULT_ERROR; + +	return VM_FAULT_NOPAGE; +} + +static void ion_vm_open(struct vm_area_struct *vma) +{ +	struct ion_buffer *buffer = vma->vm_private_data; +	struct ion_vma_list *vma_list; + +	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL); +	if (!vma_list) +		return; +	vma_list->vma = vma; +	mutex_lock(&buffer->lock); +	list_add(&vma_list->list, &buffer->vmas); +	mutex_unlock(&buffer->lock); +	pr_debug("%s: adding %p\n", __func__, vma); +} + +static void ion_vm_close(struct vm_area_struct *vma) +{ +	struct ion_buffer *buffer = vma->vm_private_data; +	struct ion_vma_list *vma_list, *tmp; + +	pr_debug("%s\n", __func__); +	mutex_lock(&buffer->lock); +	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) { +		if (vma_list->vma != vma) +			continue; +		list_del(&vma_list->list); +		kfree(vma_list); +		pr_debug("%s: deleting %p\n", __func__, vma); +		break; +	} +	mutex_unlock(&buffer->lock); +} + +static struct vm_operations_struct ion_vma_ops = { +	.open = ion_vm_open, +	.close = ion_vm_close, +	.fault = ion_vm_fault, +}; + +static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ +	struct ion_buffer *buffer = dmabuf->priv; +	int ret = 0; + +	if (!buffer->heap->ops->map_user) { +		pr_err("%s: this heap does not define a method for mapping to userspace\n", +			__func__); +		return -EINVAL; +	} + +	if (ion_buffer_fault_user_mappings(buffer)) { +		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | +							VM_DONTDUMP; +		vma->vm_private_data = buffer; +		vma->vm_ops = &ion_vma_ops; +		ion_vm_open(vma); +		return 0; +	} + +	if (!(buffer->flags & ION_FLAG_CACHED)) +		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + +	mutex_lock(&buffer->lock); +	/* now map it to userspace */ +	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); +	mutex_unlock(&buffer->lock); + +	if (ret) +		pr_err("%s: failure mapping buffer to userspace\n", +		       __func__); + +	return ret; +} + +static void ion_dma_buf_release(struct dma_buf *dmabuf) +{ +	struct ion_buffer *buffer = dmabuf->priv; + +	ion_buffer_put(buffer); +} + +static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) +{ +	struct ion_buffer *buffer = dmabuf->priv; + +	return buffer->vaddr + offset * PAGE_SIZE; +} + +static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, +			       void *ptr) +{ +	return; +} + +static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, +					
size_t len, +					enum dma_data_direction direction) +{ +	struct ion_buffer *buffer = dmabuf->priv; +	void *vaddr; + +	if (!buffer->heap->ops->map_kernel) { +		pr_err("%s: map kernel is not implemented by this heap.\n", +		       __func__); +		return -ENODEV; +	} + +	mutex_lock(&buffer->lock); +	vaddr = ion_buffer_kmap_get(buffer); +	mutex_unlock(&buffer->lock); +	return PTR_ERR_OR_ZERO(vaddr); +} + +static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, +				       size_t len, +				       enum dma_data_direction direction) +{ +	struct ion_buffer *buffer = dmabuf->priv; + +	mutex_lock(&buffer->lock); +	ion_buffer_kmap_put(buffer); +	mutex_unlock(&buffer->lock); +} + +static struct dma_buf_ops dma_buf_ops = { +	.map_dma_buf = ion_map_dma_buf, +	.unmap_dma_buf = ion_unmap_dma_buf, +	.mmap = ion_mmap, +	.release = ion_dma_buf_release, +	.begin_cpu_access = ion_dma_buf_begin_cpu_access, +	.end_cpu_access = ion_dma_buf_end_cpu_access, +	.kmap_atomic = ion_dma_buf_kmap, +	.kunmap_atomic = ion_dma_buf_kunmap, +	.kmap = ion_dma_buf_kmap, +	.kunmap = ion_dma_buf_kunmap, +}; + +struct dma_buf *ion_share_dma_buf(struct ion_client *client, +						struct ion_handle *handle) +{ +	struct ion_buffer *buffer; +	struct dma_buf *dmabuf; +	bool valid_handle; + +	mutex_lock(&client->lock); +	valid_handle = ion_handle_validate(client, handle); +	if (!valid_handle) { +		WARN(1, "%s: invalid handle passed to share.\n", __func__); +		mutex_unlock(&client->lock); +		return ERR_PTR(-EINVAL); +	} +	buffer = handle->buffer; +	ion_buffer_get(buffer); +	mutex_unlock(&client->lock); + +	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR); +	if (IS_ERR(dmabuf)) { +		ion_buffer_put(buffer); +		return dmabuf; +	} + +	return dmabuf; +} +EXPORT_SYMBOL(ion_share_dma_buf); + +int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) +{ +	struct dma_buf *dmabuf; +	int fd; + +	dmabuf = ion_share_dma_buf(client, handle); +	if (IS_ERR(dmabuf)) +		return PTR_ERR(dmabuf); + +	fd = dma_buf_fd(dmabuf, O_CLOEXEC); +	if (fd < 0) +		dma_buf_put(dmabuf); + +	return fd; +} +EXPORT_SYMBOL(ion_share_dma_buf_fd); + +struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) +{ +	struct dma_buf *dmabuf; +	struct ion_buffer *buffer; +	struct ion_handle *handle; +	int ret; + +	dmabuf = dma_buf_get(fd); +	if (IS_ERR(dmabuf)) +		return ERR_CAST(dmabuf); +	/* if this memory came from ion */ + +	if (dmabuf->ops != &dma_buf_ops) { +		pr_err("%s: can not import dmabuf from another exporter\n", +		       __func__); +		dma_buf_put(dmabuf); +		return ERR_PTR(-EINVAL); +	} +	buffer = dmabuf->priv; + +	mutex_lock(&client->lock); +	/* if a handle exists for this buffer just take a reference to it */ +	handle = ion_handle_lookup(client, buffer); +	if (!IS_ERR(handle)) { +		ion_handle_get(handle); +		mutex_unlock(&client->lock); +		goto end; +	} +	mutex_unlock(&client->lock); + +	handle = ion_handle_create(client, buffer); +	if (IS_ERR(handle)) +		goto end; + +	mutex_lock(&client->lock); +	ret = ion_handle_add(client, handle); +	mutex_unlock(&client->lock); +	if (ret) { +		ion_handle_put(handle); +		handle = ERR_PTR(ret); +	} + +end: +	dma_buf_put(dmabuf); +	return handle; +} +EXPORT_SYMBOL(ion_import_dma_buf); + +static int ion_sync_for_device(struct ion_client *client, int fd) +{ +	struct dma_buf *dmabuf; +	struct ion_buffer *buffer; + +	dmabuf = dma_buf_get(fd); +	if (IS_ERR(dmabuf)) +		return PTR_ERR(dmabuf); + +	/* if this memory came from ion */ +	if (dmabuf->ops != &dma_buf_ops) { 
+		pr_err("%s: can not sync dmabuf from another exporter\n", +		       __func__); +		dma_buf_put(dmabuf); +		return -EINVAL; +	} +	buffer = dmabuf->priv; + +	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, +			       buffer->sg_table->nents, DMA_BIDIRECTIONAL); +	dma_buf_put(dmabuf); +	return 0; +} + +/* fix up the cases where the ioctl direction bits are incorrect */ +static unsigned int ion_ioctl_dir(unsigned int cmd) +{ +	switch (cmd) { +	case ION_IOC_SYNC: +	case ION_IOC_FREE: +	case ION_IOC_CUSTOM: +		return _IOC_WRITE; +	default: +		return _IOC_DIR(cmd); +	} +} + +static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ +	struct ion_client *client = filp->private_data; +	struct ion_device *dev = client->dev; +	struct ion_handle *cleanup_handle = NULL; +	int ret = 0; +	unsigned int dir; + +	union { +		struct ion_fd_data fd; +		struct ion_allocation_data allocation; +		struct ion_handle_data handle; +		struct ion_custom_data custom; +	} data; + +	dir = ion_ioctl_dir(cmd); + +	if (_IOC_SIZE(cmd) > sizeof(data)) +		return -EINVAL; + +	if (dir & _IOC_WRITE) +		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) +			return -EFAULT; + +	switch (cmd) { +	case ION_IOC_ALLOC: +	{ +		struct ion_handle *handle; + +		handle = ion_alloc(client, data.allocation.len, +						data.allocation.align, +						data.allocation.heap_id_mask, +						data.allocation.flags); +		if (IS_ERR(handle)) +			return PTR_ERR(handle); + +		data.allocation.handle = handle->id; + +		cleanup_handle = handle; +		break; +	} +	case ION_IOC_FREE: +	{ +		struct ion_handle *handle; + +		handle = ion_handle_get_by_id(client, data.handle.handle); +		if (IS_ERR(handle)) +			return PTR_ERR(handle); +		ion_free(client, handle); +		ion_handle_put(handle); +		break; +	} +	case ION_IOC_SHARE: +	case ION_IOC_MAP: +	{ +		struct ion_handle *handle; + +		handle = ion_handle_get_by_id(client, data.handle.handle); +		if (IS_ERR(handle)) +			return PTR_ERR(handle); +		data.fd.fd = ion_share_dma_buf_fd(client, handle); +		ion_handle_put(handle); +		if (data.fd.fd < 0) +			ret = data.fd.fd; +		break; +	} +	case ION_IOC_IMPORT: +	{ +		struct ion_handle *handle; + +		handle = ion_import_dma_buf(client, data.fd.fd); +		if (IS_ERR(handle)) +			ret = PTR_ERR(handle); +		else +			data.handle.handle = handle->id; +		break; +	} +	case ION_IOC_SYNC: +	{ +		ret = ion_sync_for_device(client, data.fd.fd); +		break; +	} +	case ION_IOC_CUSTOM: +	{ +		if (!dev->custom_ioctl) +			return -ENOTTY; +		ret = dev->custom_ioctl(client, data.custom.cmd, +						data.custom.arg); +		break; +	} +	default: +		return -ENOTTY; +	} + +	if (dir & _IOC_READ) { +		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) { +			if (cleanup_handle) +				ion_free(client, cleanup_handle); +			return -EFAULT; +		} +	} +	return ret; +} + +static int ion_release(struct inode *inode, struct file *file) +{ +	struct ion_client *client = file->private_data; + +	pr_debug("%s: %d\n", __func__, __LINE__); +	ion_client_destroy(client); +	return 0; +} + +static int ion_open(struct inode *inode, struct file *file) +{ +	struct miscdevice *miscdev = file->private_data; +	struct ion_device *dev = container_of(miscdev, struct ion_device, dev); +	struct ion_client *client; +	char debug_name[64]; + +	pr_debug("%s: %d\n", __func__, __LINE__); +	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader)); +	client = ion_client_create(dev, debug_name); +	if (IS_ERR(client)) +		return PTR_ERR(client); +	file->private_data = client; + +	return 0; +} + 
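For context, the ioctl dispatcher above is the whole user-space entry point to ion: a client opens /dev/ion (ion_open creates an ion_client named after the caller's pid), issues ION_IOC_* ioctls, and gets back dma-buf file descriptors. The sketch below shows roughly how that flow looks from user space. It is illustrative only: the ION_IOC_* commands and the ion_allocation_data / ion_fd_data / ion_handle_data structures come from the ion uapi header (pulled in here through "ion.h"; the uapi file itself is not part of this hunk), the heap_id_mask value is a placeholder, and error handling is kept to a minimum.

/*
 * Illustrative user-space sketch: allocate from ion, export the buffer
 * as a dma-buf fd via ION_IOC_SHARE, mmap() it, then drop the handle.
 */
#include <stddef.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ion.h"		/* local copy of the ion uapi header */

int ion_alloc_map_example(size_t len)
{
	struct ion_allocation_data alloc = {
		.len = len,
		.align = 0,
		.heap_id_mask = ~0u,	/* any heap; real clients pick a specific id */
		.flags = ION_FLAG_CACHED,
	};
	struct ion_fd_data share;
	struct ion_handle_data free_arg;
	void *ptr;
	int ion_fd, ret = -1;

	ion_fd = open("/dev/ion", O_RDWR);
	if (ion_fd < 0)
		return -1;

	/* ION_IOC_ALLOC fills in alloc.handle on success */
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		goto out_close;

	/* ION_IOC_SHARE turns the handle into a dma-buf fd (share.fd) */
	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)
		goto out_free;

	ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		   share.fd, 0);
	if (ptr != MAP_FAILED) {
		/* ... use the buffer ... */
		munmap(ptr, len);
		ret = 0;
	}
	close(share.fd);		/* drops the dma-buf reference */

out_free:
	free_arg.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_arg);
out_close:
	close(ion_fd);
	return ret;
}

Note that ION_IOC_SHARE hands back an ordinary dma-buf fd, so the mapping goes through the dma_buf_ops/ion_mmap path defined in this file rather than through the ion handle itself, and the fd can be passed to other processes or drivers like any other dma-buf.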
+static const struct file_operations ion_fops = { +	.owner          = THIS_MODULE, +	.open           = ion_open, +	.release        = ion_release, +	.unlocked_ioctl = ion_ioctl, +	.compat_ioctl   = compat_ion_ioctl, +}; + +static size_t ion_debug_heap_total(struct ion_client *client, +				   unsigned int id) +{ +	size_t size = 0; +	struct rb_node *n; + +	mutex_lock(&client->lock); +	for (n = rb_first(&client->handles); n; n = rb_next(n)) { +		struct ion_handle *handle = rb_entry(n, +						     struct ion_handle, +						     node); +		if (handle->buffer->heap->id == id) +			size += handle->buffer->size; +	} +	mutex_unlock(&client->lock); +	return size; +} + +static int ion_debug_heap_show(struct seq_file *s, void *unused) +{ +	struct ion_heap *heap = s->private; +	struct ion_device *dev = heap->dev; +	struct rb_node *n; +	size_t total_size = 0; +	size_t total_orphaned_size = 0; + +	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size"); +	seq_puts(s, "----------------------------------------------------\n"); + +	for (n = rb_first(&dev->clients); n; n = rb_next(n)) { +		struct ion_client *client = rb_entry(n, struct ion_client, +						     node); +		size_t size = ion_debug_heap_total(client, heap->id); + +		if (!size) +			continue; +		if (client->task) { +			char task_comm[TASK_COMM_LEN]; + +			get_task_comm(task_comm, client->task); +			seq_printf(s, "%16.s %16u %16zu\n", task_comm, +				   client->pid, size); +		} else { +			seq_printf(s, "%16.s %16u %16zu\n", client->name, +				   client->pid, size); +		} +	} +	seq_puts(s, "----------------------------------------------------\n"); +	seq_puts(s, "orphaned allocations (info is from last known client):\n"); +	mutex_lock(&dev->buffer_lock); +	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { +		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, +						     node); +		if (buffer->heap->id != heap->id) +			continue; +		total_size += buffer->size; +		if (!buffer->handle_count) { +			seq_printf(s, "%16.s %16u %16zu %d %d\n", +				   buffer->task_comm, buffer->pid, +				   buffer->size, buffer->kmap_cnt, +				   atomic_read(&buffer->ref.refcount)); +			total_orphaned_size += buffer->size; +		} +	} +	mutex_unlock(&dev->buffer_lock); +	seq_puts(s, "----------------------------------------------------\n"); +	seq_printf(s, "%16.s %16zu\n", "total orphaned", +		   total_orphaned_size); +	seq_printf(s, "%16.s %16zu\n", "total ", total_size); +	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) +		seq_printf(s, "%16.s %16zu\n", "deferred free", +				heap->free_list_size); +	seq_puts(s, "----------------------------------------------------\n"); + +	if (heap->debug_show) +		heap->debug_show(heap, s, unused); + +	return 0; +} + +static int ion_debug_heap_open(struct inode *inode, struct file *file) +{ +	return single_open(file, ion_debug_heap_show, inode->i_private); +} + +static const struct file_operations debug_heap_fops = { +	.open = ion_debug_heap_open, +	.read = seq_read, +	.llseek = seq_lseek, +	.release = single_release, +}; + +#ifdef DEBUG_HEAP_SHRINKER +static int debug_shrink_set(void *data, u64 val) +{ +	struct ion_heap *heap = data; +	struct shrink_control sc; +	int objs; + +	sc.gfp_mask = -1; +	sc.nr_to_scan = 0; + +	if (!val) +		return 0; + +	objs = heap->shrinker.shrink(&heap->shrinker, &sc); +	sc.nr_to_scan = objs; + +	heap->shrinker.shrink(&heap->shrinker, &sc); +	return 0; +} + +static int debug_shrink_get(void *data, u64 *val) +{ +	struct ion_heap *heap = data; +	struct shrink_control sc; +	int objs; + +	sc.gfp_mask = -1; +	
sc.nr_to_scan = 0; + +	objs = heap->shrinker.shrink(&heap->shrinker, &sc); +	*val = objs; +	return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, +			debug_shrink_set, "%llu\n"); +#endif + +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) +{ +	struct dentry *debug_file; + +	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma || +	    !heap->ops->unmap_dma) +		pr_err("%s: can not add heap with invalid ops struct.\n", +		       __func__); + +	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) +		ion_heap_init_deferred_free(heap); + +	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) +		ion_heap_init_shrinker(heap); + +	heap->dev = dev; +	down_write(&dev->lock); +	/* use negative heap->id to reverse the priority -- when traversing +	   the list later attempt higher id numbers first */ +	plist_node_init(&heap->node, -heap->id); +	plist_add(&heap->node, &dev->heaps); +	debug_file = debugfs_create_file(heap->name, 0664, +					dev->heaps_debug_root, heap, +					&debug_heap_fops); + +	if (!debug_file) { +		char buf[256], *path; + +		path = dentry_path(dev->heaps_debug_root, buf, 256); +		pr_err("Failed to create heap debugfs at %s/%s\n", +			path, heap->name); +	} + +#ifdef DEBUG_HEAP_SHRINKER +	if (heap->shrinker.shrink) { +		char debug_name[64]; + +		snprintf(debug_name, 64, "%s_shrink", heap->name); +		debug_file = debugfs_create_file( +			debug_name, 0644, dev->heaps_debug_root, heap, +			&debug_shrink_fops); +		if (!debug_file) { +			char buf[256], *path; + +			path = dentry_path(dev->heaps_debug_root, buf, 256); +			pr_err("Failed to create heap shrinker debugfs at %s/%s\n", +				path, debug_name); +		} +	} +#endif +	up_write(&dev->lock); +} + +struct ion_device *ion_device_create(long (*custom_ioctl) +				     (struct ion_client *client, +				      unsigned int cmd, +				      unsigned long arg)) +{ +	struct ion_device *idev; +	int ret; + +	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); +	if (!idev) +		return ERR_PTR(-ENOMEM); + +	idev->dev.minor = MISC_DYNAMIC_MINOR; +	idev->dev.name = "ion"; +	idev->dev.fops = &ion_fops; +	idev->dev.parent = NULL; +	ret = misc_register(&idev->dev); +	if (ret) { +		pr_err("ion: failed to register misc device.\n"); +		return ERR_PTR(ret); +	} + +	idev->debug_root = debugfs_create_dir("ion", NULL); +	if (!idev->debug_root) { +		pr_err("ion: failed to create debugfs root directory.\n"); +		goto debugfs_done; +	} +	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root); +	if (!idev->heaps_debug_root) { +		pr_err("ion: failed to create debugfs heaps directory.\n"); +		goto debugfs_done; +	} +	idev->clients_debug_root = debugfs_create_dir("clients", +						idev->debug_root); +	if (!idev->clients_debug_root) +		pr_err("ion: failed to create debugfs clients directory.\n"); + +debugfs_done: + +	idev->custom_ioctl = custom_ioctl; +	idev->buffers = RB_ROOT; +	mutex_init(&idev->buffer_lock); +	init_rwsem(&idev->lock); +	plist_head_init(&idev->heaps); +	idev->clients = RB_ROOT; +	return idev; +} + +void ion_device_destroy(struct ion_device *dev) +{ +	misc_deregister(&dev->dev); +	debugfs_remove_recursive(dev->debug_root); +	/* XXX need to free the heaps and clients ? 
*/ +	kfree(dev); +} + +void __init ion_reserve(struct ion_platform_data *data) +{ +	int i; + +	for (i = 0; i < data->nr; i++) { +		if (data->heaps[i].size == 0) +			continue; + +		if (data->heaps[i].base == 0) { +			phys_addr_t paddr; + +			paddr = memblock_alloc_base(data->heaps[i].size, +						    data->heaps[i].align, +						    MEMBLOCK_ALLOC_ANYWHERE); +			if (!paddr) { +				pr_err("%s: error allocating memblock for heap %d\n", +					__func__, i); +				continue; +			} +			data->heaps[i].base = paddr; +		} else { +			int ret = memblock_reserve(data->heaps[i].base, +					       data->heaps[i].size); +			if (ret) +				pr_err("memblock reserve of %zx@%lx failed\n", +				       data->heaps[i].size, +				       data->heaps[i].base); +		} +		pr_info("%s: %s reserved base %lx size %zu\n", __func__, +			data->heaps[i].name, +			data->heaps[i].base, +			data->heaps[i].size); +	} +} diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h new file mode 100644 index 00000000000..dcd2a0cdb19 --- /dev/null +++ b/drivers/staging/android/ion/ion.h @@ -0,0 +1,204 @@ +/* + * drivers/staging/android/ion/ion.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_ION_H +#define _LINUX_ION_H + +#include <linux/types.h> + +#include "../uapi/ion.h" + +struct ion_handle; +struct ion_device; +struct ion_heap; +struct ion_mapper; +struct ion_client; +struct ion_buffer; + +/* This should be removed some day when phys_addr_t's are fully +   plumbed in the kernel, and all instances of ion_phys_addr_t should +   be converted to phys_addr_t.  For the time being many kernel interfaces +   do not accept phys_addr_t's that would have to */ +#define ion_phys_addr_t unsigned long + +/** + * struct ion_platform_heap - defines a heap in the given platform + * @type:	type of the heap from ion_heap_type enum + * @id:		unique identifier for heap.  When allocating higher numbers + *		will be allocated from first.  At allocation these are passed + *		as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS. + * @name:	used for debug purposes + * @base:	base address of heap in physical memory if applicable + * @size:	size of the heap in bytes if applicable + * @align:	required alignment in physical memory if applicable + * @priv:	private info passed from the board file + * + * Provided by the board file. + */ +struct ion_platform_heap { +	enum ion_heap_type type; +	unsigned int id; +	const char *name; +	ion_phys_addr_t base; +	size_t size; +	ion_phys_addr_t align; +	void *priv; +}; + +/** + * struct ion_platform_data - array of platform heaps passed from board file + * @nr:		number of structures in the array + * @heaps:	array of platform_heap structions + * + * Provided by the board file in the form of platform data to a platform device. 
+ */
+struct ion_platform_data {
+	int nr;
+	struct ion_platform_heap *heaps;
+};
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data:	platform data specifying starting physical address and
+ *		size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
+ * ion_client_create() - allocate a client and return it
+ * @dev:		the global ion device
+ * @name:		used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+				     const char *name);
+
+/**
+ * ion_client_destroy() - frees a client and all its handles
+ * @client:	the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client:		the client
+ * @len:		size of the allocation
+ * @align:		requested allocation alignment, lots of hardware blocks
+ *			have alignment requirements of some kind
+ * @heap_id_mask:	mask of heaps to allocate from, if multiple bits are set
+ *			heaps will be tried in order from highest to lowest
+ *			id
+ * @flags:		heap flags, the low 16 bits are consumed by ion, the
+ *			high 16 bits are passed on to the respective heap and
+ *			can be heap-specific
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int heap_id_mask,
+			     unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client:	the client
+ * @handle:	the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and length of a handle
+ * @client:	the client
+ * @handle:	the handle
+ * @addr:	a pointer to put the address in
+ * @len:	a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address.  Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead.  Returns -EINVAL if the handle is invalid.  This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_sg_table - return an sg_table describing a handle
+ * @client:	the client
+ * @handle:	the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+			      struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client:	the client
+ * @handle:	handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client:	the client
+ * @handle:	handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf() - share buffer as dma-buf
+ * @client:	the client
+ * @handle:	the handle
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+						struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client:	the client
+ * @handle:	the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
+ * @client:	the client
+ * @fd:		the dma-buf fd
+ *
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it.  If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
+
+#endif /* _LINUX_ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
new file mode 100644
index 00000000000..dcb6f2196c8
--- /dev/null
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -0,0 +1,194 @@
+/*
+ * drivers/staging/android/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ * + */ +#include <linux/spinlock.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +struct ion_carveout_heap { +	struct ion_heap heap; +	struct gen_pool *pool; +	ion_phys_addr_t base; +}; + +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, +				      unsigned long size, +				      unsigned long align) +{ +	struct ion_carveout_heap *carveout_heap = +		container_of(heap, struct ion_carveout_heap, heap); +	unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); + +	if (!offset) +		return ION_CARVEOUT_ALLOCATE_FAIL; + +	return offset; +} + +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, +		       unsigned long size) +{ +	struct ion_carveout_heap *carveout_heap = +		container_of(heap, struct ion_carveout_heap, heap); + +	if (addr == ION_CARVEOUT_ALLOCATE_FAIL) +		return; +	gen_pool_free(carveout_heap->pool, addr, size); +} + +static int ion_carveout_heap_phys(struct ion_heap *heap, +				  struct ion_buffer *buffer, +				  ion_phys_addr_t *addr, size_t *len) +{ +	struct sg_table *table = buffer->priv_virt; +	struct page *page = sg_page(table->sgl); +	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + +	*addr = paddr; +	*len = buffer->size; +	return 0; +} + +static int ion_carveout_heap_allocate(struct ion_heap *heap, +				      struct ion_buffer *buffer, +				      unsigned long size, unsigned long align, +				      unsigned long flags) +{ +	struct sg_table *table; +	ion_phys_addr_t paddr; +	int ret; + +	if (align > PAGE_SIZE) +		return -EINVAL; + +	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); +	if (!table) +		return -ENOMEM; +	ret = sg_alloc_table(table, 1, GFP_KERNEL); +	if (ret) +		goto err_free; + +	paddr = ion_carveout_allocate(heap, size, align); +	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) { +		ret = -ENOMEM; +		goto err_free_table; +	} + +	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0); +	buffer->priv_virt = table; + +	return 0; + +err_free_table: +	sg_free_table(table); +err_free: +	kfree(table); +	return ret; +} + +static void ion_carveout_heap_free(struct ion_buffer *buffer) +{ +	struct ion_heap *heap = buffer->heap; +	struct sg_table *table = buffer->priv_virt; +	struct page *page = sg_page(table->sgl); +	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + +	ion_heap_buffer_zero(buffer); + +	if (ion_buffer_cached(buffer)) +		dma_sync_sg_for_device(NULL, table->sgl, table->nents, +							DMA_BIDIRECTIONAL); + +	ion_carveout_free(heap, paddr, buffer->size); +	sg_free_table(table); +	kfree(table); +} + +static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap, +						  struct ion_buffer *buffer) +{ +	return buffer->priv_virt; +} + +static void ion_carveout_heap_unmap_dma(struct ion_heap *heap, +					struct ion_buffer *buffer) +{ +	return; +} + +static struct ion_heap_ops carveout_heap_ops = { +	.allocate = ion_carveout_heap_allocate, +	.free = ion_carveout_heap_free, +	.phys = ion_carveout_heap_phys, +	.map_dma = ion_carveout_heap_map_dma, +	.unmap_dma = ion_carveout_heap_unmap_dma, +	.map_user = ion_heap_map_user, +	.map_kernel = ion_heap_map_kernel, +	.unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) +{ +	struct ion_carveout_heap *carveout_heap; +	int ret; + +	struct page *page; +	size_t size; + +	page = 
pfn_to_page(PFN_DOWN(heap_data->base)); +	size = heap_data->size; + +	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + +	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); +	if (ret) +		return ERR_PTR(ret); + +	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); +	if (!carveout_heap) +		return ERR_PTR(-ENOMEM); + +	carveout_heap->pool = gen_pool_create(12, -1); +	if (!carveout_heap->pool) { +		kfree(carveout_heap); +		return ERR_PTR(-ENOMEM); +	} +	carveout_heap->base = heap_data->base; +	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, +		     -1); +	carveout_heap->heap.ops = &carveout_heap_ops; +	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; +	carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + +	return &carveout_heap->heap; +} + +void ion_carveout_heap_destroy(struct ion_heap *heap) +{ +	struct ion_carveout_heap *carveout_heap = +	     container_of(heap, struct  ion_carveout_heap, heap); + +	gen_pool_destroy(carveout_heap->pool); +	kfree(carveout_heap); +	carveout_heap = NULL; +} diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c new file mode 100644 index 00000000000..3f2c12ba4d1 --- /dev/null +++ b/drivers/staging/android/ion/ion_chunk_heap.c @@ -0,0 +1,195 @@ +/* + * drivers/staging/android/ion/ion_chunk_heap.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +struct ion_chunk_heap { +	struct ion_heap heap; +	struct gen_pool *pool; +	ion_phys_addr_t base; +	unsigned long chunk_size; +	unsigned long size; +	unsigned long allocated; +}; + +static int ion_chunk_heap_allocate(struct ion_heap *heap, +				      struct ion_buffer *buffer, +				      unsigned long size, unsigned long align, +				      unsigned long flags) +{ +	struct ion_chunk_heap *chunk_heap = +		container_of(heap, struct ion_chunk_heap, heap); +	struct sg_table *table; +	struct scatterlist *sg; +	int ret, i; +	unsigned long num_chunks; +	unsigned long allocated_size; + +	if (align > chunk_heap->chunk_size) +		return -EINVAL; + +	allocated_size = ALIGN(size, chunk_heap->chunk_size); +	num_chunks = allocated_size / chunk_heap->chunk_size; + +	if (allocated_size > chunk_heap->size - chunk_heap->allocated) +		return -ENOMEM; + +	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); +	if (!table) +		return -ENOMEM; +	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL); +	if (ret) { +		kfree(table); +		return ret; +	} + +	sg = table->sgl; +	for (i = 0; i < num_chunks; i++) { +		unsigned long paddr = gen_pool_alloc(chunk_heap->pool, +						     chunk_heap->chunk_size); +		if (!paddr) +			goto err; +		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)), +				chunk_heap->chunk_size, 0); +		sg = sg_next(sg); +	} + +	buffer->priv_virt = table; +	chunk_heap->allocated += allocated_size; +	return 0; +err: +	sg = table->sgl; +	for (i -= 1; i >= 0; i--) { +		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), +			      sg->length); +		sg = sg_next(sg); +	} +	sg_free_table(table); +	kfree(table); +	return -ENOMEM; +} + +static void ion_chunk_heap_free(struct ion_buffer *buffer) +{ +	struct ion_heap *heap = buffer->heap; +	struct ion_chunk_heap *chunk_heap = +		container_of(heap, struct ion_chunk_heap, heap); +	struct sg_table *table = buffer->priv_virt; +	struct scatterlist *sg; +	int i; +	unsigned long allocated_size; + +	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size); + +	ion_heap_buffer_zero(buffer); + +	if (ion_buffer_cached(buffer)) +		dma_sync_sg_for_device(NULL, table->sgl, table->nents, +								DMA_BIDIRECTIONAL); + +	for_each_sg(table->sgl, sg, table->nents, i) { +		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), +			      sg->length); +	} +	chunk_heap->allocated -= allocated_size; +	sg_free_table(table); +	kfree(table); +} + +static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap, +					       struct ion_buffer *buffer) +{ +	return buffer->priv_virt; +} + +static void ion_chunk_heap_unmap_dma(struct ion_heap *heap, +				     struct ion_buffer *buffer) +{ +	return; +} + +static struct ion_heap_ops chunk_heap_ops = { +	.allocate = ion_chunk_heap_allocate, +	.free = ion_chunk_heap_free, +	.map_dma = ion_chunk_heap_map_dma, +	.unmap_dma = ion_chunk_heap_unmap_dma, +	.map_user = ion_heap_map_user, +	.map_kernel = ion_heap_map_kernel, +	.unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) +{ +	struct ion_chunk_heap *chunk_heap; +	int ret; +	struct page *page; +	size_t size; + +	page = pfn_to_page(PFN_DOWN(heap_data->base)); +	size = heap_data->size; + +	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + +	ret 
= ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); +	if (ret) +		return ERR_PTR(ret); + +	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL); +	if (!chunk_heap) +		return ERR_PTR(-ENOMEM); + +	chunk_heap->chunk_size = (unsigned long)heap_data->priv; +	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) + +					   PAGE_SHIFT, -1); +	if (!chunk_heap->pool) { +		ret = -ENOMEM; +		goto error_gen_pool_create; +	} +	chunk_heap->base = heap_data->base; +	chunk_heap->size = heap_data->size; +	chunk_heap->allocated = 0; + +	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1); +	chunk_heap->heap.ops = &chunk_heap_ops; +	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK; +	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; +	pr_info("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base, +		heap_data->size, heap_data->align); + +	return &chunk_heap->heap; + +error_gen_pool_create: +	kfree(chunk_heap); +	return ERR_PTR(ret); +} + +void ion_chunk_heap_destroy(struct ion_heap *heap) +{ +	struct ion_chunk_heap *chunk_heap = +	     container_of(heap, struct  ion_chunk_heap, heap); + +	gen_pool_destroy(chunk_heap->pool); +	kfree(chunk_heap); +	chunk_heap = NULL; +} diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c new file mode 100644 index 00000000000..ce68ecfed31 --- /dev/null +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -0,0 +1,218 @@ +/* + * drivers/staging/android/ion/ion_cma_heap.c + * + * Copyright (C) Linaro 2012 + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/dma-mapping.h> + +#include "ion.h" +#include "ion_priv.h" + +#define ION_CMA_ALLOCATE_FAILED -1 + +struct ion_cma_heap { +	struct ion_heap heap; +	struct device *dev; +}; + +#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap) + +struct ion_cma_buffer_info { +	void *cpu_addr; +	dma_addr_t handle; +	struct sg_table *table; +}; + +/* + * Create scatter-list for the already allocated DMA buffer. + * This function could be replaced by dma_common_get_sgtable + * as soon as it will avalaible. 
+ */ +static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt, +			       void *cpu_addr, dma_addr_t handle, size_t size) +{ +	struct page *page = virt_to_page(cpu_addr); +	int ret; + +	ret = sg_alloc_table(sgt, 1, GFP_KERNEL); +	if (unlikely(ret)) +		return ret; + +	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); +	return 0; +} + +/* ION CMA heap operations functions */ +static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, +			    unsigned long len, unsigned long align, +			    unsigned long flags) +{ +	struct ion_cma_heap *cma_heap = to_cma_heap(heap); +	struct device *dev = cma_heap->dev; +	struct ion_cma_buffer_info *info; + +	dev_dbg(dev, "Request buffer allocation len %ld\n", len); + +	if (buffer->flags & ION_FLAG_CACHED) +		return -EINVAL; + +	if (align > PAGE_SIZE) +		return -EINVAL; + +	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL); +	if (!info) { +		dev_err(dev, "Can't allocate buffer info\n"); +		return ION_CMA_ALLOCATE_FAILED; +	} + +	info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), +						GFP_HIGHUSER | __GFP_ZERO); + +	if (!info->cpu_addr) { +		dev_err(dev, "Fail to allocate buffer\n"); +		goto err; +	} + +	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); +	if (!info->table) { +		dev_err(dev, "Fail to allocate sg table\n"); +		goto free_mem; +	} + +	if (ion_cma_get_sgtable +	    (dev, info->table, info->cpu_addr, info->handle, len)) +		goto free_table; +	/* keep this for memory release */ +	buffer->priv_virt = info; +	dev_dbg(dev, "Allocate buffer %p\n", buffer); +	return 0; + +free_table: +	kfree(info->table); +free_mem: +	dma_free_coherent(dev, len, info->cpu_addr, info->handle); +err: +	kfree(info); +	return ION_CMA_ALLOCATE_FAILED; +} + +static void ion_cma_free(struct ion_buffer *buffer) +{ +	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); +	struct device *dev = cma_heap->dev; +	struct ion_cma_buffer_info *info = buffer->priv_virt; + +	dev_dbg(dev, "Release buffer %p\n", buffer); +	/* release memory */ +	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle); +	/* release sg table */ +	sg_free_table(info->table); +	kfree(info->table); +	kfree(info); +} + +/* return physical address in addr */ +static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer, +			ion_phys_addr_t *addr, size_t *len) +{ +	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); +	struct device *dev = cma_heap->dev; +	struct ion_cma_buffer_info *info = buffer->priv_virt; + +	dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer, +		&info->handle); + +	*addr = info->handle; +	*len = buffer->size; + +	return 0; +} + +static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap, +					     struct ion_buffer *buffer) +{ +	struct ion_cma_buffer_info *info = buffer->priv_virt; + +	return info->table; +} + +static void ion_cma_heap_unmap_dma(struct ion_heap *heap, +				   struct ion_buffer *buffer) +{ +	return; +} + +static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer, +			struct vm_area_struct *vma) +{ +	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); +	struct device *dev = cma_heap->dev; +	struct ion_cma_buffer_info *info = buffer->priv_virt; + +	return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle, +				 buffer->size); +} + +static void *ion_cma_map_kernel(struct ion_heap *heap, +				struct ion_buffer *buffer) +{ +	struct ion_cma_buffer_info *info = buffer->priv_virt; +	/* kernel memory mapping has been done at 
allocation time */ +	return info->cpu_addr; +} + +static void ion_cma_unmap_kernel(struct ion_heap *heap, +					struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops ion_cma_ops = { +	.allocate = ion_cma_allocate, +	.free = ion_cma_free, +	.map_dma = ion_cma_heap_map_dma, +	.unmap_dma = ion_cma_heap_unmap_dma, +	.phys = ion_cma_phys, +	.map_user = ion_cma_mmap, +	.map_kernel = ion_cma_map_kernel, +	.unmap_kernel = ion_cma_unmap_kernel, +}; + +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data) +{ +	struct ion_cma_heap *cma_heap; + +	cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL); + +	if (!cma_heap) +		return ERR_PTR(-ENOMEM); + +	cma_heap->heap.ops = &ion_cma_ops; +	/* get device from private heaps data, later it will be +	 * used to make the link with reserved CMA memory */ +	cma_heap->dev = data->priv; +	cma_heap->heap.type = ION_HEAP_TYPE_DMA; +	return &cma_heap->heap; +} + +void ion_cma_heap_destroy(struct ion_heap *heap) +{ +	struct ion_cma_heap *cma_heap = to_cma_heap(heap); + +	kfree(cma_heap); +} diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c new file mode 100644 index 00000000000..3a45e79fe44 --- /dev/null +++ b/drivers/staging/android/ion/ion_dummy_driver.c @@ -0,0 +1,158 @@ +/* + * drivers/gpu/ion/ion_dummy_driver.c + * + * Copyright (C) 2013 Linaro, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/memblock.h> +#include <linux/sizes.h> +#include <linux/io.h> +#include "ion.h" +#include "ion_priv.h" + +static struct ion_device *idev; +static struct ion_heap **heaps; + +static void *carveout_ptr; +static void *chunk_ptr; + +static struct ion_platform_heap dummy_heaps[] = { +		{ +			.id	= ION_HEAP_TYPE_SYSTEM, +			.type	= ION_HEAP_TYPE_SYSTEM, +			.name	= "system", +		}, +		{ +			.id	= ION_HEAP_TYPE_SYSTEM_CONTIG, +			.type	= ION_HEAP_TYPE_SYSTEM_CONTIG, +			.name	= "system contig", +		}, +		{ +			.id	= ION_HEAP_TYPE_CARVEOUT, +			.type	= ION_HEAP_TYPE_CARVEOUT, +			.name	= "carveout", +			.size	= SZ_4M, +		}, +		{ +			.id	= ION_HEAP_TYPE_CHUNK, +			.type	= ION_HEAP_TYPE_CHUNK, +			.name	= "chunk", +			.size	= SZ_4M, +			.align	= SZ_16K, +			.priv	= (void *)(SZ_16K), +		}, +}; + +static struct ion_platform_data dummy_ion_pdata = { +	.nr = ARRAY_SIZE(dummy_heaps), +	.heaps = dummy_heaps, +}; + +static int __init ion_dummy_init(void) +{ +	int i, err; + +	idev = ion_device_create(NULL); +	heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr, +			GFP_KERNEL); +	if (!heaps) +		return -ENOMEM; + + +	/* Allocate a dummy carveout heap */ +	carveout_ptr = alloc_pages_exact( +				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size, +				GFP_KERNEL); +	if (carveout_ptr) +		dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base = +						virt_to_phys(carveout_ptr); +	else +		pr_err("ion_dummy: Could not allocate carveout\n"); + +	/* Allocate a dummy chunk heap */ +	chunk_ptr = alloc_pages_exact( +				dummy_heaps[ION_HEAP_TYPE_CHUNK].size, +				GFP_KERNEL); +	if (chunk_ptr) +		dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr); +	else +		pr_err("ion_dummy: Could not allocate chunk\n"); + +	for (i = 0; i < dummy_ion_pdata.nr; i++) { +		struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i]; + +		if (heap_data->type == ION_HEAP_TYPE_CARVEOUT && +							!heap_data->base) +			continue; + +		if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base) +			continue; + +		heaps[i] = ion_heap_create(heap_data); +		if (IS_ERR_OR_NULL(heaps[i])) { +			err = PTR_ERR(heaps[i]); +			goto err; +		} +		ion_device_add_heap(idev, heaps[i]); +	} +	return 0; +err: +	for (i = 0; i < dummy_ion_pdata.nr; i++) { +		if (heaps[i]) +			ion_heap_destroy(heaps[i]); +	} +	kfree(heaps); + +	if (carveout_ptr) { +		free_pages_exact(carveout_ptr, +				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); +		carveout_ptr = NULL; +	} +	if (chunk_ptr) { +		free_pages_exact(chunk_ptr, +				dummy_heaps[ION_HEAP_TYPE_CHUNK].size); +		chunk_ptr = NULL; +	} +	return err; +} +device_initcall(ion_dummy_init); + +static void __exit ion_dummy_exit(void) +{ +	int i; + +	ion_device_destroy(idev); + +	for (i = 0; i < dummy_ion_pdata.nr; i++) +		ion_heap_destroy(heaps[i]); +	kfree(heaps); + +	if (carveout_ptr) { +		free_pages_exact(carveout_ptr, +				dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); +		carveout_ptr = NULL; +	} +	if (chunk_ptr) { +		free_pages_exact(chunk_ptr, +				dummy_heaps[ION_HEAP_TYPE_CHUNK].size); +		chunk_ptr = NULL; +	} + +	return; +} +__exitcall(ion_dummy_exit); diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c new file mode 100644 index 00000000000..4605e04712a --- /dev/null +++ b/drivers/staging/android/ion/ion_heap.c @@ -0,0 +1,383 @@ +/* + * drivers/staging/android/ion/ion_heap.c + * + * Copyright (C) 2011 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/err.h> +#include <linux/freezer.h> +#include <linux/kthread.h> +#include <linux/mm.h> +#include <linux/rtmutex.h> +#include <linux/sched.h> +#include <linux/scatterlist.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +void *ion_heap_map_kernel(struct ion_heap *heap, +			  struct ion_buffer *buffer) +{ +	struct scatterlist *sg; +	int i, j; +	void *vaddr; +	pgprot_t pgprot; +	struct sg_table *table = buffer->sg_table; +	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; +	struct page **pages = vmalloc(sizeof(struct page *) * npages); +	struct page **tmp = pages; + +	if (!pages) +		return NULL; + +	if (buffer->flags & ION_FLAG_CACHED) +		pgprot = PAGE_KERNEL; +	else +		pgprot = pgprot_writecombine(PAGE_KERNEL); + +	for_each_sg(table->sgl, sg, table->nents, i) { +		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE; +		struct page *page = sg_page(sg); + +		BUG_ON(i >= npages); +		for (j = 0; j < npages_this_entry; j++) +			*(tmp++) = page++; +	} +	vaddr = vmap(pages, npages, VM_MAP, pgprot); +	vfree(pages); + +	if (vaddr == NULL) +		return ERR_PTR(-ENOMEM); + +	return vaddr; +} + +void ion_heap_unmap_kernel(struct ion_heap *heap, +			   struct ion_buffer *buffer) +{ +	vunmap(buffer->vaddr); +} + +int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, +		      struct vm_area_struct *vma) +{ +	struct sg_table *table = buffer->sg_table; +	unsigned long addr = vma->vm_start; +	unsigned long offset = vma->vm_pgoff * PAGE_SIZE; +	struct scatterlist *sg; +	int i; +	int ret; + +	for_each_sg(table->sgl, sg, table->nents, i) { +		struct page *page = sg_page(sg); +		unsigned long remainder = vma->vm_end - addr; +		unsigned long len = sg->length; + +		if (offset >= sg->length) { +			offset -= sg->length; +			continue; +		} else if (offset) { +			page += offset / PAGE_SIZE; +			len = sg->length - offset; +			offset = 0; +		} +		len = min(len, remainder); +		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len, +				vma->vm_page_prot); +		if (ret) +			return ret; +		addr += len; +		if (addr >= vma->vm_end) +			return 0; +	} +	return 0; +} + +static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) +{ +	void *addr = vm_map_ram(pages, num, -1, pgprot); + +	if (!addr) +		return -ENOMEM; +	memset(addr, 0, PAGE_SIZE * num); +	vm_unmap_ram(addr, num); + +	return 0; +} + +static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents, +						pgprot_t pgprot) +{ +	int p = 0; +	int ret = 0; +	struct sg_page_iter piter; +	struct page *pages[32]; + +	for_each_sg_page(sgl, &piter, nents, 0) { +		pages[p++] = sg_page_iter_page(&piter); +		if (p == ARRAY_SIZE(pages)) { +			ret = ion_heap_clear_pages(pages, p, pgprot); +			if (ret) +				return ret; +			p = 0; +		} +	} +	if (p) +		ret = ion_heap_clear_pages(pages, p, pgprot); + +	return ret; +} + +int ion_heap_buffer_zero(struct ion_buffer *buffer) +{ +	struct sg_table *table = buffer->sg_table; +	pgprot_t pgprot; + +	if (buffer->flags & ION_FLAG_CACHED) +		pgprot = PAGE_KERNEL; +	else +		pgprot = 
pgprot_writecombine(PAGE_KERNEL); + +	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot); +} + +int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot) +{ +	struct scatterlist sg; + +	sg_init_table(&sg, 1); +	sg_set_page(&sg, page, size, 0); +	return ion_heap_sglist_zero(&sg, 1, pgprot); +} + +void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer) +{ +	spin_lock(&heap->free_lock); +	list_add(&buffer->list, &heap->free_list); +	heap->free_list_size += buffer->size; +	spin_unlock(&heap->free_lock); +	wake_up(&heap->waitqueue); +} + +size_t ion_heap_freelist_size(struct ion_heap *heap) +{ +	size_t size; + +	spin_lock(&heap->free_lock); +	size = heap->free_list_size; +	spin_unlock(&heap->free_lock); + +	return size; +} + +static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size, +				bool skip_pools) +{ +	struct ion_buffer *buffer; +	size_t total_drained = 0; + +	if (ion_heap_freelist_size(heap) == 0) +		return 0; + +	spin_lock(&heap->free_lock); +	if (size == 0) +		size = heap->free_list_size; + +	while (!list_empty(&heap->free_list)) { +		if (total_drained >= size) +			break; +		buffer = list_first_entry(&heap->free_list, struct ion_buffer, +					  list); +		list_del(&buffer->list); +		heap->free_list_size -= buffer->size; +		if (skip_pools) +			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE; +		total_drained += buffer->size; +		spin_unlock(&heap->free_lock); +		ion_buffer_destroy(buffer); +		spin_lock(&heap->free_lock); +	} +	spin_unlock(&heap->free_lock); + +	return total_drained; +} + +size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size) +{ +	return _ion_heap_freelist_drain(heap, size, false); +} + +size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size) +{ +	return _ion_heap_freelist_drain(heap, size, true); +} + +static int ion_heap_deferred_free(void *data) +{ +	struct ion_heap *heap = data; + +	while (true) { +		struct ion_buffer *buffer; + +		wait_event_freezable(heap->waitqueue, +				     ion_heap_freelist_size(heap) > 0); + +		spin_lock(&heap->free_lock); +		if (list_empty(&heap->free_list)) { +			spin_unlock(&heap->free_lock); +			continue; +		} +		buffer = list_first_entry(&heap->free_list, struct ion_buffer, +					  list); +		list_del(&buffer->list); +		heap->free_list_size -= buffer->size; +		spin_unlock(&heap->free_lock); +		ion_buffer_destroy(buffer); +	} + +	return 0; +} + +int ion_heap_init_deferred_free(struct ion_heap *heap) +{ +	struct sched_param param = { .sched_priority = 0 }; + +	INIT_LIST_HEAD(&heap->free_list); +	heap->free_list_size = 0; +	spin_lock_init(&heap->free_lock); +	init_waitqueue_head(&heap->waitqueue); +	heap->task = kthread_run(ion_heap_deferred_free, heap, +				 "%s", heap->name); +	if (IS_ERR(heap->task)) { +		pr_err("%s: creating thread for deferred free failed\n", +		       __func__); +		return PTR_ERR_OR_ZERO(heap->task); +	} +	sched_setscheduler(heap->task, SCHED_IDLE, ¶m); +	return 0; +} + +static unsigned long ion_heap_shrink_count(struct shrinker *shrinker, +						struct shrink_control *sc) +{ +	struct ion_heap *heap = container_of(shrinker, struct ion_heap, +					     shrinker); +	int total = 0; + +	total = ion_heap_freelist_size(heap) / PAGE_SIZE; +	if (heap->ops->shrink) +		total += heap->ops->shrink(heap, sc->gfp_mask, 0); +	return total; +} + +static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker, +						struct shrink_control *sc) +{ +	struct ion_heap *heap = container_of(shrinker, struct ion_heap, +					     shrinker); +	int 
freed = 0; +	int to_scan = sc->nr_to_scan; + +	if (to_scan == 0) +		return 0; + +	/* +	 * shrink the free list first, no point in zeroing the memory if we're +	 * just going to reclaim it. Also, skip any possible page pooling. +	 */ +	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) +		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) / +				PAGE_SIZE; + +	to_scan -= freed; +	if (to_scan <= 0) +		return freed; + +	if (heap->ops->shrink) +		freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan); +	return freed; +} + +void ion_heap_init_shrinker(struct ion_heap *heap) +{ +	heap->shrinker.count_objects = ion_heap_shrink_count; +	heap->shrinker.scan_objects = ion_heap_shrink_scan; +	heap->shrinker.seeks = DEFAULT_SEEKS; +	heap->shrinker.batch = 0; +	register_shrinker(&heap->shrinker); +} + +struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) +{ +	struct ion_heap *heap = NULL; + +	switch (heap_data->type) { +	case ION_HEAP_TYPE_SYSTEM_CONTIG: +		heap = ion_system_contig_heap_create(heap_data); +		break; +	case ION_HEAP_TYPE_SYSTEM: +		heap = ion_system_heap_create(heap_data); +		break; +	case ION_HEAP_TYPE_CARVEOUT: +		heap = ion_carveout_heap_create(heap_data); +		break; +	case ION_HEAP_TYPE_CHUNK: +		heap = ion_chunk_heap_create(heap_data); +		break; +	case ION_HEAP_TYPE_DMA: +		heap = ion_cma_heap_create(heap_data); +		break; +	default: +		pr_err("%s: Invalid heap type %d\n", __func__, +		       heap_data->type); +		return ERR_PTR(-EINVAL); +	} + +	if (IS_ERR_OR_NULL(heap)) { +		pr_err("%s: error creating heap %s type %d base %lu size %zu\n", +		       __func__, heap_data->name, heap_data->type, +		       heap_data->base, heap_data->size); +		return ERR_PTR(-EINVAL); +	} + +	heap->name = heap_data->name; +	heap->id = heap_data->id; +	return heap; +} + +void ion_heap_destroy(struct ion_heap *heap) +{ +	if (!heap) +		return; + +	switch (heap->type) { +	case ION_HEAP_TYPE_SYSTEM_CONTIG: +		ion_system_contig_heap_destroy(heap); +		break; +	case ION_HEAP_TYPE_SYSTEM: +		ion_system_heap_destroy(heap); +		break; +	case ION_HEAP_TYPE_CARVEOUT: +		ion_carveout_heap_destroy(heap); +		break; +	case ION_HEAP_TYPE_CHUNK: +		ion_chunk_heap_destroy(heap); +		break; +	case ION_HEAP_TYPE_DMA: +		ion_cma_heap_destroy(heap); +		break; +	default: +		pr_err("%s: Invalid heap type %d\n", __func__, +		       heap->type); +	} +} diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c new file mode 100644 index 00000000000..5864f3dfcbc --- /dev/null +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -0,0 +1,182 @@ +/* + * drivers/staging/android/ion/ion_mem_pool.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/debugfs.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/swap.h> +#include "ion_priv.h" + +static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool) +{ +	struct page *page = alloc_pages(pool->gfp_mask, pool->order); + +	if (!page) +		return NULL; +	ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, +						DMA_BIDIRECTIONAL); +	return page; +} + +static void ion_page_pool_free_pages(struct ion_page_pool *pool, +				     struct page *page) +{ +	__free_pages(page, pool->order); +} + +static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) +{ +	mutex_lock(&pool->mutex); +	if (PageHighMem(page)) { +		list_add_tail(&page->lru, &pool->high_items); +		pool->high_count++; +	} else { +		list_add_tail(&page->lru, &pool->low_items); +		pool->low_count++; +	} +	mutex_unlock(&pool->mutex); +	return 0; +} + +static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) +{ +	struct page *page; + +	if (high) { +		BUG_ON(!pool->high_count); +		page = list_first_entry(&pool->high_items, struct page, lru); +		pool->high_count--; +	} else { +		BUG_ON(!pool->low_count); +		page = list_first_entry(&pool->low_items, struct page, lru); +		pool->low_count--; +	} + +	list_del(&page->lru); +	return page; +} + +struct page *ion_page_pool_alloc(struct ion_page_pool *pool) +{ +	struct page *page = NULL; + +	BUG_ON(!pool); + +	mutex_lock(&pool->mutex); +	if (pool->high_count) +		page = ion_page_pool_remove(pool, true); +	else if (pool->low_count) +		page = ion_page_pool_remove(pool, false); +	mutex_unlock(&pool->mutex); + +	if (!page) +		page = ion_page_pool_alloc_pages(pool); + +	return page; +} + +void ion_page_pool_free(struct ion_page_pool *pool, struct page *page) +{ +	int ret; + +	BUG_ON(pool->order != compound_order(page)); + +	ret = ion_page_pool_add(pool, page); +	if (ret) +		ion_page_pool_free_pages(pool, page); +} + +static int ion_page_pool_total(struct ion_page_pool *pool, bool high) +{ +	int count = pool->low_count; + +	if (high) +		count += pool->high_count; + +	return count << pool->order; +} + +int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, +				int nr_to_scan) +{ +	int freed; +	bool high; + +	if (current_is_kswapd()) +		high = 1; +	else +		high = !!(gfp_mask & __GFP_HIGHMEM); + +	if (nr_to_scan == 0) +		return ion_page_pool_total(pool, high); + +	for (freed = 0; freed < nr_to_scan; freed++) { +		struct page *page; + +		mutex_lock(&pool->mutex); +		if (pool->low_count) { +			page = ion_page_pool_remove(pool, false); +		} else if (high && pool->high_count) { +			page = ion_page_pool_remove(pool, true); +		} else { +			mutex_unlock(&pool->mutex); +			break; +		} +		mutex_unlock(&pool->mutex); +		ion_page_pool_free_pages(pool, page); +	} + +	return freed; +} + +struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) +{ +	struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool), +					     GFP_KERNEL); +	if (!pool) +		return NULL; +	pool->high_count = 0; +	pool->low_count = 0; +	INIT_LIST_HEAD(&pool->low_items); +	INIT_LIST_HEAD(&pool->high_items); +	pool->gfp_mask = gfp_mask | __GFP_COMP; +	pool->order = order; +	mutex_init(&pool->mutex); +	plist_node_init(&pool->list, order); + +	return pool; +} + +void ion_page_pool_destroy(struct ion_page_pool *pool) +{ +	kfree(pool); +} + +static int __init ion_page_pool_init(void) +{ +	return 0; +} + 
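/*
 * Illustrative sketch (not part of this patch): how a heap backend might
 * drive the ion_page_pool API implemented above (ion_page_pool_create,
 * ion_page_pool_alloc, ion_page_pool_free, ion_page_pool_shrink,
 * ion_page_pool_destroy).  The wrapper names (example_pool_*) and the
 * particular GFP flags/order are assumptions chosen for illustration;
 * the in-tree user is ion_system_heap.c later in this patch.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include "ion_priv.h"

static struct ion_page_pool *example_pool;

static int example_pool_init(void)
{
	/*
	 * One pool of order-4 highmem pages.  Only freshly allocated pages
	 * see __GFP_ZERO; pages recycled from the pool come back as-is,
	 * which is why heaps zero uncached buffers before pooling them.
	 */
	example_pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 4);
	return example_pool ? 0 : -ENOMEM;
}

static struct page *example_pool_get_page(void)
{
	/* Hands back a pooled page, or falls back to alloc_pages() internally. */
	return ion_page_pool_alloc(example_pool);
}

static void example_pool_put_page(struct page *page)
{
	/*
	 * The page must match the pool's order (typically one obtained from
	 * ion_page_pool_alloc()); it is cached for reuse, not freed.
	 */
	ion_page_pool_free(example_pool, page);
}

static int example_pool_shrink(gfp_t gfp_mask, int nr_to_scan)
{
	/*
	 * With nr_to_scan == 0 this only reports the current pool size in
	 * pages; otherwise it releases up to nr_to_scan pages back to the
	 * system and returns how many were freed.
	 */
	return ion_page_pool_shrink(example_pool, gfp_mask, nr_to_scan);
}

static void example_pool_exit(void)
{
	ion_page_pool_destroy(example_pool);
}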
+static void __exit ion_page_pool_exit(void) +{ +} + +module_init(ion_page_pool_init); +module_exit(ion_page_pool_exit); diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h new file mode 100644 index 00000000000..c8f01757abf --- /dev/null +++ b/drivers/staging/android/ion/ion_priv.h @@ -0,0 +1,405 @@ +/* + * drivers/staging/android/ion/ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _ION_PRIV_H +#define _ION_PRIV_H + +#include <linux/device.h> +#include <linux/dma-direction.h> +#include <linux/kref.h> +#include <linux/mm_types.h> +#include <linux/mutex.h> +#include <linux/rbtree.h> +#include <linux/sched.h> +#include <linux/shrinker.h> +#include <linux/types.h> + +#include "ion.h" + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle); + +/** + * struct ion_buffer - metadata for a particular buffer + * @ref:		refernce count + * @node:		node in the ion_device buffers tree + * @dev:		back pointer to the ion_device + * @heap:		back pointer to the heap the buffer came from + * @flags:		buffer specific flags + * @private_flags:	internal buffer specific flags + * @size:		size of the buffer + * @priv_virt:		private data to the buffer representable as + *			a void * + * @priv_phys:		private data to the buffer representable as + *			an ion_phys_addr_t (and someday a phys_addr_t) + * @lock:		protects the buffers cnt fields + * @kmap_cnt:		number of times the buffer is mapped to the kernel + * @vaddr:		the kenrel mapping if kmap_cnt is not zero + * @dmap_cnt:		number of times the buffer is mapped for dma + * @sg_table:		the sg table for the buffer if dmap_cnt is not zero + * @pages:		flat array of pages in the buffer -- used by fault + *			handler and only valid for buffers that are faulted in + * @vmas:		list of vma's mapping this buffer + * @handle_count:	count of handles referencing this buffer + * @task_comm:		taskcomm of last client to reference this buffer in a + *			handle, used for debugging + * @pid:		pid of last client to reference this buffer in a + *			handle, used for debugging +*/ +struct ion_buffer { +	struct kref ref; +	union { +		struct rb_node node; +		struct list_head list; +	}; +	struct ion_device *dev; +	struct ion_heap *heap; +	unsigned long flags; +	unsigned long private_flags; +	size_t size; +	union { +		void *priv_virt; +		ion_phys_addr_t priv_phys; +	}; +	struct mutex lock; +	int kmap_cnt; +	void *vaddr; +	int dmap_cnt; +	struct sg_table *sg_table; +	struct page **pages; +	struct list_head vmas; +	/* used to track orphaned buffers */ +	int handle_count; +	char task_comm[TASK_COMM_LEN]; +	pid_t pid; +}; +void ion_buffer_destroy(struct ion_buffer *buffer); + +/** + * struct ion_heap_ops - ops to operate on a given heap + * @allocate:		allocate memory + * @free:		free memory + * @phys		get physical address of a buffer (only define on + *			physically contiguous heaps) + * @map_dma		map the memory for dma to a scatterlist + * @unmap_dma		unmap the memory for dma + * @map_kernel		map memory to the kernel + * @unmap_kernel	unmap memory to the 
kernel + * @map_user		map memory to userspace + * + * allocate, phys, and map_user return 0 on success, -errno on error. + * map_dma and map_kernel return pointer on success, ERR_PTR on + * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in + * the buffer's private_flags when called from a shrinker. In that + * case, the pages being free'd must be truly free'd back to the + * system, not put in a page pool or otherwise cached. + */ +struct ion_heap_ops { +	int (*allocate)(struct ion_heap *heap, +			struct ion_buffer *buffer, unsigned long len, +			unsigned long align, unsigned long flags); +	void (*free)(struct ion_buffer *buffer); +	int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer, +		    ion_phys_addr_t *addr, size_t *len); +	struct sg_table * (*map_dma)(struct ion_heap *heap, +				     struct ion_buffer *buffer); +	void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer); +	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); +	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); +	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer, +			struct vm_area_struct *vma); +	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan); +}; + +/** + * heap flags - flags between the heaps and core ion code + */ +#define ION_HEAP_FLAG_DEFER_FREE (1 << 0) + +/** + * private flags - flags internal to ion + */ +/* + * Buffer is being freed from a shrinker function. Skip any possible + * heap-specific caching mechanism (e.g. page pools). Guarantees that + * any buffer storage that came from the system allocator will be + * returned to the system allocator. + */ +#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0) + +/** + * struct ion_heap - represents a heap in the system + * @node:		rb node to put the heap on the device's tree of heaps + * @dev:		back pointer to the ion_device + * @type:		type of heap + * @ops:		ops struct as above + * @flags:		flags + * @id:			id of heap, also indicates priority of this heap when + *			allocating.  These are specified by platform data and + *			MUST be unique + * @name:		used for debugging + * @shrinker:		a shrinker for the heap + * @free_list:		free list head if deferred free is used + * @free_list_size	size of the deferred free list in bytes + * @lock:		protects the free list + * @waitqueue:		queue to wait on from deferred free thread + * @task:		task struct of deferred free thread + * @debug_show:		called when heap debug file is read to add any + *			heap specific debug info to output + * + * Represents a pool of memory from which buffers can be made.  In some + * systems the only heap is regular system memory allocated via vmalloc. + * On others, some blocks might require large physically contiguous buffers + * that are allocated from a specially reserved heap. 
+ */ +struct ion_heap { +	struct plist_node node; +	struct ion_device *dev; +	enum ion_heap_type type; +	struct ion_heap_ops *ops; +	unsigned long flags; +	unsigned int id; +	const char *name; +	struct shrinker shrinker; +	struct list_head free_list; +	size_t free_list_size; +	spinlock_t free_lock; +	wait_queue_head_t waitqueue; +	struct task_struct *task; + +	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *); +}; + +/** + * ion_buffer_cached - this ion buffer is cached + * @buffer:		buffer + * + * indicates whether this ion buffer is cached + */ +bool ion_buffer_cached(struct ion_buffer *buffer); + +/** + * ion_buffer_fault_user_mappings - fault in user mappings of this buffer + * @buffer:		buffer + * + * indicates whether userspace mappings of this buffer will be faulted + * in, this can affect how buffers are allocated from the heap. + */ +bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer); + +/** + * ion_device_create - allocates and returns an ion device + * @custom_ioctl:	arch specific ioctl function if applicable + * + * returns a valid device or -PTR_ERR + */ +struct ion_device *ion_device_create(long (*custom_ioctl) +				     (struct ion_client *client, +				      unsigned int cmd, +				      unsigned long arg)); + +/** + * ion_device_destroy - free and device and it's resource + * @dev:		the device + */ +void ion_device_destroy(struct ion_device *dev); + +/** + * ion_device_add_heap - adds a heap to the ion device + * @dev:		the device + * @heap:		the heap to add + */ +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); + +/** + * some helpers for common operations on buffers using the sg_table + * and vaddr fields + */ +void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *); +void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *); +int ion_heap_map_user(struct ion_heap *, struct ion_buffer *, +			struct vm_area_struct *); +int ion_heap_buffer_zero(struct ion_buffer *buffer); +int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); + +/** + * ion_heap_init_shrinker + * @heap:		the heap + * + * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op + * this function will be called to setup a shrinker to shrink the freelists + * and call the heap's shrink op. + */ +void ion_heap_init_shrinker(struct ion_heap *heap); + +/** + * ion_heap_init_deferred_free -- initialize deferred free functionality + * @heap:		the heap + * + * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will + * be called to setup deferred frees. Calls to free the buffer will + * return immediately and the actual free will occur some time later + */ +int ion_heap_init_deferred_free(struct ion_heap *heap); + +/** + * ion_heap_freelist_add - add a buffer to the deferred free list + * @heap:		the heap + * @buffer:		the buffer + * + * Adds an item to the deferred freelist. + */ +void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer); + +/** + * ion_heap_freelist_drain - drain the deferred free list + * @heap:		the heap + * @size:		ammount of memory to drain in bytes + * + * Drains the indicated amount of memory from the deferred freelist immediately. + * Returns the total amount freed.  The total freed may be higher depending + * on the size of the items in the list, or lower if there is insufficient + * total memory on the freelist. 
+ */ +size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); + +/** + * ion_heap_freelist_shrink - drain the deferred free + *				list, skipping any heap-specific + *				pooling or caching mechanisms + * + * @heap:		the heap + * @size:		amount of memory to drain in bytes + * + * Drains the indicated amount of memory from the deferred freelist immediately. + * Returns the total amount freed.  The total freed may be higher depending + * on the size of the items in the list, or lower if there is insufficient + * total memory on the freelist. + * + * Unlike with @ion_heap_freelist_drain, don't put any pages back into + * page pools or otherwise cache the pages. Everything must be + * genuinely free'd back to the system. If you're free'ing from a + * shrinker you probably want to use this. Note that this relies on + * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE + * flag. + */ +size_t ion_heap_freelist_shrink(struct ion_heap *heap, +					size_t size); + +/** + * ion_heap_freelist_size - returns the size of the freelist in bytes + * @heap:		the heap + */ +size_t ion_heap_freelist_size(struct ion_heap *heap); + + +/** + * functions for creating and destroying the built in ion heaps. + * architectures can add their own custom architecture specific + * heaps as appropriate. + */ + +struct ion_heap *ion_heap_create(struct ion_platform_heap *); +void ion_heap_destroy(struct ion_heap *); +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *); +void ion_system_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); +void ion_system_contig_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); +void ion_carveout_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *); +void ion_chunk_heap_destroy(struct ion_heap *); +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *); +void ion_cma_heap_destroy(struct ion_heap *); + +/** + * kernel api to allocate/free from carveout -- used when carveout is + * used to back an architecture specific custom heap + */ +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, +				      unsigned long align); +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, +		       unsigned long size); +/** + * The carveout heap returns physical addresses, since 0 may be a valid + * physical address, this is used to indicate allocation failed + */ +#define ION_CARVEOUT_ALLOCATE_FAIL -1 + +/** + * functions for creating and destroying a heap pool -- allows you + * to keep a pool of pre allocated memory to use from your heap.  Keeping + * a pool of memory that is ready for dma, ie any cached mapping have been + * invalidated from the cache, provides a significant peformance benefit on + * many systems */ + +/** + * struct ion_page_pool - pagepool struct + * @high_count:		number of highmem items in the pool + * @low_count:		number of lowmem items in the pool + * @high_items:		list of highmem items + * @low_items:		list of lowmem items + * @mutex:		lock protecting this struct and especially the count + *			item list + * @gfp_mask:		gfp_mask to use from alloc + * @order:		order of pages in the pool + * @list:		plist node for list of pools + * + * Allows you to keep a pool of pre allocated pages to use from your heap. 
+ * Keeping a pool of pages that is ready for dma, ie any cached mapping have + * been invalidated from the cache, provides a significant peformance benefit + * on many systems + */ +struct ion_page_pool { +	int high_count; +	int low_count; +	struct list_head high_items; +	struct list_head low_items; +	struct mutex mutex; +	gfp_t gfp_mask; +	unsigned int order; +	struct plist_node list; +}; + +struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order); +void ion_page_pool_destroy(struct ion_page_pool *); +struct page *ion_page_pool_alloc(struct ion_page_pool *); +void ion_page_pool_free(struct ion_page_pool *, struct page *); + +/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool + * @pool:		the pool + * @gfp_mask:		the memory type to reclaim + * @nr_to_scan:		number of items to shrink in pages + * + * returns the number of items freed in pages + */ +int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, +			  int nr_to_scan); + +/** + * ion_pages_sync_for_device - cache flush pages for use with the specified + *                             device + * @dev:		the device the pages will be used with + * @page:		the first page to be flushed + * @size:		size in bytes of region to be flushed + * @dir:		direction of dma transfer + */ +void ion_pages_sync_for_device(struct device *dev, struct page *page, +		size_t size, enum dma_data_direction dir); + +#endif /* _ION_PRIV_H */ diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c new file mode 100644 index 00000000000..cb7ae08a5e2 --- /dev/null +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -0,0 +1,446 @@ +/* + * drivers/staging/android/ion/ion_system_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#include <asm/page.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/highmem.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | +				     __GFP_NORETRY) & ~__GFP_WAIT; +static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN); +static const unsigned int orders[] = {8, 4, 0}; +static const int num_orders = ARRAY_SIZE(orders); +static int order_to_index(unsigned int order) +{ +	int i; + +	for (i = 0; i < num_orders; i++) +		if (order == orders[i]) +			return i; +	BUG(); +	return -1; +} + +static inline unsigned int order_to_size(int order) +{ +	return PAGE_SIZE << order; +} + +struct ion_system_heap { +	struct ion_heap heap; +	struct ion_page_pool **pools; +}; + +struct page_info { +	struct page *page; +	unsigned int order; +	struct list_head list; +}; + +static struct page *alloc_buffer_page(struct ion_system_heap *heap, +				      struct ion_buffer *buffer, +				      unsigned long order) +{ +	bool cached = ion_buffer_cached(buffer); +	struct ion_page_pool *pool = heap->pools[order_to_index(order)]; +	struct page *page; + +	if (!cached) { +		page = ion_page_pool_alloc(pool); +	} else { +		gfp_t gfp_flags = low_order_gfp_flags; + +		if (order > 4) +			gfp_flags = high_order_gfp_flags; +		page = alloc_pages(gfp_flags | __GFP_COMP, order); +		if (!page) +			return NULL; +		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order, +						DMA_BIDIRECTIONAL); +	} + +	return page; +} + +static void free_buffer_page(struct ion_system_heap *heap, +			     struct ion_buffer *buffer, struct page *page, +			     unsigned int order) +{ +	bool cached = ion_buffer_cached(buffer); + +	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) { +		struct ion_page_pool *pool = heap->pools[order_to_index(order)]; + +		ion_page_pool_free(pool, page); +	} else { +		__free_pages(page, order); +	} +} + + +static struct page_info *alloc_largest_available(struct ion_system_heap *heap, +						 struct ion_buffer *buffer, +						 unsigned long size, +						 unsigned int max_order) +{ +	struct page *page; +	struct page_info *info; +	int i; + +	info = kmalloc(sizeof(struct page_info), GFP_KERNEL); +	if (!info) +		return NULL; + +	for (i = 0; i < num_orders; i++) { +		if (size < order_to_size(orders[i])) +			continue; +		if (max_order < orders[i]) +			continue; + +		page = alloc_buffer_page(heap, buffer, orders[i]); +		if (!page) +			continue; + +		info->page = page; +		info->order = orders[i]; +		return info; +	} +	kfree(info); + +	return NULL; +} + +static int ion_system_heap_allocate(struct ion_heap *heap, +				     struct ion_buffer *buffer, +				     unsigned long size, unsigned long align, +				     unsigned long flags) +{ +	struct ion_system_heap *sys_heap = container_of(heap, +							struct ion_system_heap, +							heap); +	struct sg_table *table; +	struct scatterlist *sg; +	struct list_head pages; +	struct page_info *info, *tmp_info; +	int i = 0; +	unsigned long size_remaining = PAGE_ALIGN(size); +	unsigned int max_order = orders[0]; + +	if (align > PAGE_SIZE) +		return -EINVAL; + +	if (size / PAGE_SIZE > totalram_pages / 2) +		return -ENOMEM; + +	INIT_LIST_HEAD(&pages); +	while (size_remaining > 0) { +		info = alloc_largest_available(sys_heap, buffer, size_remaining, +						max_order); +		if (!info) +			goto free_pages; +		
list_add_tail(&info->list, &pages); +		size_remaining -= PAGE_SIZE << info->order; +		max_order = info->order; +		i++; +	} +	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); +	if (!table) +		goto free_pages; + +	if (sg_alloc_table(table, i, GFP_KERNEL)) +		goto free_table; + +	sg = table->sgl; +	list_for_each_entry_safe(info, tmp_info, &pages, list) { +		struct page *page = info->page; +		sg_set_page(sg, page, PAGE_SIZE << info->order, 0); +		sg = sg_next(sg); +		list_del(&info->list); +		kfree(info); +	} + +	buffer->priv_virt = table; +	return 0; + +free_table: +	kfree(table); +free_pages: +	list_for_each_entry_safe(info, tmp_info, &pages, list) { +		free_buffer_page(sys_heap, buffer, info->page, info->order); +		kfree(info); +	} +	return -ENOMEM; +} + +static void ion_system_heap_free(struct ion_buffer *buffer) +{ +	struct ion_system_heap *sys_heap = container_of(buffer->heap, +							struct ion_system_heap, +							heap); +	struct sg_table *table = buffer->sg_table; +	bool cached = ion_buffer_cached(buffer); +	struct scatterlist *sg; +	int i; + +	/* uncached pages come from the page pools, zero them before returning +	   for security purposes (other allocations are zerod at alloc time */ +	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) +		ion_heap_buffer_zero(buffer); + +	for_each_sg(table->sgl, sg, table->nents, i) +		free_buffer_page(sys_heap, buffer, sg_page(sg), +				get_order(sg->length)); +	sg_free_table(table); +	kfree(table); +} + +static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap, +						struct ion_buffer *buffer) +{ +	return buffer->priv_virt; +} + +static void ion_system_heap_unmap_dma(struct ion_heap *heap, +				      struct ion_buffer *buffer) +{ +	return; +} + +static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, +					int nr_to_scan) +{ +	struct ion_system_heap *sys_heap; +	int nr_total = 0; +	int i; + +	sys_heap = container_of(heap, struct ion_system_heap, heap); + +	for (i = 0; i < num_orders; i++) { +		struct ion_page_pool *pool = sys_heap->pools[i]; + +		nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan); +	} + +	return nr_total; +} + +static struct ion_heap_ops system_heap_ops = { +	.allocate = ion_system_heap_allocate, +	.free = ion_system_heap_free, +	.map_dma = ion_system_heap_map_dma, +	.unmap_dma = ion_system_heap_unmap_dma, +	.map_kernel = ion_heap_map_kernel, +	.unmap_kernel = ion_heap_unmap_kernel, +	.map_user = ion_heap_map_user, +	.shrink = ion_system_heap_shrink, +}; + +static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, +				      void *unused) +{ + +	struct ion_system_heap *sys_heap = container_of(heap, +							struct ion_system_heap, +							heap); +	int i; + +	for (i = 0; i < num_orders; i++) { +		struct ion_page_pool *pool = sys_heap->pools[i]; + +		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n", +			   pool->high_count, pool->order, +			   (PAGE_SIZE << pool->order) * pool->high_count); +		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n", +			   pool->low_count, pool->order, +			   (PAGE_SIZE << pool->order) * pool->low_count); +	} +	return 0; +} + +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) +{ +	struct ion_system_heap *heap; +	int i; + +	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL); +	if (!heap) +		return ERR_PTR(-ENOMEM); +	heap->heap.ops = &system_heap_ops; +	heap->heap.type = ION_HEAP_TYPE_SYSTEM; +	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; +	heap->pools = 
kzalloc(sizeof(struct ion_page_pool *) * num_orders, +			      GFP_KERNEL); +	if (!heap->pools) +		goto free_heap; +	for (i = 0; i < num_orders; i++) { +		struct ion_page_pool *pool; +		gfp_t gfp_flags = low_order_gfp_flags; + +		if (orders[i] > 4) +			gfp_flags = high_order_gfp_flags; +		pool = ion_page_pool_create(gfp_flags, orders[i]); +		if (!pool) +			goto destroy_pools; +		heap->pools[i] = pool; +	} + +	heap->heap.debug_show = ion_system_heap_debug_show; +	return &heap->heap; + +destroy_pools: +	while (i--) +		ion_page_pool_destroy(heap->pools[i]); +	kfree(heap->pools); +free_heap: +	kfree(heap); +	return ERR_PTR(-ENOMEM); +} + +void ion_system_heap_destroy(struct ion_heap *heap) +{ +	struct ion_system_heap *sys_heap = container_of(heap, +							struct ion_system_heap, +							heap); +	int i; + +	for (i = 0; i < num_orders; i++) +		ion_page_pool_destroy(sys_heap->pools[i]); +	kfree(sys_heap->pools); +	kfree(sys_heap); +} + +static int ion_system_contig_heap_allocate(struct ion_heap *heap, +					   struct ion_buffer *buffer, +					   unsigned long len, +					   unsigned long align, +					   unsigned long flags) +{ +	int order = get_order(len); +	struct page *page; +	struct sg_table *table; +	unsigned long i; +	int ret; + +	if (align > (PAGE_SIZE << order)) +		return -EINVAL; + +	page = alloc_pages(low_order_gfp_flags, order); +	if (!page) +		return -ENOMEM; + +	split_page(page, order); + +	len = PAGE_ALIGN(len); +	for (i = len >> PAGE_SHIFT; i < (1 << order); i++) +		__free_page(page + i); + +	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); +	if (!table) { +		ret = -ENOMEM; +		goto free_pages; +	} + +	ret = sg_alloc_table(table, 1, GFP_KERNEL); +	if (ret) +		goto free_table; + +	sg_set_page(table->sgl, page, len, 0); + +	buffer->priv_virt = table; + +	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL); + +	return 0; + +free_table: +	kfree(table); +free_pages: +	for (i = 0; i < len >> PAGE_SHIFT; i++) +		__free_page(page + i); + +	return ret; +} + +static void ion_system_contig_heap_free(struct ion_buffer *buffer) +{ +	struct sg_table *table = buffer->priv_virt; +	struct page *page = sg_page(table->sgl); +	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; +	unsigned long i; + +	for (i = 0; i < pages; i++) +		__free_page(page + i); +	sg_free_table(table); +	kfree(table); +} + +static int ion_system_contig_heap_phys(struct ion_heap *heap, +				       struct ion_buffer *buffer, +				       ion_phys_addr_t *addr, size_t *len) +{ +	struct sg_table *table = buffer->priv_virt; +	struct page *page = sg_page(table->sgl); +	*addr = page_to_phys(page); +	*len = buffer->size; +	return 0; +} + +static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap, +						struct ion_buffer *buffer) +{ +	return buffer->priv_virt; +} + +static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap, +					     struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops kmalloc_ops = { +	.allocate = ion_system_contig_heap_allocate, +	.free = ion_system_contig_heap_free, +	.phys = ion_system_contig_heap_phys, +	.map_dma = ion_system_contig_heap_map_dma, +	.unmap_dma = ion_system_contig_heap_unmap_dma, +	.map_kernel = ion_heap_map_kernel, +	.unmap_kernel = ion_heap_unmap_kernel, +	.map_user = ion_heap_map_user, +}; + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) +{ +	struct ion_heap *heap; + +	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); +	if (!heap) +		return ERR_PTR(-ENOMEM); +	heap->ops = &kmalloc_ops; +	heap->type 
= ION_HEAP_TYPE_SYSTEM_CONTIG; +	return heap; +} + +void ion_system_contig_heap_destroy(struct ion_heap *heap) +{ +	kfree(heap); +} diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c new file mode 100644 index 00000000000..654acb5c8eb --- /dev/null +++ b/drivers/staging/android/ion/ion_test.c @@ -0,0 +1,282 @@ +/* + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "ion-test: " fmt + +#include <linux/dma-buf.h> +#include <linux/dma-direction.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> + +#include "ion.h" +#include "../uapi/ion_test.h" + +#define u64_to_uptr(x) ((void __user *)(unsigned long)(x)) + +struct ion_test_device { +	struct miscdevice misc; +}; + +struct ion_test_data { +	struct dma_buf *dma_buf; +	struct device *dev; +}; + +static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf, +		void __user *ptr, size_t offset, size_t size, bool write) +{ +	int ret = 0; +	struct dma_buf_attachment *attach; +	struct sg_table *table; +	pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL); +	enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; +	struct sg_page_iter sg_iter; +	unsigned long offset_page; + +	attach = dma_buf_attach(dma_buf, dev); +	if (IS_ERR(attach)) +		return PTR_ERR(attach); + +	table = dma_buf_map_attachment(attach, dir); +	if (IS_ERR(table)) +		return PTR_ERR(table); + +	offset_page = offset >> PAGE_SHIFT; +	offset %= PAGE_SIZE; + +	for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) { +		struct page *page = sg_page_iter_page(&sg_iter); +		void *vaddr = vmap(&page, 1, VM_MAP, pgprot); +		size_t to_copy = PAGE_SIZE - offset; + +		to_copy = min(to_copy, size); +		if (!vaddr) { +			ret = -ENOMEM; +			goto err; +		} + +		if (write) +			ret = copy_from_user(vaddr + offset, ptr, to_copy); +		else +			ret = copy_to_user(ptr, vaddr + offset, to_copy); + +		vunmap(vaddr); +		if (ret) { +			ret = -EFAULT; +			goto err; +		} +		size -= to_copy; +		if (!size) +			break; +		ptr += to_copy; +		offset = 0; +	} + +err: +	dma_buf_unmap_attachment(attach, table, dir); +	dma_buf_detach(dma_buf, attach); +	return ret; +} + +static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr, +		size_t offset, size_t size, bool write) +{ +	int ret; +	unsigned long page_offset = offset >> PAGE_SHIFT; +	size_t copy_offset = offset % PAGE_SIZE; +	size_t copy_size = size; +	enum dma_data_direction dir = write ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; + +	if (offset > dma_buf->size || size > dma_buf->size - offset) +		return -EINVAL; + +	ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir); +	if (ret) +		return ret; + +	while (copy_size > 0) { +		size_t to_copy; +		void *vaddr = dma_buf_kmap(dma_buf, page_offset); + +		if (!vaddr) +			goto err; + +		to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size); + +		if (write) +			ret = copy_from_user(vaddr + copy_offset, ptr, to_copy); +		else +			ret = copy_to_user(ptr, vaddr + copy_offset, to_copy); + +		dma_buf_kunmap(dma_buf, page_offset, vaddr); +		if (ret) { +			ret = -EFAULT; +			goto err; +		} + +		copy_size -= to_copy; +		ptr += to_copy; +		page_offset++; +		copy_offset = 0; +	} +err: +	dma_buf_end_cpu_access(dma_buf, offset, size, dir); +	return ret; +} + +static long ion_test_ioctl(struct file *filp, unsigned int cmd, +						unsigned long arg) +{ +	struct ion_test_data *test_data = filp->private_data; +	int ret = 0; + +	union { +		struct ion_test_rw_data test_rw; +	} data; + +	if (_IOC_SIZE(cmd) > sizeof(data)) +		return -EINVAL; + +	if (_IOC_DIR(cmd) & _IOC_WRITE) +		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) +			return -EFAULT; + +	switch (cmd) { +	case ION_IOC_TEST_SET_FD: +	{ +		struct dma_buf *dma_buf = NULL; +		int fd = arg; + +		if (fd >= 0) { +			dma_buf = dma_buf_get((int)arg); +			if (IS_ERR(dma_buf)) +				return PTR_ERR(dma_buf); +		} +		if (test_data->dma_buf) +			dma_buf_put(test_data->dma_buf); +		test_data->dma_buf = dma_buf; +		break; +	} +	case ION_IOC_TEST_DMA_MAPPING: +	{ +		ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf, +					u64_to_uptr(data.test_rw.ptr), +					data.test_rw.offset, data.test_rw.size, +					data.test_rw.write); +		break; +	} +	case ION_IOC_TEST_KERNEL_MAPPING: +	{ +		ret = ion_handle_test_kernel(test_data->dma_buf, +					u64_to_uptr(data.test_rw.ptr), +					data.test_rw.offset, data.test_rw.size, +					data.test_rw.write); +		break; +	} +	default: +		return -ENOTTY; +	} + +	if (_IOC_DIR(cmd) & _IOC_READ) { +		if (copy_to_user((void __user *)arg, &data, sizeof(data))) +			return -EFAULT; +	} +	return ret; +} + +static int ion_test_open(struct inode *inode, struct file *file) +{ +	struct ion_test_data *data; +	struct miscdevice *miscdev = file->private_data; + +	data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL); +	if (!data) +		return -ENOMEM; + +	data->dev = miscdev->parent; + +	file->private_data = data; + +	return 0; +} + +static int ion_test_release(struct inode *inode, struct file *file) +{ +	struct ion_test_data *data = file->private_data; + +	kfree(data); + +	return 0; +} + +static const struct file_operations ion_test_fops = { +	.owner = THIS_MODULE, +	.unlocked_ioctl = ion_test_ioctl, +	.compat_ioctl = ion_test_ioctl, +	.open = ion_test_open, +	.release = ion_test_release, +}; + +static int __init ion_test_probe(struct platform_device *pdev) +{ +	int ret; +	struct ion_test_device *testdev; + +	testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device), +				GFP_KERNEL); +	if (!testdev) +		return -ENOMEM; + +	testdev->misc.minor = MISC_DYNAMIC_MINOR; +	testdev->misc.name = "ion-test"; +	testdev->misc.fops = &ion_test_fops; +	testdev->misc.parent = &pdev->dev; +	ret = misc_register(&testdev->misc); +	if (ret) { +		pr_err("failed to register misc device.\n"); +		return ret; +	} + +	platform_set_drvdata(pdev, testdev); + +	return 0; +} + +static struct platform_driver ion_test_platform_driver = { +	.driver = { +		.name = "ion-test", +	}, +}; + +static 
int __init ion_test_init(void) +{ +	platform_device_register_simple("ion-test", -1, NULL, 0); +	return platform_driver_probe(&ion_test_platform_driver, ion_test_probe); +} + +static void __exit ion_test_exit(void) +{ +	platform_driver_unregister(&ion_test_platform_driver); +} + +module_init(ion_test_init); +module_exit(ion_test_exit); diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile new file mode 100644 index 00000000000..11cd003fb08 --- /dev/null +++ b/drivers/staging/android/ion/tegra/Makefile @@ -0,0 +1 @@ +obj-y += tegra_ion.o diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c new file mode 100644 index 00000000000..11c7cceb3c7 --- /dev/null +++ b/drivers/staging/android/ion/tegra/tegra_ion.c @@ -0,0 +1,82 @@ +/* + * drivers/gpu/tegra/tegra_ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include "../ion.h" +#include "../ion_priv.h" + +static struct ion_device *idev; +static int num_heaps; +static struct ion_heap **heaps; + +static int tegra_ion_probe(struct platform_device *pdev) +{ +	struct ion_platform_data *pdata = pdev->dev.platform_data; +	int err; +	int i; + +	num_heaps = pdata->nr; + +	heaps = devm_kzalloc(&pdev->dev, +			     sizeof(struct ion_heap *) * pdata->nr, +			     GFP_KERNEL); + +	idev = ion_device_create(NULL); +	if (IS_ERR_OR_NULL(idev)) +		return PTR_ERR(idev); + +	/* create the heaps as specified in the board file */ +	for (i = 0; i < num_heaps; i++) { +		struct ion_platform_heap *heap_data = &pdata->heaps[i]; + +		heaps[i] = ion_heap_create(heap_data); +		if (IS_ERR_OR_NULL(heaps[i])) { +			err = PTR_ERR(heaps[i]); +			goto err; +		} +		ion_device_add_heap(idev, heaps[i]); +	} +	platform_set_drvdata(pdev, idev); +	return 0; +err: +	for (i = 0; i < num_heaps; i++) { +		if (heaps[i]) +			ion_heap_destroy(heaps[i]); +	} +	return err; +} + +static int tegra_ion_remove(struct platform_device *pdev) +{ +	struct ion_device *idev = platform_get_drvdata(pdev); +	int i; + +	ion_device_destroy(idev); +	for (i = 0; i < num_heaps; i++) +		ion_heap_destroy(heaps[i]); +	return 0; +} + +static struct platform_driver ion_driver = { +	.probe = tegra_ion_probe, +	.remove = tegra_ion_remove, +	.driver = { .name = "ion-tegra" } +}; + +module_platform_driver(ion_driver); + diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c new file mode 100644 index 00000000000..2772e01b37f --- /dev/null +++ b/drivers/staging/android/logger.c @@ -0,0 +1,854 @@ +/* + * drivers/misc/logger.c + * + * A Logging Subsystem + * + * Copyright (C) 2007-2008 Google, Inc. + * + * Robert Love <rlove@google.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "logger: " fmt + +#include <linux/sched.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/time.h> +#include <linux/vmalloc.h> +#include <linux/aio.h> +#include "logger.h" + +#include <asm/ioctls.h> + +/** + * struct logger_log - represents a specific log, such as 'main' or 'radio' + * @buffer:	The actual ring buffer + * @misc:	The "misc" device representing the log + * @wq:		The wait queue for @readers + * @readers:	This log's readers + * @mutex:	The mutex that protects the @buffer + * @w_off:	The current write head offset + * @head:	The head, or location that readers start reading at. + * @size:	The size of the log + * @logs:	The list of log channels + * + * This structure lives from module insertion until module removal, so it does + * not need additional reference counting. The structure is protected by the + * mutex 'mutex'. + */ +struct logger_log { +	unsigned char		*buffer; +	struct miscdevice	misc; +	wait_queue_head_t	wq; +	struct list_head	readers; +	struct mutex		mutex; +	size_t			w_off; +	size_t			head; +	size_t			size; +	struct list_head	logs; +}; + +static LIST_HEAD(log_list); + + +/** + * struct logger_reader - a logging device open for reading + * @log:	The associated log + * @list:	The associated entry in @logger_log's list + * @r_off:	The current read head offset. + * @r_all:	Reader can read all entries + * @r_ver:	Reader ABI version + * + * This object lives from open to release, so we don't need additional + * reference counting. The structure is protected by log->mutex. + */ +struct logger_reader { +	struct logger_log	*log; +	struct list_head	list; +	size_t			r_off; +	bool			r_all; +	int			r_ver; +}; + +/* logger_offset - returns index 'n' into the log via (optimized) modulus */ +static size_t logger_offset(struct logger_log *log, size_t n) +{ +	return n & (log->size - 1); +} + + +/* + * file_get_log - Given a file structure, return the associated log + * + * This isn't aesthetic. We have several goals: + * + *	1) Need to quickly obtain the associated log during an I/O operation + *	2) Readers need to maintain state (logger_reader) + *	3) Writers need to be very fast (open() should be a near no-op) + * + * In the reader case, we can trivially go file->logger_reader->logger_log. + * For a writer, we don't want to maintain a logger_reader, so we just go + * file->logger_log. Thus what file->private_data points at depends on whether + * or not the file was opened for reading. This function hides that dirtiness. + */ +static inline struct logger_log *file_get_log(struct file *file) +{ +	if (file->f_mode & FMODE_READ) { +		struct logger_reader *reader = file->private_data; + +		return reader->log; +	} else +		return file->private_data; +} + +/* + * get_entry_header - returns a pointer to the logger_entry header within + * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must + * be provided. Typically the return value will be a pointer within + * 'logger->buf'.  However, a pointer to 'scratch' may be returned if + * the log entry spans the end and beginning of the circular buffer. 
+ */ +static struct logger_entry *get_entry_header(struct logger_log *log, +		size_t off, struct logger_entry *scratch) +{ +	size_t len = min(sizeof(struct logger_entry), log->size - off); + +	if (len != sizeof(struct logger_entry)) { +		memcpy(((void *) scratch), log->buffer + off, len); +		memcpy(((void *) scratch) + len, log->buffer, +			sizeof(struct logger_entry) - len); +		return scratch; +	} + +	return (struct logger_entry *) (log->buffer + off); +} + +/* + * get_entry_msg_len - Grabs the length of the message of the entry + * starting from 'off'. + * + * An entry length is 2 bytes (16 bits) in host endian order. + * In the log, the length does not include the size of the log entry structure. + * This function returns the size including the log entry structure. + * + * Caller needs to hold log->mutex. + */ +static __u32 get_entry_msg_len(struct logger_log *log, size_t off) +{ +	struct logger_entry scratch; +	struct logger_entry *entry; + +	entry = get_entry_header(log, off, &scratch); +	return entry->len; +} + +static size_t get_user_hdr_len(int ver) +{ +	if (ver < 2) +		return sizeof(struct user_logger_entry_compat); +	else +		return sizeof(struct logger_entry); +} + +static ssize_t copy_header_to_user(int ver, struct logger_entry *entry, +					 char __user *buf) +{ +	void *hdr; +	size_t hdr_len; +	struct user_logger_entry_compat v1; + +	if (ver < 2) { +		v1.len      = entry->len; +		v1.__pad    = 0; +		v1.pid      = entry->pid; +		v1.tid      = entry->tid; +		v1.sec      = entry->sec; +		v1.nsec     = entry->nsec; +		hdr         = &v1; +		hdr_len     = sizeof(struct user_logger_entry_compat); +	} else { +		hdr         = entry; +		hdr_len     = sizeof(struct logger_entry); +	} + +	return copy_to_user(buf, hdr, hdr_len); +} + +/* + * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the + * user-space buffer 'buf'. Returns 'count' on success. + * + * Caller must hold log->mutex. + */ +static ssize_t do_read_log_to_user(struct logger_log *log, +				   struct logger_reader *reader, +				   char __user *buf, +				   size_t count) +{ +	struct logger_entry scratch; +	struct logger_entry *entry; +	size_t len; +	size_t msg_start; + +	/* +	 * First, copy the header to userspace, using the version of +	 * the header requested +	 */ +	entry = get_entry_header(log, reader->r_off, &scratch); +	if (copy_header_to_user(reader->r_ver, entry, buf)) +		return -EFAULT; + +	count -= get_user_hdr_len(reader->r_ver); +	buf += get_user_hdr_len(reader->r_ver); +	msg_start = logger_offset(log, +		reader->r_off + sizeof(struct logger_entry)); + +	/* +	 * We read from the msg in two disjoint operations. First, we read from +	 * the current msg head offset up to 'count' bytes or to the end of +	 * the log, whichever comes first. +	 */ +	len = min(count, log->size - msg_start); +	if (copy_to_user(buf, log->buffer + msg_start, len)) +		return -EFAULT; + +	/* +	 * Second, we read any remaining bytes, starting back at the head of +	 * the log. 
+	 */ +	if (count != len) +		if (copy_to_user(buf + len, log->buffer, count - len)) +			return -EFAULT; + +	reader->r_off = logger_offset(log, reader->r_off + +		sizeof(struct logger_entry) + count); + +	return count + get_user_hdr_len(reader->r_ver); +} + +/* + * get_next_entry_by_uid - Starting at 'off', returns an offset into + * 'log->buffer' which contains the first entry readable by 'euid' + */ +static size_t get_next_entry_by_uid(struct logger_log *log, +		size_t off, kuid_t euid) +{ +	while (off != log->w_off) { +		struct logger_entry *entry; +		struct logger_entry scratch; +		size_t next_len; + +		entry = get_entry_header(log, off, &scratch); + +		if (uid_eq(entry->euid, euid)) +			return off; + +		next_len = sizeof(struct logger_entry) + entry->len; +		off = logger_offset(log, off + next_len); +	} + +	return off; +} + +/* + * logger_read - our log's read() method + * + * Behavior: + * + *	- O_NONBLOCK works + *	- If there are no log entries to read, blocks until log is written to + *	- Atomically reads exactly one log entry + * + * Will set errno to EINVAL if read + * buffer is insufficient to hold next entry. + */ +static ssize_t logger_read(struct file *file, char __user *buf, +			   size_t count, loff_t *pos) +{ +	struct logger_reader *reader = file->private_data; +	struct logger_log *log = reader->log; +	ssize_t ret; +	DEFINE_WAIT(wait); + +start: +	while (1) { +		mutex_lock(&log->mutex); + +		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE); + +		ret = (log->w_off == reader->r_off); +		mutex_unlock(&log->mutex); +		if (!ret) +			break; + +		if (file->f_flags & O_NONBLOCK) { +			ret = -EAGAIN; +			break; +		} + +		if (signal_pending(current)) { +			ret = -EINTR; +			break; +		} + +		schedule(); +	} + +	finish_wait(&log->wq, &wait); +	if (ret) +		return ret; + +	mutex_lock(&log->mutex); + +	if (!reader->r_all) +		reader->r_off = get_next_entry_by_uid(log, +			reader->r_off, current_euid()); + +	/* is there still something to read or did we race? */ +	if (unlikely(log->w_off == reader->r_off)) { +		mutex_unlock(&log->mutex); +		goto start; +	} + +	/* get the size of the next entry */ +	ret = get_user_hdr_len(reader->r_ver) + +		get_entry_msg_len(log, reader->r_off); +	if (count < ret) { +		ret = -EINVAL; +		goto out; +	} + +	/* get exactly one entry from the log */ +	ret = do_read_log_to_user(log, reader, buf, ret); + +out: +	mutex_unlock(&log->mutex); + +	return ret; +} + +/* + * get_next_entry - return the offset of the first valid entry at least 'len' + * bytes after 'off'. + * + * Caller must hold log->mutex. + */ +static size_t get_next_entry(struct logger_log *log, size_t off, size_t len) +{ +	size_t count = 0; + +	do { +		size_t nr = sizeof(struct logger_entry) + +			get_entry_msg_len(log, off); +		off = logger_offset(log, off + nr); +		count += nr; +	} while (count < len); + +	return off; +} + +/* + * is_between - is a < c < b, accounting for wrapping of a, b, and c + *    positions in the buffer + * + * That is, if a<b, check for c between a and b + * and if a>b, check for c outside (not between) a and b + * + * |------- a xxxxxxxx b --------| + *               c^ + * + * |xxxxx b --------- a xxxxxxxxx| + *    c^ + *  or                    c^ + */ +static inline int is_between(size_t a, size_t b, size_t c) +{ +	if (a < b) { +		/* is c between a and b? */ +		if (a < c && c <= b) +			return 1; +	} else { +		/* is c outside of b through a? 
*/ +		if (c <= b || a < c) +			return 1; +	} + +	return 0; +} + +/* + * fix_up_readers - walk the list of all readers and "fix up" any who were + * lapped by the writer; also do the same for the default "start head". + * We do this by "pulling forward" the readers and start head to the first + * entry after the new write head. + * + * The caller needs to hold log->mutex. + */ +static void fix_up_readers(struct logger_log *log, size_t len) +{ +	size_t old = log->w_off; +	size_t new = logger_offset(log, old + len); +	struct logger_reader *reader; + +	if (is_between(old, new, log->head)) +		log->head = get_next_entry(log, log->head, len); + +	list_for_each_entry(reader, &log->readers, list) +		if (is_between(old, new, reader->r_off)) +			reader->r_off = get_next_entry(log, reader->r_off, len); +} + +/* + * do_write_log - writes 'len' bytes from 'buf' to 'log' + * + * The caller needs to hold log->mutex. + */ +static void do_write_log(struct logger_log *log, const void *buf, size_t count) +{ +	size_t len; + +	len = min(count, log->size - log->w_off); +	memcpy(log->buffer + log->w_off, buf, len); + +	if (count != len) +		memcpy(log->buffer, buf + len, count - len); + +	log->w_off = logger_offset(log, log->w_off + count); + +} + +/* + * do_write_log_user - writes 'len' bytes from the user-space buffer 'buf' to + * the log 'log' + * + * The caller needs to hold log->mutex. + * + * Returns 'count' on success, negative error code on failure. + */ +static ssize_t do_write_log_from_user(struct logger_log *log, +				      const void __user *buf, size_t count) +{ +	size_t len; + +	len = min(count, log->size - log->w_off); +	if (len && copy_from_user(log->buffer + log->w_off, buf, len)) +		return -EFAULT; + +	if (count != len) +		if (copy_from_user(log->buffer, buf + len, count - len)) +			/* +			 * Note that by not updating w_off, this abandons the +			 * portion of the new entry that *was* successfully +			 * copied, just above.  This is intentional to avoid +			 * message corruption from missing fragments. +			 */ +			return -EFAULT; + +	log->w_off = logger_offset(log, log->w_off + count); + +	return count; +} + +/* + * logger_aio_write - our write method, implementing support for write(), + * writev(), and aio_write(). Writes are our fast path, and we try to optimize + * them above all else. + */ +static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov, +			 unsigned long nr_segs, loff_t ppos) +{ +	struct logger_log *log = file_get_log(iocb->ki_filp); +	size_t orig; +	struct logger_entry header; +	struct timespec now; +	ssize_t ret = 0; + +	now = current_kernel_time(); + +	header.pid = current->tgid; +	header.tid = current->pid; +	header.sec = now.tv_sec; +	header.nsec = now.tv_nsec; +	header.euid = current_euid(); +	header.len = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD); +	header.hdr_size = sizeof(struct logger_entry); + +	/* null writes succeed, return zero */ +	if (unlikely(!header.len)) +		return 0; + +	mutex_lock(&log->mutex); + +	orig = log->w_off; + +	/* +	 * Fix up any readers, pulling them forward to the first readable +	 * entry after (what will be) the new write offset. We do this now +	 * because if we partially fail, we can end up with clobbered log +	 * entries that encroach on readable buffer. 
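Seen from user space, the whole iovec vector of one writev() call becomes a single atomic entry: the header is stamped once and each segment is copied under log->mutex. A minimal sketch of how a client conventionally builds that vector (Android's liblog sends a priority byte, a NUL-terminated tag, then the message); the /dev/log_main path, the priority value 4 and the helper name are conventions assumed here, not something this driver defines:

#include <fcntl.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

/* Illustrative only: one entry per writev() call, so the driver records it
 * atomically under log->mutex. */
static int example_log_write(const char *tag, const char *msg)
{
	unsigned char prio = 4;		/* assumed: liblog's ANDROID_LOG_INFO */
	struct iovec vec[3];
	int fd;
	ssize_t n;

	vec[0].iov_base = &prio;
	vec[0].iov_len  = 1;
	vec[1].iov_base = (void *)tag;
	vec[1].iov_len  = strlen(tag) + 1;
	vec[2].iov_base = (void *)msg;
	vec[2].iov_len  = strlen(msg) + 1;

	fd = open("/dev/log_main", O_WRONLY);	/* assumed node for "log_main" */
	if (fd < 0)
		return -1;
	n = writev(fd, vec, 3);
	close(fd);
	return n < 0 ? -1 : 0;
}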
+	 */ +	fix_up_readers(log, sizeof(struct logger_entry) + header.len); + +	do_write_log(log, &header, sizeof(struct logger_entry)); + +	while (nr_segs-- > 0) { +		size_t len; +		ssize_t nr; + +		/* figure out how much of this vector we can keep */ +		len = min_t(size_t, iov->iov_len, header.len - ret); + +		/* write out this segment's payload */ +		nr = do_write_log_from_user(log, iov->iov_base, len); +		if (unlikely(nr < 0)) { +			log->w_off = orig; +			mutex_unlock(&log->mutex); +			return nr; +		} + +		iov++; +		ret += nr; +	} + +	mutex_unlock(&log->mutex); + +	/* wake up any blocked readers */ +	wake_up_interruptible(&log->wq); + +	return ret; +} + +static struct logger_log *get_log_from_minor(int minor) +{ +	struct logger_log *log; + +	list_for_each_entry(log, &log_list, logs) +		if (log->misc.minor == minor) +			return log; +	return NULL; +} + +/* + * logger_open - the log's open() file operation + * + * Note how near a no-op this is in the write-only case. Keep it that way! + */ +static int logger_open(struct inode *inode, struct file *file) +{ +	struct logger_log *log; +	int ret; + +	ret = nonseekable_open(inode, file); +	if (ret) +		return ret; + +	log = get_log_from_minor(MINOR(inode->i_rdev)); +	if (!log) +		return -ENODEV; + +	if (file->f_mode & FMODE_READ) { +		struct logger_reader *reader; + +		reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL); +		if (!reader) +			return -ENOMEM; + +		reader->log = log; +		reader->r_ver = 1; +		reader->r_all = in_egroup_p(inode->i_gid) || +			capable(CAP_SYSLOG); + +		INIT_LIST_HEAD(&reader->list); + +		mutex_lock(&log->mutex); +		reader->r_off = log->head; +		list_add_tail(&reader->list, &log->readers); +		mutex_unlock(&log->mutex); + +		file->private_data = reader; +	} else +		file->private_data = log; + +	return 0; +} + +/* + * logger_release - the log's release file operation + * + * Note this is a total no-op in the write-only case. Keep it that way! + */ +static int logger_release(struct inode *ignored, struct file *file) +{ +	if (file->f_mode & FMODE_READ) { +		struct logger_reader *reader = file->private_data; +		struct logger_log *log = reader->log; + +		mutex_lock(&log->mutex); +		list_del(&reader->list); +		mutex_unlock(&log->mutex); + +		kfree(reader); +	} + +	return 0; +} + +/* + * logger_poll - the log's poll file operation, for poll/select/epoll + * + * Note we always return POLLOUT, because you can always write() to the log. + * Note also that, strictly speaking, a return value of POLLIN does not + * guarantee that the log is readable without blocking, as there is a small + * chance that the writer can lap the reader in the interim between poll() + * returning and the read() request. 
+ */ +static unsigned int logger_poll(struct file *file, poll_table *wait) +{ +	struct logger_reader *reader; +	struct logger_log *log; +	unsigned int ret = POLLOUT | POLLWRNORM; + +	if (!(file->f_mode & FMODE_READ)) +		return ret; + +	reader = file->private_data; +	log = reader->log; + +	poll_wait(file, &log->wq, wait); + +	mutex_lock(&log->mutex); +	if (!reader->r_all) +		reader->r_off = get_next_entry_by_uid(log, +			reader->r_off, current_euid()); + +	if (log->w_off != reader->r_off) +		ret |= POLLIN | POLLRDNORM; +	mutex_unlock(&log->mutex); + +	return ret; +} + +static long logger_set_version(struct logger_reader *reader, void __user *arg) +{ +	int version; + +	if (copy_from_user(&version, arg, sizeof(int))) +		return -EFAULT; + +	if ((version < 1) || (version > 2)) +		return -EINVAL; + +	reader->r_ver = version; +	return 0; +} + +static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ +	struct logger_log *log = file_get_log(file); +	struct logger_reader *reader; +	long ret = -EINVAL; +	void __user *argp = (void __user *) arg; + +	mutex_lock(&log->mutex); + +	switch (cmd) { +	case LOGGER_GET_LOG_BUF_SIZE: +		ret = log->size; +		break; +	case LOGGER_GET_LOG_LEN: +		if (!(file->f_mode & FMODE_READ)) { +			ret = -EBADF; +			break; +		} +		reader = file->private_data; +		if (log->w_off >= reader->r_off) +			ret = log->w_off - reader->r_off; +		else +			ret = (log->size - reader->r_off) + log->w_off; +		break; +	case LOGGER_GET_NEXT_ENTRY_LEN: +		if (!(file->f_mode & FMODE_READ)) { +			ret = -EBADF; +			break; +		} +		reader = file->private_data; + +		if (!reader->r_all) +			reader->r_off = get_next_entry_by_uid(log, +				reader->r_off, current_euid()); + +		if (log->w_off != reader->r_off) +			ret = get_user_hdr_len(reader->r_ver) + +				get_entry_msg_len(log, reader->r_off); +		else +			ret = 0; +		break; +	case LOGGER_FLUSH_LOG: +		if (!(file->f_mode & FMODE_WRITE)) { +			ret = -EBADF; +			break; +		} +		if (!(in_egroup_p(file_inode(file)->i_gid) || +				capable(CAP_SYSLOG))) { +			ret = -EPERM; +			break; +		} +		list_for_each_entry(reader, &log->readers, list) +			reader->r_off = log->w_off; +		log->head = log->w_off; +		ret = 0; +		break; +	case LOGGER_GET_VERSION: +		if (!(file->f_mode & FMODE_READ)) { +			ret = -EBADF; +			break; +		} +		reader = file->private_data; +		ret = reader->r_ver; +		break; +	case LOGGER_SET_VERSION: +		if (!(file->f_mode & FMODE_READ)) { +			ret = -EBADF; +			break; +		} +		reader = file->private_data; +		ret = logger_set_version(reader, argp); +		break; +	} + +	mutex_unlock(&log->mutex); + +	return ret; +} + +static const struct file_operations logger_fops = { +	.owner = THIS_MODULE, +	.read = logger_read, +	.aio_write = logger_aio_write, +	.poll = logger_poll, +	.unlocked_ioctl = logger_ioctl, +	.compat_ioctl = logger_ioctl, +	.open = logger_open, +	.release = logger_release, +}; + +/* + * Log size must be a power of two, and greater than + * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)). 
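Taken together, the reader-side fops above suggest a simple client loop: poll() until data is available, ask the driver for the length of the next entry, then read() exactly one entry. A hedged user-space sketch, assuming the logger.h definitions below are exported to user space and that the main log appears as /dev/log_main; as the logger_poll() comment notes, a reader can be lapped between poll() and read(), so failures from read() are not treated as fatal:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "logger.h"	/* assumed: the ABI below, exported to user space */

int main(void)
{
	int fd = open("/dev/log_main", O_RDONLY);	/* assumed node name */
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0)
		return 1;

	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		long len = ioctl(fd, LOGGER_GET_NEXT_ENTRY_LEN);
		char *buf;

		if (len <= 0)
			continue;	/* lapped or flushed; poll again */
		buf = malloc(len);
		if (!buf)
			break;
		if (read(fd, buf, len) > 0)
			printf("read one %ld-byte entry\n", len);
		free(buf);
	}
	close(fd);
	return 0;
}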
 */ +static int __init create_log(char *log_name, int size) +{ +	int ret = 0; +	struct logger_log *log; +	unsigned char *buffer; + +	buffer = vmalloc(size); +	if (buffer == NULL) +		return -ENOMEM; + +	log = kzalloc(sizeof(struct logger_log), GFP_KERNEL); +	if (log == NULL) { +		ret = -ENOMEM; +		goto out_free_buffer; +	} +	log->buffer = buffer; + +	log->misc.minor = MISC_DYNAMIC_MINOR; +	log->misc.name = kstrdup(log_name, GFP_KERNEL); +	if (log->misc.name == NULL) { +		ret = -ENOMEM; +		goto out_free_log; +	} + +	log->misc.fops = &logger_fops; +	log->misc.parent = NULL; + +	init_waitqueue_head(&log->wq); +	INIT_LIST_HEAD(&log->readers); +	mutex_init(&log->mutex); +	log->w_off = 0; +	log->head = 0; +	log->size = size; + +	INIT_LIST_HEAD(&log->logs); +	list_add_tail(&log->logs, &log_list); + +	/* finally, initialize the misc device for this log */ +	ret = misc_register(&log->misc); +	if (unlikely(ret)) { +		pr_err("failed to register misc device for log '%s'!\n", +				log->misc.name); +		goto out_free_log; +	} + +	pr_info("created %luK log '%s'\n", +		(unsigned long) log->size >> 10, log->misc.name); + +	return 0; + +out_free_log: +	kfree(log); + +out_free_buffer: +	vfree(buffer); +	return ret; +} + +static int __init logger_init(void) +{ +	int ret; + +	ret = create_log(LOGGER_LOG_MAIN, 256*1024); +	if (unlikely(ret)) +		goto out; + +	ret = create_log(LOGGER_LOG_EVENTS, 256*1024); +	if (unlikely(ret)) +		goto out; + +	ret = create_log(LOGGER_LOG_RADIO, 256*1024); +	if (unlikely(ret)) +		goto out; + +	ret = create_log(LOGGER_LOG_SYSTEM, 256*1024); +	if (unlikely(ret)) +		goto out; + +out: +	return ret; +} + +static void __exit logger_exit(void) +{ +	struct logger_log *current_log, *next_log; + +	list_for_each_entry_safe(current_log, next_log, &log_list, logs) { +		/* we have to delete all the entries inside log_list */ +		misc_deregister(&current_log->misc); +		vfree(current_log->buffer); +		kfree(current_log->misc.name); +		list_del(&current_log->logs); +		kfree(current_log); +	} +} + + +device_initcall(logger_init); +module_exit(logger_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Robert Love, <rlove@google.com>"); +MODULE_DESCRIPTION("Android Logger"); diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h new file mode 100644 index 00000000000..70af7d805df --- /dev/null +++ b/drivers/staging/android/logger.h @@ -0,0 +1,89 @@ +/* include/linux/logger.h + * + * Copyright (C) 2007-2008 Google, Inc. + * Author: Robert Love <rlove@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _LINUX_LOGGER_H +#define _LINUX_LOGGER_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +/** + * struct user_logger_entry_compat - defines a single entry that is given to a logger + * @len:	The length of the payload + * @__pad:	Two bytes of padding that appear to be required + * @pid:	The generating process' process ID + * @tid:	The generating process' thread ID + * @sec:	The number of seconds that have elapsed since the Epoch + * @nsec:	The number of nanoseconds that have elapsed since @sec + * @msg:	The message that is to be logged + * + * The userspace structure for version 1 of the logger_entry ABI. + * This structure is returned to userspace unless the caller requests + * an upgrade to a newer ABI version. + */ +struct user_logger_entry_compat { +	__u16		len; +	__u16		__pad; +	__s32		pid; +	__s32		tid; +	__s32		sec; +	__s32		nsec; +	char		msg[0]; +}; + +/** + * struct logger_entry - defines a single entry that is given to a logger + * @len:	The length of the payload + * @hdr_size:	sizeof(struct logger_entry_v2) + * @pid:	The generating process' process ID + * @tid:	The generating process' thread ID + * @sec:	The number of seconds that have elapsed since the Epoch + * @nsec:	The number of nanoseconds that have elapsed since @sec + * @euid:	Effective UID of logger + * @msg:	The message that is to be logged + * + * The structure for version 2 of the logger_entry ABI. + * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION) + * is called with version >= 2 + */ +struct logger_entry { +	__u16		len; +	__u16		hdr_size; +	__s32		pid; +	__s32		tid; +	__s32		sec; +	__s32		nsec; +	kuid_t		euid; +	char		msg[0]; +}; + +#define LOGGER_LOG_RADIO	"log_radio"	/* radio-related messages */ +#define LOGGER_LOG_EVENTS	"log_events"	/* system/hardware events */ +#define LOGGER_LOG_SYSTEM	"log_system"	/* system/framework messages */ +#define LOGGER_LOG_MAIN		"log_main"	/* everything else */ + +#define LOGGER_ENTRY_MAX_PAYLOAD	4076 + +#define __LOGGERIO	0xAE + +#define LOGGER_GET_LOG_BUF_SIZE		_IO(__LOGGERIO, 1) /* size of log */ +#define LOGGER_GET_LOG_LEN		_IO(__LOGGERIO, 2) /* used log len */ +#define LOGGER_GET_NEXT_ENTRY_LEN	_IO(__LOGGERIO, 3) /* next entry len */ +#define LOGGER_FLUSH_LOG		_IO(__LOGGERIO, 4) /* flush log */ +#define LOGGER_GET_VERSION		_IO(__LOGGERIO, 5) /* abi version */ +#define LOGGER_SET_VERSION		_IO(__LOGGERIO, 6) /* abi version */ + +#endif /* _LINUX_LOGGER_H */ diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c new file mode 100644 index 00000000000..b545d3d1da3 --- /dev/null +++ b/drivers/staging/android/lowmemorykiller.c @@ -0,0 +1,202 @@ +/* drivers/misc/lowmemorykiller.c + * + * The lowmemorykiller driver lets user-space specify a set of memory thresholds + * where processes with a range of oom_score_adj values will get killed. Specify + * the minimum oom_score_adj values in + * /sys/module/lowmemorykiller/parameters/adj and the number of free pages in + * /sys/module/lowmemorykiller/parameters/minfree. Both files take a comma + * separated list of numbers in ascending order. + * + * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and + * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill + * processes with a oom_score_adj value of 8 or higher when the free memory + * drops below 4096 pages and kill processes with a oom_score_adj value of 0 or + * higher when the free memory drops below 1024 pages. 
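The two module parameters form parallel arrays: entry i of adj becomes the eligible minimum oom_score_adj once free memory falls below entry i of minfree (in pages). The same tuning can be scripted rather than typed; a small sketch that programs exactly the thresholds from the example above (the sysfs paths are the module-parameter locations quoted in this comment, the helper name is invented):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_param(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* adj >= 0 is eligible below 1024 free pages, adj >= 8 below 4096 */
	if (write_param("/sys/module/lowmemorykiller/parameters/adj", "0,8"))
		return 1;
	return write_param("/sys/module/lowmemorykiller/parameters/minfree",
			   "1024,4096") ? 1 : 0;
}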
+ * + * The driver considers memory used for caches to be free, but if a large + * percentage of the cached memory is locked this can be very inaccurate + * and processes may not get killed until the normal oom killer is triggered. + * + * Copyright (C) 2007-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/oom.h> +#include <linux/sched.h> +#include <linux/swap.h> +#include <linux/rcupdate.h> +#include <linux/profile.h> +#include <linux/notifier.h> + +static uint32_t lowmem_debug_level = 1; +static short lowmem_adj[6] = { +	0, +	1, +	6, +	12, +}; +static int lowmem_adj_size = 4; +static int lowmem_minfree[6] = { +	3 * 512,	/* 6MB */ +	2 * 1024,	/* 8MB */ +	4 * 1024,	/* 16MB */ +	16 * 1024,	/* 64MB */ +}; +static int lowmem_minfree_size = 4; + +static unsigned long lowmem_deathpending_timeout; + +#define lowmem_print(level, x...)			\ +	do {						\ +		if (lowmem_debug_level >= (level))	\ +			pr_info(x);			\ +	} while (0) + +static unsigned long lowmem_count(struct shrinker *s, +				  struct shrink_control *sc) +{ +	return global_page_state(NR_ACTIVE_ANON) + +		global_page_state(NR_ACTIVE_FILE) + +		global_page_state(NR_INACTIVE_ANON) + +		global_page_state(NR_INACTIVE_FILE); +} + +static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc) +{ +	struct task_struct *tsk; +	struct task_struct *selected = NULL; +	unsigned long rem = 0; +	int tasksize; +	int i; +	short min_score_adj = OOM_SCORE_ADJ_MAX + 1; +	int selected_tasksize = 0; +	short selected_oom_score_adj; +	int array_size = ARRAY_SIZE(lowmem_adj); +	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages; +	int other_file = global_page_state(NR_FILE_PAGES) - +						global_page_state(NR_SHMEM) - +						total_swapcache_pages(); + +	if (lowmem_adj_size < array_size) +		array_size = lowmem_adj_size; +	if (lowmem_minfree_size < array_size) +		array_size = lowmem_minfree_size; +	for (i = 0; i < array_size; i++) { +		if (other_free < lowmem_minfree[i] && +		    other_file < lowmem_minfree[i]) { +			min_score_adj = lowmem_adj[i]; +			break; +		} +	} + +	lowmem_print(3, "lowmem_scan %lu, %x, ofree %d %d, ma %hd\n", +			sc->nr_to_scan, sc->gfp_mask, other_free, +			other_file, min_score_adj); + +	if (min_score_adj == OOM_SCORE_ADJ_MAX + 1) { +		lowmem_print(5, "lowmem_scan %lu, %x, return 0\n", +			     sc->nr_to_scan, sc->gfp_mask); +		return 0; +	} + +	selected_oom_score_adj = min_score_adj; + +	rcu_read_lock(); +	for_each_process(tsk) { +		struct task_struct *p; +		short oom_score_adj; + +		if (tsk->flags & PF_KTHREAD) +			continue; + +		p = find_lock_task_mm(tsk); +		if (!p) +			continue; + +		if (test_tsk_thread_flag(p, TIF_MEMDIE) && +		    time_before_eq(jiffies, lowmem_deathpending_timeout)) { +			task_unlock(p); +			rcu_read_unlock(); +			return 0; +		} +		oom_score_adj = p->signal->oom_score_adj; +		if (oom_score_adj < min_score_adj) { +			task_unlock(p); +			continue; +		} +		tasksize = get_mm_rss(p->mm); +		task_unlock(p); 
+		if (tasksize <= 0) +			continue; +		if (selected) { +			if (oom_score_adj < selected_oom_score_adj) +				continue; +			if (oom_score_adj == selected_oom_score_adj && +			    tasksize <= selected_tasksize) +				continue; +		} +		selected = p; +		selected_tasksize = tasksize; +		selected_oom_score_adj = oom_score_adj; +		lowmem_print(2, "select %d (%s), adj %hd, size %d, to kill\n", +			     p->pid, p->comm, oom_score_adj, tasksize); +	} +	if (selected) { +		lowmem_print(1, "send sigkill to %d (%s), adj %hd, size %d\n", +			     selected->pid, selected->comm, +			     selected_oom_score_adj, selected_tasksize); +		lowmem_deathpending_timeout = jiffies + HZ; +		set_tsk_thread_flag(selected, TIF_MEMDIE); +		send_sig(SIGKILL, selected, 0); +		rem += selected_tasksize; +	} + +	lowmem_print(4, "lowmem_scan %lu, %x, return %lu\n", +		     sc->nr_to_scan, sc->gfp_mask, rem); +	rcu_read_unlock(); +	return rem; +} + +static struct shrinker lowmem_shrinker = { +	.scan_objects = lowmem_scan, +	.count_objects = lowmem_count, +	.seeks = DEFAULT_SEEKS * 16 +}; + +static int __init lowmem_init(void) +{ +	register_shrinker(&lowmem_shrinker); +	return 0; +} + +static void __exit lowmem_exit(void) +{ +	unregister_shrinker(&lowmem_shrinker); +} + +module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR); +module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, +			 S_IRUGO | S_IWUSR); +module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, +			 S_IRUGO | S_IWUSR); +module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR); + +module_init(lowmem_init); +module_exit(lowmem_exit); + +MODULE_LICENSE("GPL"); + diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c new file mode 100644 index 00000000000..12a136ec1ce --- /dev/null +++ b/drivers/staging/android/sw_sync.c @@ -0,0 +1,268 @@ +/* + * drivers/base/sw_sync.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/syscalls.h> +#include <linux/uaccess.h> + +#include "sw_sync.h" + +static int sw_sync_cmp(u32 a, u32 b) +{ +	if (a == b) +		return 0; + +	return ((s32)a - (s32)b) < 0 ? 
-1 : 1; +} + +struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value) +{ +	struct sw_sync_pt *pt; + +	pt = (struct sw_sync_pt *) +		sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt)); + +	pt->value = value; + +	return (struct sync_pt *)pt; +} +EXPORT_SYMBOL(sw_sync_pt_create); + +static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt) +{ +	struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt; +	struct sw_sync_timeline *obj = +		(struct sw_sync_timeline *)sync_pt->parent; + +	return (struct sync_pt *) sw_sync_pt_create(obj, pt->value); +} + +static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt) +{ +	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; +	struct sw_sync_timeline *obj = +		(struct sw_sync_timeline *)sync_pt->parent; + +	return sw_sync_cmp(obj->value, pt->value) >= 0; +} + +static int sw_sync_pt_compare(struct sync_pt *a, struct sync_pt *b) +{ +	struct sw_sync_pt *pt_a = (struct sw_sync_pt *)a; +	struct sw_sync_pt *pt_b = (struct sw_sync_pt *)b; + +	return sw_sync_cmp(pt_a->value, pt_b->value); +} + +static int sw_sync_fill_driver_data(struct sync_pt *sync_pt, +				    void *data, int size) +{ +	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + +	if (size < sizeof(pt->value)) +		return -ENOMEM; + +	memcpy(data, &pt->value, sizeof(pt->value)); + +	return sizeof(pt->value); +} + +static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline, +				       char *str, int size) +{ +	struct sw_sync_timeline *timeline = +		(struct sw_sync_timeline *)sync_timeline; +	snprintf(str, size, "%d", timeline->value); +} + +static void sw_sync_pt_value_str(struct sync_pt *sync_pt, +				       char *str, int size) +{ +	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt; + +	snprintf(str, size, "%d", pt->value); +} + +static struct sync_timeline_ops sw_sync_timeline_ops = { +	.driver_name = "sw_sync", +	.dup = sw_sync_pt_dup, +	.has_signaled = sw_sync_pt_has_signaled, +	.compare = sw_sync_pt_compare, +	.fill_driver_data = sw_sync_fill_driver_data, +	.timeline_value_str = sw_sync_timeline_value_str, +	.pt_value_str = sw_sync_pt_value_str, +}; + + +struct sw_sync_timeline *sw_sync_timeline_create(const char *name) +{ +	struct sw_sync_timeline *obj = (struct sw_sync_timeline *) +		sync_timeline_create(&sw_sync_timeline_ops, +				     sizeof(struct sw_sync_timeline), +				     name); + +	return obj; +} +EXPORT_SYMBOL(sw_sync_timeline_create); + +void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) +{ +	obj->value += inc; + +	sync_timeline_signal(&obj->obj); +} +EXPORT_SYMBOL(sw_sync_timeline_inc); + +#ifdef CONFIG_SW_SYNC_USER +/* *WARNING* + * + * improper use of this can result in deadlocking kernel drivers from userspace. 
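For orientation, the kernel-side flow these exports support, as a minimal sketch: a driver creates a timeline, hands out sync points at future counter values wrapped in fences, and signals them by stepping the counter. The function names are the ones defined above; the example_* wrappers and the omitted error handling are illustrative shortcuts:

#include "sw_sync.h"

static struct sw_sync_timeline *example_tl;

/* Hand the caller a fence that will signal once this timeline's counter
 * reaches 'seqno'. Error handling trimmed for brevity. */
static struct sync_fence *example_queue_work(u32 seqno)
{
	struct sync_pt *pt;

	if (!example_tl)
		example_tl = sw_sync_timeline_create("example");

	pt = sw_sync_pt_create(example_tl, seqno);
	return sync_fence_create("example_fence", pt);
}

/* Completion path: step the counter by one; every pt at or below the new
 * value, and therefore every fence built on such pts, signals. */
static void example_work_done(void)
{
	sw_sync_timeline_inc(example_tl, 1);
}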
+ */ + +/* opening sw_sync create a new sync obj */ +static int sw_sync_open(struct inode *inode, struct file *file) +{ +	struct sw_sync_timeline *obj; +	char task_comm[TASK_COMM_LEN]; + +	get_task_comm(task_comm, current); + +	obj = sw_sync_timeline_create(task_comm); +	if (obj == NULL) +		return -ENOMEM; + +	file->private_data = obj; + +	return 0; +} + +static int sw_sync_release(struct inode *inode, struct file *file) +{ +	struct sw_sync_timeline *obj = file->private_data; + +	sync_timeline_destroy(&obj->obj); +	return 0; +} + +static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj, +				       unsigned long arg) +{ +	int fd = get_unused_fd_flags(O_CLOEXEC); +	int err; +	struct sync_pt *pt; +	struct sync_fence *fence; +	struct sw_sync_create_fence_data data; + +	if (fd < 0) +		return fd; + +	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { +		err = -EFAULT; +		goto err; +	} + +	pt = sw_sync_pt_create(obj, data.value); +	if (pt == NULL) { +		err = -ENOMEM; +		goto err; +	} + +	data.name[sizeof(data.name) - 1] = '\0'; +	fence = sync_fence_create(data.name, pt); +	if (fence == NULL) { +		sync_pt_free(pt); +		err = -ENOMEM; +		goto err; +	} + +	data.fence = fd; +	if (copy_to_user((void __user *)arg, &data, sizeof(data))) { +		sync_fence_put(fence); +		err = -EFAULT; +		goto err; +	} + +	sync_fence_install(fence, fd); + +	return 0; + +err: +	put_unused_fd(fd); +	return err; +} + +static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg) +{ +	u32 value; + +	if (copy_from_user(&value, (void __user *)arg, sizeof(value))) +		return -EFAULT; + +	sw_sync_timeline_inc(obj, value); + +	return 0; +} + +static long sw_sync_ioctl(struct file *file, unsigned int cmd, +			  unsigned long arg) +{ +	struct sw_sync_timeline *obj = file->private_data; + +	switch (cmd) { +	case SW_SYNC_IOC_CREATE_FENCE: +		return sw_sync_ioctl_create_fence(obj, arg); + +	case SW_SYNC_IOC_INC: +		return sw_sync_ioctl_inc(obj, arg); + +	default: +		return -ENOTTY; +	} +} + +static const struct file_operations sw_sync_fops = { +	.owner = THIS_MODULE, +	.open = sw_sync_open, +	.release = sw_sync_release, +	.unlocked_ioctl = sw_sync_ioctl, +	.compat_ioctl = sw_sync_ioctl, +}; + +static struct miscdevice sw_sync_dev = { +	.minor	= MISC_DYNAMIC_MINOR, +	.name	= "sw_sync", +	.fops	= &sw_sync_fops, +}; + +static int __init sw_sync_device_init(void) +{ +	return misc_register(&sw_sync_dev); +} + +static void __exit sw_sync_device_remove(void) +{ +	misc_deregister(&sw_sync_dev); +} + +module_init(sw_sync_device_init); +module_exit(sw_sync_device_remove); + +#endif /* CONFIG_SW_SYNC_USER */ diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h new file mode 100644 index 00000000000..1a50669ec8a --- /dev/null +++ b/drivers/staging/android/sw_sync.h @@ -0,0 +1,59 @@ +/* + * include/linux/sw_sync.h + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
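A hedged user-space sketch of the interface just defined: create a fence that signals at timeline value 1, step the timeline, then wait on the fence fd. It assumes the uapi definitions referenced above (struct sw_sync_create_fence_data, SW_SYNC_IOC_CREATE_FENCE, SW_SYNC_IOC_INC, SYNC_IOC_WAIT) are available as headers and that the misc device appears as /dev/sw_sync:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "uapi/sw_sync.h"	/* assumed: the uapi headers referenced above */
#include "uapi/sync.h"

int main(void)
{
	struct sw_sync_create_fence_data data;
	int timeline, wait_ms = 1000;
	__u32 step = 1;

	timeline = open("/dev/sw_sync", O_RDWR);	/* assumed node name */
	if (timeline < 0)
		return 1;

	memset(&data, 0, sizeof(data));
	data.value = 1;				/* fence signals at counter >= 1 */
	strcpy(data.name, "example");
	if (ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data) < 0)
		return 1;

	ioctl(timeline, SW_SYNC_IOC_INC, &step);	/* counter 0 -> 1 */

	if (ioctl(data.fence, SYNC_IOC_WAIT, &wait_ms) == 0)
		printf("fence signaled\n");

	close(data.fence);
	close(timeline);
	return 0;
}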
+ * + */ + +#ifndef _LINUX_SW_SYNC_H +#define _LINUX_SW_SYNC_H + +#include <linux/types.h> +#include <linux/kconfig.h> +#include "sync.h" +#include "uapi/sw_sync.h" + +struct sw_sync_timeline { +	struct	sync_timeline	obj; + +	u32			value; +}; + +struct sw_sync_pt { +	struct sync_pt		pt; + +	u32			value; +}; + +#if IS_ENABLED(CONFIG_SW_SYNC) +struct sw_sync_timeline *sw_sync_timeline_create(const char *name); +void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); + +struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); +#else +static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name) +{ +	return NULL; +} + +static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) +{ +} + +static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, +		u32 value) +{ +	return NULL; +} +#endif /* IS_ENABLED(CONFIG_SW_SYNC) */ + +#endif /* _LINUX_SW_SYNC_H */ diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c new file mode 100644 index 00000000000..18174f7c871 --- /dev/null +++ b/drivers/staging/android/sync.c @@ -0,0 +1,1029 @@ +/* + * drivers/base/sync.c + * + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/debugfs.h> +#include <linux/export.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/poll.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/anon_inodes.h> + +#include "sync.h" + +#define CREATE_TRACE_POINTS +#include "trace/sync.h" + +static void sync_fence_signal_pt(struct sync_pt *pt); +static int _sync_pt_has_signaled(struct sync_pt *pt); +static void sync_fence_free(struct kref *kref); +static void sync_dump(void); + +static LIST_HEAD(sync_timeline_list_head); +static DEFINE_SPINLOCK(sync_timeline_list_lock); + +static LIST_HEAD(sync_fence_list_head); +static DEFINE_SPINLOCK(sync_fence_list_lock); + +struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, +					   int size, const char *name) +{ +	struct sync_timeline *obj; +	unsigned long flags; + +	if (size < sizeof(struct sync_timeline)) +		return NULL; + +	obj = kzalloc(size, GFP_KERNEL); +	if (obj == NULL) +		return NULL; + +	kref_init(&obj->kref); +	obj->ops = ops; +	strlcpy(obj->name, name, sizeof(obj->name)); + +	INIT_LIST_HEAD(&obj->child_list_head); +	spin_lock_init(&obj->child_list_lock); + +	INIT_LIST_HEAD(&obj->active_list_head); +	spin_lock_init(&obj->active_list_lock); + +	spin_lock_irqsave(&sync_timeline_list_lock, flags); +	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); +	spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + +	return obj; +} +EXPORT_SYMBOL(sync_timeline_create); + +static void sync_timeline_free(struct kref *kref) +{ +	struct sync_timeline *obj = +		container_of(kref, struct sync_timeline, kref); +	unsigned long flags; + +	spin_lock_irqsave(&sync_timeline_list_lock, flags); +	list_del(&obj->sync_timeline_list); +	
spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + +	if (obj->ops->release_obj) +		obj->ops->release_obj(obj); + +	kfree(obj); +} + +void sync_timeline_destroy(struct sync_timeline *obj) +{ +	obj->destroyed = true; +	/* +	 * Ensure timeline is marked as destroyed before +	 * changing timeline's fences status. +	 */ +	smp_wmb(); + +	/* +	 * signal any children that their parent is going away. +	 */ +	sync_timeline_signal(obj); + +	kref_put(&obj->kref, sync_timeline_free); +} +EXPORT_SYMBOL(sync_timeline_destroy); + +static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt) +{ +	unsigned long flags; + +	pt->parent = obj; + +	spin_lock_irqsave(&obj->child_list_lock, flags); +	list_add_tail(&pt->child_list, &obj->child_list_head); +	spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +static void sync_timeline_remove_pt(struct sync_pt *pt) +{ +	struct sync_timeline *obj = pt->parent; +	unsigned long flags; + +	spin_lock_irqsave(&obj->active_list_lock, flags); +	if (!list_empty(&pt->active_list)) +		list_del_init(&pt->active_list); +	spin_unlock_irqrestore(&obj->active_list_lock, flags); + +	spin_lock_irqsave(&obj->child_list_lock, flags); +	if (!list_empty(&pt->child_list)) +		list_del_init(&pt->child_list); + +	spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +void sync_timeline_signal(struct sync_timeline *obj) +{ +	unsigned long flags; +	LIST_HEAD(signaled_pts); +	struct list_head *pos, *n; + +	trace_sync_timeline(obj); + +	spin_lock_irqsave(&obj->active_list_lock, flags); + +	list_for_each_safe(pos, n, &obj->active_list_head) { +		struct sync_pt *pt = +			container_of(pos, struct sync_pt, active_list); + +		if (_sync_pt_has_signaled(pt)) { +			list_del_init(pos); +			list_add(&pt->signaled_list, &signaled_pts); +			kref_get(&pt->fence->kref); +		} +	} + +	spin_unlock_irqrestore(&obj->active_list_lock, flags); + +	list_for_each_safe(pos, n, &signaled_pts) { +		struct sync_pt *pt = +			container_of(pos, struct sync_pt, signaled_list); + +		list_del_init(pos); +		sync_fence_signal_pt(pt); +		kref_put(&pt->fence->kref, sync_fence_free); +	} +} +EXPORT_SYMBOL(sync_timeline_signal); + +struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size) +{ +	struct sync_pt *pt; + +	if (size < sizeof(struct sync_pt)) +		return NULL; + +	pt = kzalloc(size, GFP_KERNEL); +	if (pt == NULL) +		return NULL; + +	INIT_LIST_HEAD(&pt->active_list); +	kref_get(&parent->kref); +	sync_timeline_add_pt(parent, pt); + +	return pt; +} +EXPORT_SYMBOL(sync_pt_create); + +void sync_pt_free(struct sync_pt *pt) +{ +	if (pt->parent->ops->free_pt) +		pt->parent->ops->free_pt(pt); + +	sync_timeline_remove_pt(pt); + +	kref_put(&pt->parent->kref, sync_timeline_free); + +	kfree(pt); +} +EXPORT_SYMBOL(sync_pt_free); + +/* call with pt->parent->active_list_lock held */ +static int _sync_pt_has_signaled(struct sync_pt *pt) +{ +	int old_status = pt->status; + +	if (!pt->status) +		pt->status = pt->parent->ops->has_signaled(pt); + +	if (!pt->status && pt->parent->destroyed) +		pt->status = -ENOENT; + +	if (pt->status != old_status) +		pt->timestamp = ktime_get(); + +	return pt->status; +} + +static struct sync_pt *sync_pt_dup(struct sync_pt *pt) +{ +	return pt->parent->ops->dup(pt); +} + +/* Adds a sync pt to the active queue.  
Called when added to a fence */ +static void sync_pt_activate(struct sync_pt *pt) +{ +	struct sync_timeline *obj = pt->parent; +	unsigned long flags; +	int err; + +	spin_lock_irqsave(&obj->active_list_lock, flags); + +	err = _sync_pt_has_signaled(pt); +	if (err != 0) +		goto out; + +	list_add_tail(&pt->active_list, &obj->active_list_head); + +out: +	spin_unlock_irqrestore(&obj->active_list_lock, flags); +} + +static int sync_fence_release(struct inode *inode, struct file *file); +static unsigned int sync_fence_poll(struct file *file, poll_table *wait); +static long sync_fence_ioctl(struct file *file, unsigned int cmd, +			     unsigned long arg); + + +static const struct file_operations sync_fence_fops = { +	.release = sync_fence_release, +	.poll = sync_fence_poll, +	.unlocked_ioctl = sync_fence_ioctl, +	.compat_ioctl = sync_fence_ioctl, +}; + +static struct sync_fence *sync_fence_alloc(const char *name) +{ +	struct sync_fence *fence; +	unsigned long flags; + +	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL); +	if (fence == NULL) +		return NULL; + +	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops, +					 fence, 0); +	if (IS_ERR(fence->file)) +		goto err; + +	kref_init(&fence->kref); +	strlcpy(fence->name, name, sizeof(fence->name)); + +	INIT_LIST_HEAD(&fence->pt_list_head); +	INIT_LIST_HEAD(&fence->waiter_list_head); +	spin_lock_init(&fence->waiter_list_lock); + +	init_waitqueue_head(&fence->wq); + +	spin_lock_irqsave(&sync_fence_list_lock, flags); +	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head); +	spin_unlock_irqrestore(&sync_fence_list_lock, flags); + +	return fence; + +err: +	kfree(fence); +	return NULL; +} + +/* TODO: implement a create which takes more that one sync_pt */ +struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) +{ +	struct sync_fence *fence; + +	if (pt->fence) +		return NULL; + +	fence = sync_fence_alloc(name); +	if (fence == NULL) +		return NULL; + +	pt->fence = fence; +	list_add(&pt->pt_list, &fence->pt_list_head); +	sync_pt_activate(pt); + +	/* +	 * signal the fence in case pt was activated before +	 * sync_pt_activate(pt) was called +	 */ +	sync_fence_signal_pt(pt); + +	return fence; +} +EXPORT_SYMBOL(sync_fence_create); + +static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src) +{ +	struct list_head *pos; + +	list_for_each(pos, &src->pt_list_head) { +		struct sync_pt *orig_pt = +			container_of(pos, struct sync_pt, pt_list); +		struct sync_pt *new_pt = sync_pt_dup(orig_pt); + +		if (new_pt == NULL) +			return -ENOMEM; + +		new_pt->fence = dst; +		list_add(&new_pt->pt_list, &dst->pt_list_head); +	} + +	return 0; +} + +static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src) +{ +	struct list_head *src_pos, *dst_pos, *n; + +	list_for_each(src_pos, &src->pt_list_head) { +		struct sync_pt *src_pt = +			container_of(src_pos, struct sync_pt, pt_list); +		bool collapsed = false; + +		list_for_each_safe(dst_pos, n, &dst->pt_list_head) { +			struct sync_pt *dst_pt = +				container_of(dst_pos, struct sync_pt, pt_list); +			/* collapse two sync_pts on the same timeline +			 * to a single sync_pt that will signal at +			 * the later of the two +			 */ +			if (dst_pt->parent == src_pt->parent) { +				if (dst_pt->parent->ops->compare(dst_pt, src_pt) +						 == -1) { +					struct sync_pt *new_pt = +						sync_pt_dup(src_pt); +					if (new_pt == NULL) +						return -ENOMEM; + +					new_pt->fence = dst; +					list_replace(&dst_pt->pt_list, +						     &new_pt->pt_list); +					
sync_pt_free(dst_pt); +				} +				collapsed = true; +				break; +			} +		} + +		if (!collapsed) { +			struct sync_pt *new_pt = sync_pt_dup(src_pt); + +			if (new_pt == NULL) +				return -ENOMEM; + +			new_pt->fence = dst; +			list_add(&new_pt->pt_list, &dst->pt_list_head); +		} +	} + +	return 0; +} + +static void sync_fence_detach_pts(struct sync_fence *fence) +{ +	struct list_head *pos, *n; + +	list_for_each_safe(pos, n, &fence->pt_list_head) { +		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + +		sync_timeline_remove_pt(pt); +	} +} + +static void sync_fence_free_pts(struct sync_fence *fence) +{ +	struct list_head *pos, *n; + +	list_for_each_safe(pos, n, &fence->pt_list_head) { +		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); + +		sync_pt_free(pt); +	} +} + +struct sync_fence *sync_fence_fdget(int fd) +{ +	struct file *file = fget(fd); + +	if (file == NULL) +		return NULL; + +	if (file->f_op != &sync_fence_fops) +		goto err; + +	return file->private_data; + +err: +	fput(file); +	return NULL; +} +EXPORT_SYMBOL(sync_fence_fdget); + +void sync_fence_put(struct sync_fence *fence) +{ +	fput(fence->file); +} +EXPORT_SYMBOL(sync_fence_put); + +void sync_fence_install(struct sync_fence *fence, int fd) +{ +	fd_install(fd, fence->file); +} +EXPORT_SYMBOL(sync_fence_install); + +static int sync_fence_get_status(struct sync_fence *fence) +{ +	struct list_head *pos; +	int status = 1; + +	list_for_each(pos, &fence->pt_list_head) { +		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list); +		int pt_status = pt->status; + +		if (pt_status < 0) { +			status = pt_status; +			break; +		} else if (status == 1) { +			status = pt_status; +		} +	} + +	return status; +} + +struct sync_fence *sync_fence_merge(const char *name, +				    struct sync_fence *a, struct sync_fence *b) +{ +	struct sync_fence *fence; +	struct list_head *pos; +	int err; + +	fence = sync_fence_alloc(name); +	if (fence == NULL) +		return NULL; + +	err = sync_fence_copy_pts(fence, a); +	if (err < 0) +		goto err; + +	err = sync_fence_merge_pts(fence, b); +	if (err < 0) +		goto err; + +	list_for_each(pos, &fence->pt_list_head) { +		struct sync_pt *pt = +			container_of(pos, struct sync_pt, pt_list); +		sync_pt_activate(pt); +	} + +	/* +	 * signal the fence in case one of it's pts were activated before +	 * they were activated +	 */ +	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head, +					      struct sync_pt, +					      pt_list)); + +	return fence; +err: +	sync_fence_free_pts(fence); +	kfree(fence); +	return NULL; +} +EXPORT_SYMBOL(sync_fence_merge); + +static void sync_fence_signal_pt(struct sync_pt *pt) +{ +	LIST_HEAD(signaled_waiters); +	struct sync_fence *fence = pt->fence; +	struct list_head *pos; +	struct list_head *n; +	unsigned long flags; +	int status; + +	status = sync_fence_get_status(fence); + +	spin_lock_irqsave(&fence->waiter_list_lock, flags); +	/* +	 * this should protect against two threads racing on the signaled +	 * false -> true transition +	 */ +	if (status && !fence->status) { +		list_for_each_safe(pos, n, &fence->waiter_list_head) +			list_move(pos, &signaled_waiters); + +		fence->status = status; +	} else { +		status = 0; +	} +	spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + +	if (status) { +		list_for_each_safe(pos, n, &signaled_waiters) { +			struct sync_fence_waiter *waiter = +				container_of(pos, struct sync_fence_waiter, +					     waiter_list); + +			list_del(pos); +			waiter->callback(fence, waiter); +		} +		wake_up(&fence->wq); +	} +} + +int 
sync_fence_wait_async(struct sync_fence *fence, +			  struct sync_fence_waiter *waiter) +{ +	unsigned long flags; +	int err = 0; + +	spin_lock_irqsave(&fence->waiter_list_lock, flags); + +	if (fence->status) { +		err = fence->status; +		goto out; +	} + +	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head); +out: +	spin_unlock_irqrestore(&fence->waiter_list_lock, flags); + +	return err; +} +EXPORT_SYMBOL(sync_fence_wait_async); + +int sync_fence_cancel_async(struct sync_fence *fence, +			     struct sync_fence_waiter *waiter) +{ +	struct list_head *pos; +	struct list_head *n; +	unsigned long flags; +	int ret = -ENOENT; + +	spin_lock_irqsave(&fence->waiter_list_lock, flags); +	/* +	 * Make sure waiter is still in waiter_list because it is possible for +	 * the waiter to be removed from the list while the callback is still +	 * pending. +	 */ +	list_for_each_safe(pos, n, &fence->waiter_list_head) { +		struct sync_fence_waiter *list_waiter = +			container_of(pos, struct sync_fence_waiter, +				     waiter_list); +		if (list_waiter == waiter) { +			list_del(pos); +			ret = 0; +			break; +		} +	} +	spin_unlock_irqrestore(&fence->waiter_list_lock, flags); +	return ret; +} +EXPORT_SYMBOL(sync_fence_cancel_async); + +static bool sync_fence_check(struct sync_fence *fence) +{ +	/* +	 * Make sure that reads to fence->status are ordered with the +	 * wait queue event triggering +	 */ +	smp_rmb(); +	return fence->status != 0; +} + +int sync_fence_wait(struct sync_fence *fence, long timeout) +{ +	int err = 0; +	struct sync_pt *pt; + +	trace_sync_wait(fence, 1); +	list_for_each_entry(pt, &fence->pt_list_head, pt_list) +		trace_sync_pt(pt); + +	if (timeout > 0) { +		timeout = msecs_to_jiffies(timeout); +		err = wait_event_interruptible_timeout(fence->wq, +						       sync_fence_check(fence), +						       timeout); +	} else if (timeout < 0) { +		err = wait_event_interruptible(fence->wq, +					       sync_fence_check(fence)); +	} +	trace_sync_wait(fence, 0); + +	if (err < 0) +		return err; + +	if (fence->status < 0) { +		pr_info("fence error %d on [%p]\n", fence->status, fence); +		sync_dump(); +		return fence->status; +	} + +	if (fence->status == 0) { +		if (timeout > 0) { +			pr_info("fence timeout on [%p] after %dms\n", fence, +				jiffies_to_msecs(timeout)); +			sync_dump(); +		} +		return -ETIME; +	} + +	return 0; +} +EXPORT_SYMBOL(sync_fence_wait); + +static void sync_fence_free(struct kref *kref) +{ +	struct sync_fence *fence = container_of(kref, struct sync_fence, kref); + +	sync_fence_free_pts(fence); + +	kfree(fence); +} + +static int sync_fence_release(struct inode *inode, struct file *file) +{ +	struct sync_fence *fence = file->private_data; +	unsigned long flags; + +	/* +	 * We need to remove all ways to access this fence before droping +	 * our ref. +	 * +	 * start with its membership in the global fence list +	 */ +	spin_lock_irqsave(&sync_fence_list_lock, flags); +	list_del(&fence->sync_fence_list); +	spin_unlock_irqrestore(&sync_fence_list_lock, flags); + +	/* +	 * remove its pts from their parents so that sync_timeline_signal() +	 * can't reference the fence. 
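The consumer side of the wait API above, as a minimal sketch: a driver handed a fence fd resolves it with sync_fence_fdget(), waits a bounded time with sync_fence_wait(), and drops the reference with sync_fence_put(). The wrapper name, the 100 ms timeout and the surrounding driver context are invented:

#include <linux/errno.h>

#include "sync.h"

/* Illustrative only: block up to 100 ms for the fence behind 'fd' before
 * touching whatever buffer it protects. */
static int example_wait_on_fence_fd(int fd)
{
	struct sync_fence *fence = sync_fence_fdget(fd);
	int err;

	if (!fence)
		return -EINVAL;

	err = sync_fence_wait(fence, 100);	/* ms; a negative timeout waits forever */
	sync_fence_put(fence);

	return err;	/* 0 on signal, -ETIME on timeout, or the fence's error */
}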
+	 */ +	sync_fence_detach_pts(fence); + +	kref_put(&fence->kref, sync_fence_free); + +	return 0; +} + +static unsigned int sync_fence_poll(struct file *file, poll_table *wait) +{ +	struct sync_fence *fence = file->private_data; + +	poll_wait(file, &fence->wq, wait); + +	/* +	 * Make sure that reads to fence->status are ordered with the +	 * wait queue event triggering +	 */ +	smp_rmb(); + +	if (fence->status == 1) +		return POLLIN; +	else if (fence->status < 0) +		return POLLERR; +	else +		return 0; +} + +static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg) +{ +	__s32 value; + +	if (copy_from_user(&value, (void __user *)arg, sizeof(value))) +		return -EFAULT; + +	return sync_fence_wait(fence, value); +} + +static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg) +{ +	int fd = get_unused_fd_flags(O_CLOEXEC); +	int err; +	struct sync_fence *fence2, *fence3; +	struct sync_merge_data data; + +	if (fd < 0) +		return fd; + +	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) { +		err = -EFAULT; +		goto err_put_fd; +	} + +	fence2 = sync_fence_fdget(data.fd2); +	if (fence2 == NULL) { +		err = -ENOENT; +		goto err_put_fd; +	} + +	data.name[sizeof(data.name) - 1] = '\0'; +	fence3 = sync_fence_merge(data.name, fence, fence2); +	if (fence3 == NULL) { +		err = -ENOMEM; +		goto err_put_fence2; +	} + +	data.fence = fd; +	if (copy_to_user((void __user *)arg, &data, sizeof(data))) { +		err = -EFAULT; +		goto err_put_fence3; +	} + +	sync_fence_install(fence3, fd); +	sync_fence_put(fence2); +	return 0; + +err_put_fence3: +	sync_fence_put(fence3); + +err_put_fence2: +	sync_fence_put(fence2); + +err_put_fd: +	put_unused_fd(fd); +	return err; +} + +static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size) +{ +	struct sync_pt_info *info = data; +	int ret; + +	if (size < sizeof(struct sync_pt_info)) +		return -ENOMEM; + +	info->len = sizeof(struct sync_pt_info); + +	if (pt->parent->ops->fill_driver_data) { +		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data, +							size - sizeof(*info)); +		if (ret < 0) +			return ret; + +		info->len += ret; +	} + +	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name)); +	strlcpy(info->driver_name, pt->parent->ops->driver_name, +		sizeof(info->driver_name)); +	info->status = pt->status; +	info->timestamp_ns = ktime_to_ns(pt->timestamp); + +	return info->len; +} + +static long sync_fence_ioctl_fence_info(struct sync_fence *fence, +					unsigned long arg) +{ +	struct sync_fence_info_data *data; +	struct list_head *pos; +	__u32 size; +	__u32 len = 0; +	int ret; + +	if (copy_from_user(&size, (void __user *)arg, sizeof(size))) +		return -EFAULT; + +	if (size < sizeof(struct sync_fence_info_data)) +		return -EINVAL; + +	if (size > 4096) +		size = 4096; + +	data = kzalloc(size, GFP_KERNEL); +	if (data == NULL) +		return -ENOMEM; + +	strlcpy(data->name, fence->name, sizeof(data->name)); +	data->status = fence->status; +	len = sizeof(struct sync_fence_info_data); + +	list_for_each(pos, &fence->pt_list_head) { +		struct sync_pt *pt = +			container_of(pos, struct sync_pt, pt_list); + +		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len); + +		if (ret < 0) +			goto out; + +		len += ret; +	} + +	data->len = len; + +	if (copy_to_user((void __user *)arg, data, len)) +		ret = -EFAULT; +	else +		ret = 0; + +out: +	kfree(data); + +	return ret; +} + +static long sync_fence_ioctl(struct file *file, unsigned int cmd, +			     unsigned long arg) +{ +	struct sync_fence *fence = file->private_data; + 
+	switch (cmd) { +	case SYNC_IOC_WAIT: +		return sync_fence_ioctl_wait(fence, arg); + +	case SYNC_IOC_MERGE: +		return sync_fence_ioctl_merge(fence, arg); + +	case SYNC_IOC_FENCE_INFO: +		return sync_fence_ioctl_fence_info(fence, arg); + +	default: +		return -ENOTTY; +	} +} + +#ifdef CONFIG_DEBUG_FS +static const char *sync_status_str(int status) +{ +	if (status > 0) +		return "signaled"; +	else if (status == 0) +		return "active"; +	else +		return "error"; +} + +static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence) +{ +	int status = pt->status; + +	seq_printf(s, "  %s%spt %s", +		   fence ? pt->parent->name : "", +		   fence ? "_" : "", +		   sync_status_str(status)); +	if (pt->status) { +		struct timeval tv = ktime_to_timeval(pt->timestamp); + +		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec); +	} + +	if (pt->parent->ops->timeline_value_str && +	    pt->parent->ops->pt_value_str) { +		char value[64]; + +		pt->parent->ops->pt_value_str(pt, value, sizeof(value)); +		seq_printf(s, ": %s", value); +		if (fence) { +			pt->parent->ops->timeline_value_str(pt->parent, value, +						    sizeof(value)); +			seq_printf(s, " / %s", value); +		} +	} else if (pt->parent->ops->print_pt) { +		seq_puts(s, ": "); +		pt->parent->ops->print_pt(s, pt); +	} + +	seq_puts(s, "\n"); +} + +static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) +{ +	struct list_head *pos; +	unsigned long flags; + +	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name); + +	if (obj->ops->timeline_value_str) { +		char value[64]; + +		obj->ops->timeline_value_str(obj, value, sizeof(value)); +		seq_printf(s, ": %s", value); +	} else if (obj->ops->print_obj) { +		seq_puts(s, ": "); +		obj->ops->print_obj(s, obj); +	} + +	seq_puts(s, "\n"); + +	spin_lock_irqsave(&obj->child_list_lock, flags); +	list_for_each(pos, &obj->child_list_head) { +		struct sync_pt *pt = +			container_of(pos, struct sync_pt, child_list); +		sync_print_pt(s, pt, false); +	} +	spin_unlock_irqrestore(&obj->child_list_lock, flags); +} + +static void sync_print_fence(struct seq_file *s, struct sync_fence *fence) +{ +	struct list_head *pos; +	unsigned long flags; + +	seq_printf(s, "[%p] %s: %s\n", fence, fence->name, +		   sync_status_str(fence->status)); + +	list_for_each(pos, &fence->pt_list_head) { +		struct sync_pt *pt = +			container_of(pos, struct sync_pt, pt_list); +		sync_print_pt(s, pt, true); +	} + +	spin_lock_irqsave(&fence->waiter_list_lock, flags); +	list_for_each(pos, &fence->waiter_list_head) { +		struct sync_fence_waiter *waiter = +			container_of(pos, struct sync_fence_waiter, +				     waiter_list); + +		seq_printf(s, "waiter %pF\n", waiter->callback); +	} +	spin_unlock_irqrestore(&fence->waiter_list_lock, flags); +} + +static int sync_debugfs_show(struct seq_file *s, void *unused) +{ +	unsigned long flags; +	struct list_head *pos; + +	seq_puts(s, "objs:\n--------------\n"); + +	spin_lock_irqsave(&sync_timeline_list_lock, flags); +	list_for_each(pos, &sync_timeline_list_head) { +		struct sync_timeline *obj = +			container_of(pos, struct sync_timeline, +				     sync_timeline_list); + +		sync_print_obj(s, obj); +		seq_puts(s, "\n"); +	} +	spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + +	seq_puts(s, "fences:\n--------------\n"); + +	spin_lock_irqsave(&sync_fence_list_lock, flags); +	list_for_each(pos, &sync_fence_list_head) { +		struct sync_fence *fence = +			container_of(pos, struct sync_fence, sync_fence_list); + +		sync_print_fence(s, fence); +		seq_puts(s, "\n"); +	} +	
spin_unlock_irqrestore(&sync_fence_list_lock, flags); +	return 0; +} + +static int sync_debugfs_open(struct inode *inode, struct file *file) +{ +	return single_open(file, sync_debugfs_show, inode->i_private); +} + +static const struct file_operations sync_debugfs_fops = { +	.open           = sync_debugfs_open, +	.read           = seq_read, +	.llseek         = seq_lseek, +	.release        = single_release, +}; + +static __init int sync_debugfs_init(void) +{ +	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops); +	return 0; +} +late_initcall(sync_debugfs_init); + +#define DUMP_CHUNK 256 +static char sync_dump_buf[64 * 1024]; +static void sync_dump(void) +{ +	struct seq_file s = { +		.buf = sync_dump_buf, +		.size = sizeof(sync_dump_buf) - 1, +	}; +	int i; + +	sync_debugfs_show(&s, NULL); + +	for (i = 0; i < s.count; i += DUMP_CHUNK) { +		if ((s.count - i) > DUMP_CHUNK) { +			char c = s.buf[i + DUMP_CHUNK]; + +			s.buf[i + DUMP_CHUNK] = 0; +			pr_cont("%s", s.buf + i); +			s.buf[i + DUMP_CHUNK] = c; +		} else { +			s.buf[s.count] = 0; +			pr_cont("%s", s.buf + i); +		} +	} +} +#else +static void sync_dump(void) +{ +} +#endif diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h new file mode 100644 index 00000000000..eaf57cccf62 --- /dev/null +++ b/drivers/staging/android/sync.h @@ -0,0 +1,344 @@ +/* + * include/linux/sync.h + * + * Copyright (C) 2012 Google, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_SYNC_H +#define _LINUX_SYNC_H + +#include <linux/types.h> +#include <linux/kref.h> +#include <linux/ktime.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/wait.h> + +#include "uapi/sync.h" + +struct sync_timeline; +struct sync_pt; +struct sync_fence; + +/** + * struct sync_timeline_ops - sync object implementation ops + * @driver_name:	name of the implementation + * @dup:		duplicate a sync_pt + * @has_signaled:	returns: + *			  1 if pt has signaled + *			  0 if pt has not signaled + *			 <0 on error + * @compare:		returns: + *			  1 if b will signal before a + *			  0 if a and b will signal at the same time + *			 -1 if a will signal before b + * @free_pt:		called before sync_pt is freed + * @release_obj:	called before sync_timeline is freed + * @print_obj:		deprecated + * @print_pt:		deprecated + * @fill_driver_data:	write implementation specific driver data to data. + *			  should return an error if there is not enough room + *			  as specified by size.  This information is returned + *			  to userspace by SYNC_IOC_FENCE_INFO. 
+ * @timeline_value_str: fill str with the value of the sync_timeline's counter + * @pt_value_str:	fill str with the value of the sync_pt + */ +struct sync_timeline_ops { +	const char *driver_name; + +	/* required */ +	struct sync_pt * (*dup)(struct sync_pt *pt); + +	/* required */ +	int (*has_signaled)(struct sync_pt *pt); + +	/* required */ +	int (*compare)(struct sync_pt *a, struct sync_pt *b); + +	/* optional */ +	void (*free_pt)(struct sync_pt *sync_pt); + +	/* optional */ +	void (*release_obj)(struct sync_timeline *sync_timeline); + +	/* deprecated */ +	void (*print_obj)(struct seq_file *s, +			  struct sync_timeline *sync_timeline); + +	/* deprecated */ +	void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt); + +	/* optional */ +	int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size); + +	/* optional */ +	void (*timeline_value_str)(struct sync_timeline *timeline, char *str, +				   int size); + +	/* optional */ +	void (*pt_value_str)(struct sync_pt *pt, char *str, int size); +}; + +/** + * struct sync_timeline - sync object + * @kref:		reference count on fence. + * @ops:		ops that define the implementation of the sync_timeline + * @name:		name of the sync_timeline. Useful for debugging + * @destroyed:		set when sync_timeline is destroyed + * @child_list_head:	list of children sync_pts for this sync_timeline + * @child_list_lock:	lock protecting @child_list_head, destroyed, and + *			  sync_pt.status + * @active_list_head:	list of active (unsignaled/errored) sync_pts + * @sync_timeline_list:	membership in global sync_timeline_list + */ +struct sync_timeline { +	struct kref		kref; +	const struct sync_timeline_ops	*ops; +	char			name[32]; + +	/* protected by child_list_lock */ +	bool			destroyed; + +	struct list_head	child_list_head; +	spinlock_t		child_list_lock; + +	struct list_head	active_list_head; +	spinlock_t		active_list_lock; + +	struct list_head	sync_timeline_list; +}; + +/** + * struct sync_pt - sync point + * @parent:		sync_timeline to which this sync_pt belongs + * @child_list:		membership in sync_timeline.child_list_head + * @active_list:	membership in sync_timeline.active_list_head + * @signaled_list:	membership in temporary signaled_list on stack + * @fence:		sync_fence to which the sync_pt belongs + * @pt_list:		membership in sync_fence.pt_list_head + * @status:		1: signaled, 0:active, <0: error + * @timestamp:		time which sync_pt status transitioned from active to + *			  signaled or error. + */ +struct sync_pt { +	struct sync_timeline		*parent; +	struct list_head	child_list; + +	struct list_head	active_list; +	struct list_head	signaled_list; + +	struct sync_fence	*fence; +	struct list_head	pt_list; + +	/* protected by parent->active_list_lock */ +	int			status; + +	ktime_t			timestamp; +}; + +/** + * struct sync_fence - sync fence + * @file:		file representing this fence + * @kref:		reference count on fence. + * @name:		name of sync_fence.  Useful for debugging + * @pt_list_head:	list of sync_pts in the fence.  
immutable once fence + *			  is created + * @waiter_list_head:	list of asynchronous waiters on this fence + * @waiter_list_lock:	lock protecting @waiter_list_head and @status + * @status:		1: signaled, 0:active, <0: error + * + * @wq:			wait queue for fence signaling + * @sync_fence_list:	membership in global fence list + */ +struct sync_fence { +	struct file		*file; +	struct kref		kref; +	char			name[32]; + +	/* this list is immutable once the fence is created */ +	struct list_head	pt_list_head; + +	struct list_head	waiter_list_head; +	spinlock_t		waiter_list_lock; /* also protects status */ +	int			status; + +	wait_queue_head_t	wq; + +	struct list_head	sync_fence_list; +}; + +struct sync_fence_waiter; +typedef void (*sync_callback_t)(struct sync_fence *fence, +				struct sync_fence_waiter *waiter); + +/** + * struct sync_fence_waiter - metadata for asynchronous waiter on a fence + * @waiter_list:	membership in sync_fence.waiter_list_head + * @callback:		function pointer to call when fence signals + * @callback_data:	pointer to pass to @callback + */ +struct sync_fence_waiter { +	struct list_head	waiter_list; + +	sync_callback_t		callback; +}; + +static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, +					  sync_callback_t callback) +{ +	waiter->callback = callback; +} + +/* + * API for sync_timeline implementers + */ + +/** + * sync_timeline_create() - creates a sync object + * @ops:	specifies the implementation ops for the object + * @size:	size to allocate for this obj + * @name:	sync_timeline name + * + * Creates a new sync_timeline which will use the implementation specified by + * @ops.  @size bytes will be allocated allowing for implementation specific + * data to be kept after the generic sync_timeline struct. + */ +struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, +					   int size, const char *name); + +/** + * sync_timeline_destroy() - destroys a sync object + * @obj:	sync_timeline to destroy + * + * A sync implementation should call this when the @obj is going away + * (i.e. module unload.)  @obj won't actually be freed until all its children + * sync_pts are freed. + */ +void sync_timeline_destroy(struct sync_timeline *obj); + +/** + * sync_timeline_signal() - signal a status change on a sync_timeline + * @obj:	sync_timeline to signal + * + * A sync implementation should call this any time one of it's sync_pts + * has signaled or has an error condition. + */ +void sync_timeline_signal(struct sync_timeline *obj); + +/** + * sync_pt_create() - creates a sync pt + * @parent:	sync_pt's parent sync_timeline + * @size:	size to allocate for this pt + * + * Creates a new sync_pt as a child of @parent.  @size bytes will be + * allocated allowing for implementation specific data to be kept after + * the generic sync_timeline struct. + */ +struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size); + +/** + * sync_pt_free() - frees a sync pt + * @pt:		sync_pt to free + * + * This should only be called on sync_pts which have been created but + * not added to a fence. + */ +void sync_pt_free(struct sync_pt *pt); + +/** + * sync_fence_create() - creates a sync fence + * @name:	name of fence to create + * @pt:		sync_pt to add to the fence + * + * Creates a fence containg @pt.  Once this is called, the fence takes + * ownership of @pt. 
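For readers new to this kernel API, here is a compressed sketch of how a timeline implementer ties together the calls documented above (sync_timeline_create(), sync_pt_create(), sync_fence_create(), sync_fence_install(), sync_timeline_signal()). It is illustrative only: the my_* names and the counter-based signalling rule are assumptions loosely modelled on a software timeline, and locking and error handling are omitted; none of this code is part of the patch itself.

#include <linux/file.h>		/* get_unused_fd_flags() */
#include <linux/fcntl.h>	/* O_CLOEXEC */
#include "sync.h"

struct my_timeline { struct sync_timeline obj; u32 value; };
struct my_pt       { struct sync_pt      pt;  u32 value; };

static struct sync_pt *my_dup(struct sync_pt *pt)
{
	struct my_pt *new = (struct my_pt *)
		sync_pt_create(pt->parent, sizeof(*new));

	if (new)
		new->value = ((struct my_pt *)pt)->value;
	return (struct sync_pt *)new;
}

static int my_has_signaled(struct sync_pt *pt)
{
	/* signaled once the timeline counter has passed this point */
	return ((struct my_pt *)pt)->value <=
	       ((struct my_timeline *)pt->parent)->value;
}

static int my_compare(struct sync_pt *a, struct sync_pt *b)
{
	u32 va = ((struct my_pt *)a)->value;
	u32 vb = ((struct my_pt *)b)->value;

	return (va == vb) ? 0 : ((va < vb) ? -1 : 1);
}

static const struct sync_timeline_ops my_ops = {
	.driver_name	= "my_timeline",
	.dup		= my_dup,		/* required */
	.has_signaled	= my_has_signaled,	/* required */
	.compare	= my_compare,		/* required */
};

/* create a timeline, hand a fence for value 1 to user space, then signal it */
static int my_example(void)
{
	struct my_timeline *tl = (struct my_timeline *)
		sync_timeline_create(&my_ops, sizeof(*tl), "my_timeline");
	struct my_pt *pt = (struct my_pt *)
		sync_pt_create(&tl->obj, sizeof(*pt));
	struct sync_fence *fence;
	int fd = get_unused_fd_flags(O_CLOEXEC);

	pt->value = 1;
	fence = sync_fence_create("my_fence", &pt->pt);	/* fence owns pt now */
	sync_fence_install(fence, fd);			/* fd goes to user space */

	tl->value = 1;				/* condition met (locking omitted) */
	sync_timeline_signal(&tl->obj);		/* wakes waiters on the fence */
	return fd;
}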
+ */ +struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt); + +/* + * API for sync_fence consumers + */ + +/** + * sync_fence_merge() - merge two fences + * @name:	name of new fence + * @a:		fence a + * @b:		fence b + * + * Creates a new fence which contains copies of all the sync_pts in both + * @a and @b.  @a and @b remain valid, independent fences. + */ +struct sync_fence *sync_fence_merge(const char *name, +				    struct sync_fence *a, struct sync_fence *b); + +/** + * sync_fence_fdget() - get a fence from an fd + * @fd:		fd referencing a fence + * + * Ensures @fd references a valid fence, increments the refcount of the backing + * file, and returns the fence. + */ +struct sync_fence *sync_fence_fdget(int fd); + +/** + * sync_fence_put() - puts a reference of a sync fence + * @fence:	fence to put + * + * Puts a reference on @fence.  If this is the last reference, the fence and + * all it's sync_pts will be freed + */ +void sync_fence_put(struct sync_fence *fence); + +/** + * sync_fence_install() - installs a fence into a file descriptor + * @fence:	fence to install + * @fd:		file descriptor in which to install the fence + * + * Installs @fence into @fd.  @fd's should be acquired through get_unused_fd(). + */ +void sync_fence_install(struct sync_fence *fence, int fd); + +/** + * sync_fence_wait_async() - registers and async wait on the fence + * @fence:		fence to wait on + * @waiter:		waiter callback struck + * + * Returns 1 if @fence has already signaled. + * + * Registers a callback to be called when @fence signals or has an error. + * @waiter should be initialized with sync_fence_waiter_init(). + */ +int sync_fence_wait_async(struct sync_fence *fence, +			  struct sync_fence_waiter *waiter); + +/** + * sync_fence_cancel_async() - cancels an async wait + * @fence:		fence to wait on + * @waiter:		waiter callback struck + * + * returns 0 if waiter was removed from fence's async waiter list. + * returns -ENOENT if waiter was not found on fence's async waiter list. + * + * Cancels a previously registered async wait.  Will fail gracefully if + * @waiter was never registered or if @fence has already signaled @waiter. + */ +int sync_fence_cancel_async(struct sync_fence *fence, +			    struct sync_fence_waiter *waiter); + +/** + * sync_fence_wait() - wait on fence + * @fence:	fence to wait on + * @tiemout:	timeout in ms + * + * Wait for @fence to be signaled or have an error.  Waits indefinitely + * if @timeout < 0 + */ +int sync_fence_wait(struct sync_fence *fence, long timeout); + +#endif /* _LINUX_SYNC_H */ diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c new file mode 100644 index 00000000000..180c209a009 --- /dev/null +++ b/drivers/staging/android/timed_gpio.c @@ -0,0 +1,167 @@ +/* drivers/misc/timed_gpio.c + * + * Copyright (C) 2008 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/hrtimer.h> +#include <linux/err.h> +#include <linux/gpio.h> + +#include "timed_output.h" +#include "timed_gpio.h" + + +struct timed_gpio_data { +	struct timed_output_dev dev; +	struct hrtimer timer; +	spinlock_t lock; +	unsigned gpio; +	int max_timeout; +	u8 active_low; +}; + +static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer) +{ +	struct timed_gpio_data *data = +		container_of(timer, struct timed_gpio_data, timer); + +	gpio_direction_output(data->gpio, data->active_low ? 1 : 0); +	return HRTIMER_NORESTART; +} + +static int gpio_get_time(struct timed_output_dev *dev) +{ +	struct timed_gpio_data	*data = +		container_of(dev, struct timed_gpio_data, dev); + +	if (hrtimer_active(&data->timer)) { +		ktime_t r = hrtimer_get_remaining(&data->timer); +		struct timeval t = ktime_to_timeval(r); + +		return t.tv_sec * 1000 + t.tv_usec / 1000; +	} else +		return 0; +} + +static void gpio_enable(struct timed_output_dev *dev, int value) +{ +	struct timed_gpio_data	*data = +		container_of(dev, struct timed_gpio_data, dev); +	unsigned long	flags; + +	spin_lock_irqsave(&data->lock, flags); + +	/* cancel previous timer and set GPIO according to value */ +	hrtimer_cancel(&data->timer); +	gpio_direction_output(data->gpio, data->active_low ? !value : !!value); + +	if (value > 0) { +		if (value > data->max_timeout) +			value = data->max_timeout; + +		hrtimer_start(&data->timer, +			ktime_set(value / 1000, (value % 1000) * 1000000), +			HRTIMER_MODE_REL); +	} + +	spin_unlock_irqrestore(&data->lock, flags); +} + +static int timed_gpio_probe(struct platform_device *pdev) +{ +	struct timed_gpio_platform_data *pdata = pdev->dev.platform_data; +	struct timed_gpio *cur_gpio; +	struct timed_gpio_data *gpio_data, *gpio_dat; +	int i, ret; + +	if (!pdata) +		return -EBUSY; + +	gpio_data = devm_kzalloc(&pdev->dev, +			sizeof(struct timed_gpio_data) * pdata->num_gpios, +			GFP_KERNEL); +	if (!gpio_data) +		return -ENOMEM; + +	for (i = 0; i < pdata->num_gpios; i++) { +		cur_gpio = &pdata->gpios[i]; +		gpio_dat = &gpio_data[i]; + +		hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC, +				HRTIMER_MODE_REL); +		gpio_dat->timer.function = gpio_timer_func; +		spin_lock_init(&gpio_dat->lock); + +		gpio_dat->dev.name = cur_gpio->name; +		gpio_dat->dev.get_time = gpio_get_time; +		gpio_dat->dev.enable = gpio_enable; +		ret = gpio_request(cur_gpio->gpio, cur_gpio->name); +		if (ret < 0) +			goto err_out; +		ret = timed_output_dev_register(&gpio_dat->dev); +		if (ret < 0) { +			gpio_free(cur_gpio->gpio); +			goto err_out; +		} + +		gpio_dat->gpio = cur_gpio->gpio; +		gpio_dat->max_timeout = cur_gpio->max_timeout; +		gpio_dat->active_low = cur_gpio->active_low; +		gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low); +	} + +	platform_set_drvdata(pdev, gpio_data); + +	return 0; + +err_out: +	while (--i >= 0) { +		timed_output_dev_unregister(&gpio_data[i].dev); +		gpio_free(gpio_data[i].gpio); +	} + +	return ret; +} + +static int timed_gpio_remove(struct platform_device *pdev) +{ +	struct timed_gpio_platform_data *pdata = pdev->dev.platform_data; +	struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev); +	int i; + +	for (i = 0; i < pdata->num_gpios; i++) { +		timed_output_dev_unregister(&gpio_data[i].dev); +		gpio_free(gpio_data[i].gpio); +	} + +	return 0; +} + +static struct platform_driver timed_gpio_driver = { +	.probe		= timed_gpio_probe, +	.remove		= timed_gpio_remove, +	.driver		= { +		
.name		= TIMED_GPIO_NAME, +		.owner		= THIS_MODULE, +	}, +}; + +module_platform_driver(timed_gpio_driver); + +MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); +MODULE_DESCRIPTION("timed gpio driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h new file mode 100644 index 00000000000..d29e169d7eb --- /dev/null +++ b/drivers/staging/android/timed_gpio.h @@ -0,0 +1,33 @@ +/* include/linux/timed_gpio.h + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * +*/ + +#ifndef _LINUX_TIMED_GPIO_H +#define _LINUX_TIMED_GPIO_H + +#define TIMED_GPIO_NAME "timed-gpio" + +struct timed_gpio { +	const char *name; +	unsigned	gpio; +	int		max_timeout; +	u8		active_low; +}; + +struct timed_gpio_platform_data { +	int		num_gpios; +	struct timed_gpio *gpios; +}; + +#endif diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c new file mode 100644 index 00000000000..c341ac11c5a --- /dev/null +++ b/drivers/staging/android/timed_output.c @@ -0,0 +1,118 @@ +/* drivers/misc/timed_output.c + * + * Copyright (C) 2009 Google, Inc. + * Author: Mike Lockwood <lockwood@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
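As a concrete illustration of the platform data the timed_gpio probe() above consumes, a board file might describe a single vibrator line like this. The GPIO number, timeout cap and device id are invented for the example; only TIMED_GPIO_NAME and the two structures come from timed_gpio.h.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include "timed_gpio.h"

/* hypothetical board wiring: one vibrator motor, capped at 15 seconds */
static struct timed_gpio board_timed_gpios[] = {
	{
		.name		= "vibrator",
		.gpio		= 57,		/* invented GPIO number */
		.max_timeout	= 15000,	/* milliseconds */
		.active_low	= 0,
	},
};

static struct timed_gpio_platform_data board_timed_gpio_data = {
	.num_gpios	= ARRAY_SIZE(board_timed_gpios),
	.gpios		= board_timed_gpios,
};

static struct platform_device board_timed_gpio_device = {
	.name	= TIMED_GPIO_NAME,	/* binds to timed_gpio_driver above */
	.id	= -1,
	.dev	= {
		.platform_data	= &board_timed_gpio_data,
	},
};

/* registered from the board init code with platform_device_register() */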
+ * + */ + +#define pr_fmt(fmt) "timed_output: " fmt + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/err.h> + +#include "timed_output.h" + +static struct class *timed_output_class; +static atomic_t device_count; + +static ssize_t enable_show(struct device *dev, struct device_attribute *attr, +			   char *buf) +{ +	struct timed_output_dev *tdev = dev_get_drvdata(dev); +	int remaining = tdev->get_time(tdev); + +	return sprintf(buf, "%d\n", remaining); +} + +static ssize_t enable_store(struct device *dev, struct device_attribute *attr, +			    const char *buf, size_t size) +{ +	struct timed_output_dev *tdev = dev_get_drvdata(dev); +	int value; + +	if (sscanf(buf, "%d", &value) != 1) +		return -EINVAL; + +	tdev->enable(tdev, value); + +	return size; +} +static DEVICE_ATTR_RW(enable); + +static struct attribute *timed_output_attrs[] = { +	&dev_attr_enable.attr, +	NULL, +}; +ATTRIBUTE_GROUPS(timed_output); + +static int create_timed_output_class(void) +{ +	if (!timed_output_class) { +		timed_output_class = class_create(THIS_MODULE, "timed_output"); +		if (IS_ERR(timed_output_class)) +			return PTR_ERR(timed_output_class); +		atomic_set(&device_count, 0); +		timed_output_class->dev_groups = timed_output_groups; +	} + +	return 0; +} + +int timed_output_dev_register(struct timed_output_dev *tdev) +{ +	int ret; + +	if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time) +		return -EINVAL; + +	ret = create_timed_output_class(); +	if (ret < 0) +		return ret; + +	tdev->index = atomic_inc_return(&device_count); +	tdev->dev = device_create(timed_output_class, NULL, +		MKDEV(0, tdev->index), NULL, "%s", tdev->name); +	if (IS_ERR(tdev->dev)) +		return PTR_ERR(tdev->dev); + +	dev_set_drvdata(tdev->dev, tdev); +	tdev->state = 0; +	return 0; +} +EXPORT_SYMBOL_GPL(timed_output_dev_register); + +void timed_output_dev_unregister(struct timed_output_dev *tdev) +{ +	tdev->enable(tdev, 0); +	device_destroy(timed_output_class, MKDEV(0, tdev->index)); +} +EXPORT_SYMBOL_GPL(timed_output_dev_unregister); + +static int __init timed_output_init(void) +{ +	return create_timed_output_class(); +} + +static void __exit timed_output_exit(void) +{ +	class_destroy(timed_output_class); +} + +module_init(timed_output_init); +module_exit(timed_output_exit); + +MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); +MODULE_DESCRIPTION("timed output class driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h new file mode 100644 index 00000000000..13d2ca51cbe --- /dev/null +++ b/drivers/staging/android/timed_output.h @@ -0,0 +1,37 @@ +/* include/linux/timed_output.h + * + * Copyright (C) 2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
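From user space, the class above reduces to a single sysfs attribute per registered device. A sketch of driving it, assuming the driver registered a device named "vibrator" (the name is whatever was passed to timed_output_dev_register(), so it is an assumption here):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* enable_store(): run the output for 200 ms, then it turns itself off */
	int fd = open("/sys/class/timed_output/vibrator/enable", O_WRONLY);

	if (fd < 0)
		return 1;
	dprintf(fd, "%d\n", 200);
	close(fd);

	/* enable_show(): reading the same file returns the remaining time in ms */
	return 0;
}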
+ * +*/ + +#ifndef _LINUX_TIMED_OUTPUT_H +#define _LINUX_TIMED_OUTPUT_H + +struct timed_output_dev { +	const char	*name; + +	/* enable the output and set the timer */ +	void (*enable)(struct timed_output_dev *sdev, int timeout); + +	/* returns the current number of milliseconds remaining on the timer */ +	int (*get_time)(struct timed_output_dev *sdev); + +	/* private data */ +	struct device	*dev; +	int		index; +	int		state; +}; + +int timed_output_dev_register(struct timed_output_dev *dev); +void timed_output_dev_unregister(struct timed_output_dev *dev); + +#endif diff --git a/drivers/staging/android/trace/sync.h b/drivers/staging/android/trace/sync.h new file mode 100644 index 00000000000..95462359ba5 --- /dev/null +++ b/drivers/staging/android/trace/sync.h @@ -0,0 +1,82 @@ +#undef TRACE_SYSTEM +#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace +#define TRACE_SYSTEM sync + +#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SYNC_H + +#include "../sync.h" +#include <linux/tracepoint.h> + +TRACE_EVENT(sync_timeline, +	TP_PROTO(struct sync_timeline *timeline), + +	TP_ARGS(timeline), + +	TP_STRUCT__entry( +			__string(name, timeline->name) +			__array(char, value, 32) +	), + +	TP_fast_assign( +			__assign_str(name, timeline->name); +			if (timeline->ops->timeline_value_str) { +				timeline->ops->timeline_value_str(timeline, +							__entry->value, +							sizeof(__entry->value)); +			} else { +				__entry->value[0] = '\0'; +			} +	), + +	TP_printk("name=%s value=%s", __get_str(name), __entry->value) +); + +TRACE_EVENT(sync_wait, +	TP_PROTO(struct sync_fence *fence, int begin), + +	TP_ARGS(fence, begin), + +	TP_STRUCT__entry( +			__string(name, fence->name) +			__field(s32, status) +			__field(u32, begin) +	), + +	TP_fast_assign( +			__assign_str(name, fence->name); +			__entry->status = fence->status; +			__entry->begin = begin; +	), + +	TP_printk("%s name=%s state=%d", __entry->begin ? "begin" : "end", +			__get_str(name), __entry->status) +); + +TRACE_EVENT(sync_pt, +	TP_PROTO(struct sync_pt *pt), + +	TP_ARGS(pt), + +	TP_STRUCT__entry( +		__string(timeline, pt->parent->name) +		__array(char, value, 32) +	), + +	TP_fast_assign( +		__assign_str(timeline, pt->parent->name); +		if (pt->parent->ops->pt_value_str) { +			pt->parent->ops->pt_value_str(pt, __entry->value, +							sizeof(__entry->value)); +		} else { +			__entry->value[0] = '\0'; +		} +	), + +	TP_printk("name=%s value=%s", __get_str(timeline), __entry->value) +); + +#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/drivers/staging/android/uapi/android_alarm.h b/drivers/staging/android/uapi/android_alarm.h new file mode 100644 index 00000000000..aa013f6f5f3 --- /dev/null +++ b/drivers/staging/android/uapi/android_alarm.h @@ -0,0 +1,62 @@ +/* drivers/staging/android/uapi/android_alarm.h + * + * Copyright (C) 2006-2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _UAPI_LINUX_ANDROID_ALARM_H +#define _UAPI_LINUX_ANDROID_ALARM_H + +#include <linux/ioctl.h> +#include <linux/time.h> + +enum android_alarm_type { +	/* return code bit numbers or set alarm arg */ +	ANDROID_ALARM_RTC_WAKEUP, +	ANDROID_ALARM_RTC, +	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, +	ANDROID_ALARM_ELAPSED_REALTIME, +	ANDROID_ALARM_SYSTEMTIME, + +	ANDROID_ALARM_TYPE_COUNT, + +	/* return code bit numbers */ +	/* ANDROID_ALARM_TIME_CHANGE = 16 */ +}; + +enum android_alarm_return_flags { +	ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP, +	ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC, +	ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK = +				1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, +	ANDROID_ALARM_ELAPSED_REALTIME_MASK = +				1U << ANDROID_ALARM_ELAPSED_REALTIME, +	ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME, +	ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16 +}; + +/* Disable alarm */ +#define ANDROID_ALARM_CLEAR(type)           _IO('a', 0 | ((type) << 4)) + +/* Ack last alarm and wait for next */ +#define ANDROID_ALARM_WAIT                  _IO('a', 1) + +#define ALARM_IOW(c, type, size)            _IOW('a', (c) | ((type) << 4), size) +/* Set alarm */ +#define ANDROID_ALARM_SET(type)             ALARM_IOW(2, type, struct timespec) +#define ANDROID_ALARM_SET_AND_WAIT(type)    ALARM_IOW(3, type, struct timespec) +#define ANDROID_ALARM_GET_TIME(type)        ALARM_IOW(4, type, struct timespec) +#define ANDROID_ALARM_SET_RTC               _IOW('a', 5, struct timespec) +#define ANDROID_ALARM_BASE_CMD(cmd)         (cmd & ~(_IOC(0, 0, 0xf0, 0))) +#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd)    (_IOC_NR(cmd) >> 4) + +#endif diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h new file mode 100644 index 00000000000..ba4743c71d6 --- /dev/null +++ b/drivers/staging/android/uapi/ashmem.h @@ -0,0 +1,47 @@ +/* + * drivers/staging/android/uapi/ashmem.h + * + * Copyright 2008 Google Inc. + * Author: Robert Love + * + * This file is dual licensed.  It may be redistributed and/or modified + * under the terms of the Apache 2.0 License OR version 2 of the GNU + * General Public License. + */ + +#ifndef _UAPI_LINUX_ASHMEM_H +#define _UAPI_LINUX_ASHMEM_H + +#include <linux/ioctl.h> + +#define ASHMEM_NAME_LEN		256 + +#define ASHMEM_NAME_DEF		"dev/ashmem" + +/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */ +#define ASHMEM_NOT_PURGED	0 +#define ASHMEM_WAS_PURGED	1 + +/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? 
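The alarm ioctls above are driven from user space roughly as follows. The /dev/alarm node name comes from the alarm-dev misc driver rather than this header, so treat it (and the use of absolute CLOCK_REALTIME time for the RTC alarm types) as assumptions of this sketch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>
#include "android_alarm.h"

int main(void)
{
	int fd = open("/dev/alarm", O_RDWR);
	struct timespec ts;
	int fired;

	if (fd < 0)
		return 1;

	/* arm an RTC-backed wakeup alarm 60 seconds from now (wall-clock time) */
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 60;
	ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_RTC_WAKEUP), &ts);

	/* block until something fires; the result is interpreted here as a
	 * mask of the android_alarm_return_flags defined above */
	fired = ioctl(fd, ANDROID_ALARM_WAIT);
	if (fired & ANDROID_ALARM_RTC_WAKEUP_MASK)
		printf("rtc wakeup alarm fired\n");

	close(fd);
	return 0;
}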
*/ +#define ASHMEM_IS_UNPINNED	0 +#define ASHMEM_IS_PINNED	1 + +struct ashmem_pin { +	__u32 offset;	/* offset into region, in bytes, page-aligned */ +	__u32 len;	/* length forward from offset, in bytes, page-aligned */ +}; + +#define __ASHMEMIOC		0x77 + +#define ASHMEM_SET_NAME		_IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN]) +#define ASHMEM_GET_NAME		_IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN]) +#define ASHMEM_SET_SIZE		_IOW(__ASHMEMIOC, 3, size_t) +#define ASHMEM_GET_SIZE		_IO(__ASHMEMIOC, 4) +#define ASHMEM_SET_PROT_MASK	_IOW(__ASHMEMIOC, 5, unsigned long) +#define ASHMEM_GET_PROT_MASK	_IO(__ASHMEMIOC, 6) +#define ASHMEM_PIN		_IOW(__ASHMEMIOC, 7, struct ashmem_pin) +#define ASHMEM_UNPIN		_IOW(__ASHMEMIOC, 8, struct ashmem_pin) +#define ASHMEM_GET_PIN_STATUS	_IO(__ASHMEMIOC, 9) +#define ASHMEM_PURGE_ALL_CACHES	_IO(__ASHMEMIOC, 10) + +#endif	/* _UAPI_LINUX_ASHMEM_H */ diff --git a/drivers/staging/android/uapi/binder.h b/drivers/staging/android/uapi/binder.h new file mode 100644 index 00000000000..904adb7600c --- /dev/null +++ b/drivers/staging/android/uapi/binder.h @@ -0,0 +1,351 @@ +/* + * Copyright (C) 2008 Google, Inc. + * + * Based on, but no longer compatible with, the original + * OpenBinder.org binder driver interface, which is: + * + * Copyright (c) 2005 Palmsource, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_BINDER_H +#define _UAPI_LINUX_BINDER_H + +#include <linux/ioctl.h> + +#define B_PACK_CHARS(c1, c2, c3, c4) \ +	((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) +#define B_TYPE_LARGE 0x85 + +enum { +	BINDER_TYPE_BINDER	= B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), +	BINDER_TYPE_WEAK_BINDER	= B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), +	BINDER_TYPE_HANDLE	= B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), +	BINDER_TYPE_WEAK_HANDLE	= B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), +	BINDER_TYPE_FD		= B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), +}; + +enum { +	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, +	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, +}; + +#ifdef BINDER_IPC_32BIT +typedef __u32 binder_size_t; +typedef __u32 binder_uintptr_t; +#else +typedef __u64 binder_size_t; +typedef __u64 binder_uintptr_t; +#endif + +/* + * This is the flattened representation of a Binder object for transfer + * between processes.  The 'offsets' supplied as part of a binder transaction + * contains offsets into the data where these structures occur.  The Binder + * driver takes care of re-writing the structure type and data as it moves + * between processes. + */ +struct flat_binder_object { +	/* 8 bytes for large_flat_header. */ +	__u32		type; +	__u32		flags; + +	/* 8 bytes of data. */ +	union { +		binder_uintptr_t	binder;	/* local object */ +		__u32			handle;	/* remote object */ +	}; + +	/* extra data associated with local object */ +	binder_uintptr_t	cookie; +}; + +/* + * On 64-bit platforms where user code may run in 32-bits the driver must + * translate the buffer (and local binder) addresses appropriately. 
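A typical user-space sequence for the ashmem ioctls above. The /dev/ashmem node and the convention of setting the name and size before mmap() reflect the ashmem driver's behaviour, not anything this header can guarantee on its own; len is assumed to be page-aligned.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/types.h>
#include "ashmem.h"

static void *example_region(size_t len)
{
	int fd = open("/dev/ashmem", O_RDWR);
	struct ashmem_pin pin = { .offset = 0, .len = len };
	void *p;

	if (fd < 0)
		return NULL;

	ioctl(fd, ASHMEM_SET_NAME, "example-region");
	ioctl(fd, ASHMEM_SET_SIZE, len);
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return NULL;

	/* while idle: let the kernel reclaim the pages under memory pressure */
	ioctl(fd, ASHMEM_UNPIN, &pin);

	/* before the next use: re-pin and check whether the contents survived */
	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
		memset(p, 0, len);	/* purged: caller must regenerate data */

	return p;
}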
+ */ + +struct binder_write_read { +	binder_size_t		write_size;	/* bytes to write */ +	binder_size_t		write_consumed;	/* bytes consumed by driver */ +	binder_uintptr_t	write_buffer; +	binder_size_t		read_size;	/* bytes to read */ +	binder_size_t		read_consumed;	/* bytes consumed by driver */ +	binder_uintptr_t	read_buffer; +}; + +/* Use with BINDER_VERSION, driver fills in fields. */ +struct binder_version { +	/* driver protocol version -- increment with incompatible change */ +	__s32       protocol_version; +}; + +/* This is the current protocol version. */ +#ifdef BINDER_IPC_32BIT +#define BINDER_CURRENT_PROTOCOL_VERSION 7 +#else +#define BINDER_CURRENT_PROTOCOL_VERSION 8 +#endif + +#define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read) +#define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64) +#define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32) +#define BINDER_SET_IDLE_PRIORITY	_IOW('b', 6, __s32) +#define BINDER_SET_CONTEXT_MGR		_IOW('b', 7, __s32) +#define BINDER_THREAD_EXIT		_IOW('b', 8, __s32) +#define BINDER_VERSION			_IOWR('b', 9, struct binder_version) + +/* + * NOTE: Two special error codes you should check for when calling + * in to the driver are: + * + * EINTR -- The operation has been interupted.  This should be + * handled by retrying the ioctl() until a different error code + * is returned. + * + * ECONNREFUSED -- The driver is no longer accepting operations + * from your process.  That is, the process is being destroyed. + * You should handle this by exiting from your process.  Note + * that once this error code is returned, all further calls to + * the driver from any thread will return this same code. + */ + +enum transaction_flags { +	TF_ONE_WAY	= 0x01,	/* this is a one-way call: async, no return */ +	TF_ROOT_OBJECT	= 0x04,	/* contents are the component's root object */ +	TF_STATUS_CODE	= 0x08,	/* contents are a 32-bit status code */ +	TF_ACCEPT_FDS	= 0x10,	/* allow replies with file descriptors */ +}; + +struct binder_transaction_data { +	/* The first two are only used for bcTRANSACTION and brTRANSACTION, +	 * identifying the target and contents of the transaction. +	 */ +	union { +		/* target descriptor of command transaction */ +		__u32	handle; +		/* target descriptor of return transaction */ +		binder_uintptr_t ptr; +	} target; +	binder_uintptr_t	cookie;	/* target object cookie */ +	__u32		code;		/* transaction command */ + +	/* General information about the transaction. */ +	__u32	        flags; +	pid_t		sender_pid; +	uid_t		sender_euid; +	binder_size_t	data_size;	/* number of bytes of data */ +	binder_size_t	offsets_size;	/* number of bytes of offsets */ + +	/* If this transaction is inline, the data immediately +	 * follows here; otherwise, it ends with a pointer to +	 * the data buffer. +	 */ +	union { +		struct { +			/* transaction data */ +			binder_uintptr_t	buffer; +			/* offsets from buffer to flat_binder_object structs */ +			binder_uintptr_t	offsets; +		} ptr; +		__u8	buf[8]; +	} data; +}; + +struct binder_ptr_cookie { +	binder_uintptr_t ptr; +	binder_uintptr_t cookie; +}; + +struct binder_handle_cookie { +	__u32 handle; +	binder_uintptr_t cookie; +} __attribute__((packed)); + +struct binder_pri_desc { +	__s32 priority; +	__u32 desc; +}; + +struct binder_pri_ptr_cookie { +	__s32 priority; +	binder_uintptr_t ptr; +	binder_uintptr_t cookie; +}; + +enum binder_driver_return_protocol { +	BR_ERROR = _IOR('r', 0, __s32), +	/* +	 * int: error code +	 */ + +	BR_OK = _IO('r', 1), +	/* No parameters! 
*/ + +	BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), +	BR_REPLY = _IOR('r', 3, struct binder_transaction_data), +	/* +	 * binder_transaction_data: the received command. +	 */ + +	BR_ACQUIRE_RESULT = _IOR('r', 4, __s32), +	/* +	 * not currently supported +	 * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. +	 * Else the remote object has acquired a primary reference. +	 */ + +	BR_DEAD_REPLY = _IO('r', 5), +	/* +	 * The target of the last transaction (either a bcTRANSACTION or +	 * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters. +	 */ + +	BR_TRANSACTION_COMPLETE = _IO('r', 6), +	/* +	 * No parameters... always refers to the last transaction requested +	 * (including replies).  Note that this will be sent even for +	 * asynchronous transactions. +	 */ + +	BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), +	BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), +	BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), +	BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), +	/* +	 * void *:	ptr to binder +	 * void *: cookie for binder +	 */ + +	BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), +	/* +	 * not currently supported +	 * int:	priority +	 * void *: ptr to binder +	 * void *: cookie for binder +	 */ + +	BR_NOOP = _IO('r', 12), +	/* +	 * No parameters.  Do nothing and examine the next command.  It exists +	 * primarily so that we can replace it with a BR_SPAWN_LOOPER command. +	 */ + +	BR_SPAWN_LOOPER = _IO('r', 13), +	/* +	 * No parameters.  The driver has determined that a process has no +	 * threads waiting to service incoming transactions.  When a process +	 * receives this command, it must spawn a new service thread and +	 * register it via bcENTER_LOOPER. +	 */ + +	BR_FINISHED = _IO('r', 14), +	/* +	 * not currently supported +	 * stop threadpool thread +	 */ + +	BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t), +	/* +	 * void *: cookie +	 */ +	BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t), +	/* +	 * void *: cookie +	 */ + +	BR_FAILED_REPLY = _IO('r', 17), +	/* +	 * The the last transaction (either a bcTRANSACTION or +	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters. +	 */ +}; + +enum binder_driver_command_protocol { +	BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), +	BC_REPLY = _IOW('c', 1, struct binder_transaction_data), +	/* +	 * binder_transaction_data: the sent command. +	 */ + +	BC_ACQUIRE_RESULT = _IOW('c', 2, __s32), +	/* +	 * not currently supported +	 * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful. +	 * Else you have acquired a primary reference on the object. +	 */ + +	BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t), +	/* +	 * void *: ptr to transaction data received on a read +	 */ + +	BC_INCREFS = _IOW('c', 4, __u32), +	BC_ACQUIRE = _IOW('c', 5, __u32), +	BC_RELEASE = _IOW('c', 6, __u32), +	BC_DECREFS = _IOW('c', 7, __u32), +	/* +	 * int:	descriptor +	 */ + +	BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), +	BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), +	/* +	 * void *: ptr to binder +	 * void *: cookie for binder +	 */ + +	BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), +	/* +	 * not currently supported +	 * int: priority +	 * int: descriptor +	 */ + +	BC_REGISTER_LOOPER = _IO('c', 11), +	/* +	 * No parameters. +	 * Register a spawned looper thread with the device. +	 */ + +	BC_ENTER_LOOPER = _IO('c', 12), +	BC_EXIT_LOOPER = _IO('c', 13), +	/* +	 * No parameters. 
+	 * These two commands are sent as an application-level thread +	 * enters and exits the binder loop, respectively.  They are +	 * used so the binder can have an accurate count of the number +	 * of looping threads it has available. +	 */ + +	BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, +						struct binder_handle_cookie), +	/* +	 * int: handle +	 * void *: cookie +	 */ + +	BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, +						struct binder_handle_cookie), +	/* +	 * int: handle +	 * void *: cookie +	 */ + +	BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t), +	/* +	 * void *: cookie +	 */ +}; + +#endif /* _UAPI_LINUX_BINDER_H */ + diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h new file mode 100644 index 00000000000..6aa49567337 --- /dev/null +++ b/drivers/staging/android/uapi/ion.h @@ -0,0 +1,196 @@ +/* + * drivers/staging/android/uapi/ion.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_ION_H +#define _UAPI_LINUX_ION_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +typedef int ion_user_handle_t; + +/** + * enum ion_heap_types - list of all possible types of heaps + * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc + * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc + * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved + *				 carveout heap, allocations are physically + *				 contiguous + * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API + * @ION_NUM_HEAPS:		 helper for iterating over heaps, a bit mask + *				 is used to identify the heaps, so only 32 + *				 total heap types are supported + */ +enum ion_heap_type { +	ION_HEAP_TYPE_SYSTEM, +	ION_HEAP_TYPE_SYSTEM_CONTIG, +	ION_HEAP_TYPE_CARVEOUT, +	ION_HEAP_TYPE_CHUNK, +	ION_HEAP_TYPE_DMA, +	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always +				 are at the end of this enum */ +	ION_NUM_HEAPS = 16, +}; + +#define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM) +#define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG) +#define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT) +#define ION_HEAP_TYPE_DMA_MASK		(1 << ION_HEAP_TYPE_DMA) + +#define ION_NUM_HEAP_IDS		(sizeof(unsigned int) * 8) + +/** + * allocation flags - the lower 16 bits are used by core ion, the upper 16 + * bits are reserved for use by the heaps themselves. 
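Stepping back to the binder UAPI that closes above: the smallest useful interaction with the driver is probing the protocol version and sizing the thread pool. The /dev/binder path is an assumption of this sketch (the node is created by the driver, not by the header), and real clients would go on to mmap() the fd and loop on BINDER_WRITE_READ.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/types.h>		/* pid_t/uid_t used by binder.h */
#include <linux/types.h>
#include <unistd.h>
#include "binder.h"

int main(void)
{
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
	struct binder_version vers;
	__u32 max_threads = 15;

	if (fd < 0)
		return 1;

	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
		fprintf(stderr, "binder: protocol version mismatch\n");
		close(fd);
		return 1;
	}

	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
	close(fd);
	return 0;
}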
+ */ +#define ION_FLAG_CACHED 1		/* mappings of this buffer should be +					   cached, ion will do cache +					   maintenance when the buffer is +					   mapped for dma */ +#define ION_FLAG_CACHED_NEEDS_SYNC 2	/* mappings of this buffer will created +					   at mmap time, if this is set +					   caches must be managed manually */ + +/** + * DOC: Ion Userspace API + * + * create a client by opening /dev/ion + * most operations handled via following ioctls + * + */ + +/** + * struct ion_allocation_data - metadata passed from userspace for allocations + * @len:		size of the allocation + * @align:		required alignment of the allocation + * @heap_id_mask:	mask of heap ids to allocate from + * @flags:		flags passed to heap + * @handle:		pointer that will be populated with a cookie to use to + *			refer to this allocation + * + * Provided by userspace as an argument to the ioctl + */ +struct ion_allocation_data { +	size_t len; +	size_t align; +	unsigned int heap_id_mask; +	unsigned int flags; +	ion_user_handle_t handle; +}; + +/** + * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair + * @handle:	a handle + * @fd:		a file descriptor representing that handle + * + * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with + * the handle returned from ion alloc, and the kernel returns the file + * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace + * provides the file descriptor and the kernel returns the handle. + */ +struct ion_fd_data { +	ion_user_handle_t handle; +	int fd; +}; + +/** + * struct ion_handle_data - a handle passed to/from the kernel + * @handle:	a handle + */ +struct ion_handle_data { +	ion_user_handle_t handle; +}; + +/** + * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl + * @cmd:	the custom ioctl function to call + * @arg:	additional data to pass to the custom ioctl, typically a user + *		pointer to a predefined structure + * + * This works just like the regular cmd and arg fields of an ioctl. + */ +struct ion_custom_data { +	unsigned int cmd; +	unsigned long arg; +}; + +#define ION_IOC_MAGIC		'I' + +/** + * DOC: ION_IOC_ALLOC - allocate memory + * + * Takes an ion_allocation_data struct and returns it with the handle field + * populated with the opaque handle for the allocation. + */ +#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \ +				      struct ion_allocation_data) + +/** + * DOC: ION_IOC_FREE - free memory + * + * Takes an ion_handle_data struct and frees the handle. + */ +#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) + +/** + * DOC: ION_IOC_MAP - get a file descriptor to mmap + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle.  Returns the struct with the fd field set to a file + * descriptor open in the current address space.  This file descriptor + * can then be used as an argument to mmap. + */ +#define ION_IOC_MAP		_IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data) + +/** + * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle.  Returns the struct with the fd field set to a file + * descriptor open in the current address space.  This file descriptor + * can then be passed to another process.  The corresponding opaque handle can + * be retrieved via ION_IOC_IMPORT. 
+ */ +#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data) + +/** + * DOC: ION_IOC_IMPORT - imports a shared file descriptor + * + * Takes an ion_fd_data struct with the fd field populated with a valid file + * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle + * filed set to the corresponding opaque handle. + */ +#define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data) + +/** + * DOC: ION_IOC_SYNC - syncs a shared file descriptors to memory + * + * Deprecated in favor of using the dma_buf api's correctly (syncing + * will happend automatically when the buffer is mapped to a device). + * If necessary should be used after touching a cached buffer from the cpu, + * this will make the buffer in memory coherent. + */ +#define ION_IOC_SYNC		_IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data) + +/** + * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl + * + * Takes the argument of the architecture specific ioctl to call and + * passes appropriate userdata for that ioctl + */ +#define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data) + +#endif /* _UAPI_LINUX_ION_H */ diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h new file mode 100644 index 00000000000..ffef06f6313 --- /dev/null +++ b/drivers/staging/android/uapi/ion_test.h @@ -0,0 +1,70 @@ +/* + * drivers/staging/android/uapi/ion.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_ION_TEST_H +#define _UAPI_LINUX_ION_TEST_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * struct ion_test_rw_data - metadata passed to the kernel to read handle + * @ptr:	a pointer to an area at least as large as size + * @offset:	offset into the ion buffer to start reading + * @size:	size to read or write + * @write:	1 to write, 0 to read + */ +struct ion_test_rw_data { +	__u64 ptr; +	__u64 offset; +	__u64 size; +	int write; +	int __padding; +}; + +#define ION_IOC_MAGIC		'I' + +/** + * DOC: ION_IOC_TEST_SET_DMA_BUF - attach a dma buf to the test driver + * + * Attaches a dma buf fd to the test driver.  Passing a second fd or -1 will + * release the first fd. + */ +#define ION_IOC_TEST_SET_FD \ +			_IO(ION_IOC_MAGIC, 0xf0) + +/** + * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA + * + * Reads or writes the memory from a handle using an uncached mapping.  Can be + * used by unit tests to emulate a DMA engine as close as possible.  Only + * expected to be used for debugging and testing, may not always be available. + */ +#define ION_IOC_TEST_DMA_MAPPING \ +			_IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data) + +/** + * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle + * + * Reads or writes the memory from a handle using a kernel mapping.  Can be + * used by unit tests to test heap map_kernel functions.  Only expected to be + * used for debugging and testing, may not always be available. 
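Putting the ion ioctls above together, a minimal client allocation and share might look like the sketch below. The 4 KiB size, the cached flag and the choice of the system heap are arbitrary; which heap ids actually exist is board specific, and error handling is trimmed.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ion.h"

int main(void)
{
	int ion = open("/dev/ion", O_RDWR);	/* one client per open(), as documented above */
	struct ion_allocation_data alloc = {
		.len		= 4096,
		.align		= 4096,
		.heap_id_mask	= ION_HEAP_SYSTEM_MASK,
		.flags		= ION_FLAG_CACHED,
	};
	struct ion_fd_data share;
	struct ion_handle_data handle;
	void *p;

	if (ion < 0 || ioctl(ion, ION_IOC_ALLOC, &alloc) < 0)
		return 1;

	/* turn the opaque handle into an fd that can be mmap()ed or passed on */
	share.handle = alloc.handle;
	ioctl(ion, ION_IOC_SHARE, &share);
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);

	/* ... use p ... */

	munmap(p, 4096);
	close(share.fd);
	handle.handle = alloc.handle;
	ioctl(ion, ION_IOC_FREE, &handle);
	close(ion);
	return 0;
}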
+ */ +#define ION_IOC_TEST_KERNEL_MAPPING \ +			_IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data) + + +#endif /* _UAPI_LINUX_ION_H */ diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h new file mode 100644 index 00000000000..9b5d4869505 --- /dev/null +++ b/drivers/staging/android/uapi/sw_sync.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2012 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_SW_SYNC_H +#define _UAPI_LINUX_SW_SYNC_H + +#include <linux/types.h> + +struct sw_sync_create_fence_data { +	__u32	value; +	char	name[32]; +	__s32	fence; /* fd of new fence */ +}; + +#define SW_SYNC_IOC_MAGIC	'W' + +#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\ +		struct sw_sync_create_fence_data) +#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32) + +#endif /* _UAPI_LINUX_SW_SYNC_H */ diff --git a/drivers/staging/android/uapi/sync.h b/drivers/staging/android/uapi/sync.h new file mode 100644 index 00000000000..e964c751f6b --- /dev/null +++ b/drivers/staging/android/uapi/sync.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2012 Google, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_SYNC_H +#define _UAPI_LINUX_SYNC_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * struct sync_merge_data - data passed to merge ioctl + * @fd2:	file descriptor of second fence + * @name:	name of new fence + * @fence:	returns the fd of the new fence to userspace + */ +struct sync_merge_data { +	__s32	fd2; /* fd of second fence */ +	char	name[32]; /* name of new fence */ +	__s32	fence; /* fd on newly created fence */ +}; + +/** + * struct sync_pt_info - detailed sync_pt information + * @len:		length of sync_pt_info including any driver_data + * @obj_name:		name of parent sync_timeline + * @driver_name:	name of driver implementing the parent + * @status:		status of the sync_pt 0:active 1:signaled <0:error + * @timestamp_ns:	timestamp of status change in nanoseconds + * @driver_data:	any driver dependent data + */ +struct sync_pt_info { +	__u32	len; +	char	obj_name[32]; +	char	driver_name[32]; +	__s32	status; +	__u64	timestamp_ns; + +	__u8	driver_data[0]; +}; + +/** + * struct sync_fence_info_data - data returned from fence info ioctl + * @len:	ioctl caller writes the size of the buffer its passing in. + *		ioctl returns length of sync_fence_data returned to userspace + *		including pt_info. + * @name:	name of fence + * @status:	status of fence. 1: signaled 0:active <0:error + * @pt_info:	a sync_pt_info struct for every sync_pt in the fence + */ +struct sync_fence_info_data { +	__u32	len; +	char	name[32]; +	__s32	status; + +	__u8	pt_info[0]; +}; + +#define SYNC_IOC_MAGIC		'>' + +/** + * DOC: SYNC_IOC_WAIT - wait for a fence to signal + * + * pass timeout in milliseconds.  Waits indefinitely timeout < 0. 
+ */ +#define SYNC_IOC_WAIT		_IOW(SYNC_IOC_MAGIC, 0, __s32) + +/** + * DOC: SYNC_IOC_MERGE - merge two fences + * + * Takes a struct sync_merge_data.  Creates a new fence containing copies of + * the sync_pts in both the calling fd and sync_merge_data.fd2.  Returns the + * new fence's fd in sync_merge_data.fence + */ +#define SYNC_IOC_MERGE		_IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data) + +/** + * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence + * + * Takes a struct sync_fence_info_data with extra space allocated for pt_info. + * Caller should write the size of the buffer into len.  On return, len is + * updated to reflect the total size of the sync_fence_info_data including + * pt_info. + * + * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence. + * To iterate over the sync_pt_infos, use the sync_pt_info.len field. + */ +#define SYNC_IOC_FENCE_INFO	_IOWR(SYNC_IOC_MAGIC, 2,\ +	struct sync_fence_info_data) + +#endif /* _UAPI_LINUX_SYNC_H */  | 
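Finally, the sw_sync and sync UAPIs above can be exercised together entirely from user space, which is also how the framework is usually unit-tested. The /dev/sw_sync node, and the rule that each open() creates an independent timeline, come from the SW_SYNC_USER driver and are assumptions of this sketch; the ioctls and structures are taken from the two headers.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "sw_sync.h"
#include "sync.h"

int main(void)
{
	int tl = open("/dev/sw_sync", O_RDWR);
	struct sw_sync_create_fence_data create = { .value = 1 };
	__s32 timeout_ms = 1000;
	__u32 step = 1;

	if (tl < 0)
		return 1;

	/* fence that signals once the timeline counter reaches 1 */
	strcpy(create.name, "example-fence");
	if (ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &create) < 0)
		return 1;

	/* advance the counter; this signals the fence ... */
	ioctl(tl, SW_SYNC_IOC_INC, &step);

	/* ... so this wait (up to 1000 ms) returns immediately */
	ioctl(create.fence, SYNC_IOC_WAIT, &timeout_ms);

	/* SYNC_IOC_MERGE and SYNC_IOC_FENCE_INFO take the fence fd the same way */
	close(create.fence);
	close(tl);
	return 0;
}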
