diff options
Diffstat (limited to 'kernel/time/clockevents.c')
| -rw-r--r-- | kernel/time/clockevents.c | 271 | 
1 file changed, 240 insertions, 31 deletions
| diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index c6d6400ee13..38959c86678 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -15,20 +15,23 @@  #include <linux/hrtimer.h>  #include <linux/init.h>  #include <linux/module.h> -#include <linux/notifier.h>  #include <linux/smp.h> +#include <linux/device.h>  #include "tick-internal.h"  /* The registered clock event devices */  static LIST_HEAD(clockevent_devices);  static LIST_HEAD(clockevents_released); - -/* Notification for clock events */ -static RAW_NOTIFIER_HEAD(clockevents_chain); -  /* Protection for the above */  static DEFINE_RAW_SPINLOCK(clockevents_lock); +/* Protection for unbind operations */ +static DEFINE_MUTEX(clockevents_mutex); + +struct ce_unbind { +	struct clock_event_device *ce; +	int res; +};  /**   * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds @@ -232,47 +235,107 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,  	return (rc && force) ? clockevents_program_min_delta(dev) : rc;  } -/** - * clockevents_register_notifier - register a clock events change listener +/* + * Called after a notify add to make devices available which were + * released from the notifier call.   
*/ -int clockevents_register_notifier(struct notifier_block *nb) +static void clockevents_notify_released(void)  { -	unsigned long flags; -	int ret; +	struct clock_event_device *dev; -	raw_spin_lock_irqsave(&clockevents_lock, flags); -	ret = raw_notifier_chain_register(&clockevents_chain, nb); -	raw_spin_unlock_irqrestore(&clockevents_lock, flags); +	while (!list_empty(&clockevents_released)) { +		dev = list_entry(clockevents_released.next, +				 struct clock_event_device, list); +		list_del(&dev->list); +		list_add(&dev->list, &clockevent_devices); +		tick_check_new_device(dev); +	} +} -	return ret; +/* + * Try to install a replacement clock event device + */ +static int clockevents_replace(struct clock_event_device *ced) +{ +	struct clock_event_device *dev, *newdev = NULL; + +	list_for_each_entry(dev, &clockevent_devices, list) { +		if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED) +			continue; + +		if (!tick_check_replacement(newdev, dev)) +			continue; + +		if (!try_module_get(dev->owner)) +			continue; + +		if (newdev) +			module_put(newdev->owner); +		newdev = dev; +	} +	if (newdev) { +		tick_install_replacement(newdev); +		list_del_init(&ced->list); +	} +	return newdev ? 0 : -EBUSY;  }  /* - * Notify about a clock event change. Called with clockevents_lock - * held. + * Called with clockevents_mutex and clockevents_lock held   */ -static void clockevents_do_notify(unsigned long reason, void *dev) +static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)  { -	raw_notifier_call_chain(&clockevents_chain, reason, dev); +	/* Fast track. Device is unused */ +	if (ced->mode == CLOCK_EVT_MODE_UNUSED) { +		list_del_init(&ced->list); +		return 0; +	} + +	return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;  }  /* - * Called after a notify add to make devices available which were - * released from the notifier call. 
+ * SMP function call to unbind a device   */ -static void clockevents_notify_released(void) +static void __clockevents_unbind(void *arg)  { -	struct clock_event_device *dev; +	struct ce_unbind *cu = arg; +	int res; + +	raw_spin_lock(&clockevents_lock); +	res = __clockevents_try_unbind(cu->ce, smp_processor_id()); +	if (res == -EAGAIN) +		res = clockevents_replace(cu->ce); +	cu->res = res; +	raw_spin_unlock(&clockevents_lock); +} -	while (!list_empty(&clockevents_released)) { -		dev = list_entry(clockevents_released.next, -				 struct clock_event_device, list); -		list_del(&dev->list); -		list_add(&dev->list, &clockevent_devices); -		clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); -	} +/* + * Issues smp function call to unbind a per cpu device. Called with + * clockevents_mutex held. + */ +static int clockevents_unbind(struct clock_event_device *ced, int cpu) +{ +	struct ce_unbind cu = { .ce = ced, .res = -ENODEV }; + +	smp_call_function_single(cpu, __clockevents_unbind, &cu, 1); +	return cu.res;  } +/* + * Unbind a clockevents device. + */ +int clockevents_unbind_device(struct clock_event_device *ced, int cpu) +{ +	int ret; + +	mutex_lock(&clockevents_mutex); +	ret = clockevents_unbind(ced, cpu); +	mutex_unlock(&clockevents_mutex); +	return ret; +} +EXPORT_SYMBOL_GPL(clockevents_unbind); +  /**   * clockevents_register_device - register a clock event device   * @dev:	device to register @@ -290,7 +353,7 @@ void clockevents_register_device(struct clock_event_device *dev)  	raw_spin_lock_irqsave(&clockevents_lock, flags);  	list_add(&dev->list, &clockevent_devices); -	clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); +	tick_check_new_device(dev);  	clockevents_notify_released();  	raw_spin_unlock_irqrestore(&clockevents_lock, flags); @@ -386,6 +449,7 @@ void clockevents_exchange_device(struct clock_event_device *old,  	 * released list and do a notify add later.  	 
*/  	if (old) { +		module_put(old->owner);  		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);  		list_del(&old->list);  		list_add(&old->list, &clockevents_released); @@ -433,10 +497,36 @@ void clockevents_notify(unsigned long reason, void *arg)  	int cpu;  	raw_spin_lock_irqsave(&clockevents_lock, flags); -	clockevents_do_notify(reason, arg);  	switch (reason) { +	case CLOCK_EVT_NOTIFY_BROADCAST_ON: +	case CLOCK_EVT_NOTIFY_BROADCAST_OFF: +	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: +		tick_broadcast_on_off(reason, arg); +		break; + +	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER: +	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT: +		tick_broadcast_oneshot_control(reason); +		break; + +	case CLOCK_EVT_NOTIFY_CPU_DYING: +		tick_handover_do_timer(arg); +		break; + +	case CLOCK_EVT_NOTIFY_SUSPEND: +		tick_suspend(); +		tick_suspend_broadcast(); +		break; + +	case CLOCK_EVT_NOTIFY_RESUME: +		tick_resume(); +		break; +  	case CLOCK_EVT_NOTIFY_CPU_DEAD: +		tick_shutdown_broadcast_oneshot(arg); +		tick_shutdown_broadcast(arg); +		tick_shutdown(arg);  		/*  		 * Unregister the clock event devices which were  		 * released from the users in the notify chain. 
@@ -462,4 +552,123 @@ void clockevents_notify(unsigned long reason, void *arg)  	raw_spin_unlock_irqrestore(&clockevents_lock, flags);  }  EXPORT_SYMBOL_GPL(clockevents_notify); + +#ifdef CONFIG_SYSFS +struct bus_type clockevents_subsys = { +	.name		= "clockevents", +	.dev_name       = "clockevent", +}; + +static DEFINE_PER_CPU(struct device, tick_percpu_dev); +static struct tick_device *tick_get_tick_dev(struct device *dev); + +static ssize_t sysfs_show_current_tick_dev(struct device *dev, +					   struct device_attribute *attr, +					   char *buf) +{ +	struct tick_device *td; +	ssize_t count = 0; + +	raw_spin_lock_irq(&clockevents_lock); +	td = tick_get_tick_dev(dev); +	if (td && td->evtdev) +		count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name); +	raw_spin_unlock_irq(&clockevents_lock); +	return count; +} +static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL); + +/* We don't support the abomination of removable broadcast devices */ +static ssize_t sysfs_unbind_tick_dev(struct device *dev, +				     struct device_attribute *attr, +				     const char *buf, size_t count) +{ +	char name[CS_NAME_LEN]; +	size_t ret = sysfs_get_uname(buf, name, count); +	struct clock_event_device *ce; + +	if (ret < 0) +		return ret; + +	ret = -ENODEV; +	mutex_lock(&clockevents_mutex); +	raw_spin_lock_irq(&clockevents_lock); +	list_for_each_entry(ce, &clockevent_devices, list) { +		if (!strcmp(ce->name, name)) { +			ret = __clockevents_try_unbind(ce, dev->id); +			break; +		} +	} +	raw_spin_unlock_irq(&clockevents_lock); +	/* +	 * We hold clockevents_mutex, so ce can't go away +	 */ +	if (ret == -EAGAIN) +		ret = clockevents_unbind(ce, dev->id); +	mutex_unlock(&clockevents_mutex); +	return ret ? 
ret : count; +} +static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev); + +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +static struct device tick_bc_dev = { +	.init_name	= "broadcast", +	.id		= 0, +	.bus		= &clockevents_subsys, +}; + +static struct tick_device *tick_get_tick_dev(struct device *dev) +{ +	return dev == &tick_bc_dev ? tick_get_broadcast_device() : +		&per_cpu(tick_cpu_device, dev->id); +} + +static __init int tick_broadcast_init_sysfs(void) +{ +	int err = device_register(&tick_bc_dev); + +	if (!err) +		err = device_create_file(&tick_bc_dev, &dev_attr_current_device); +	return err; +} +#else +static struct tick_device *tick_get_tick_dev(struct device *dev) +{ +	return &per_cpu(tick_cpu_device, dev->id); +} +static inline int tick_broadcast_init_sysfs(void) { return 0; }  #endif + +static int __init tick_init_sysfs(void) +{ +	int cpu; + +	for_each_possible_cpu(cpu) { +		struct device *dev = &per_cpu(tick_percpu_dev, cpu); +		int err; + +		dev->id = cpu; +		dev->bus = &clockevents_subsys; +		err = device_register(dev); +		if (!err) +			err = device_create_file(dev, &dev_attr_current_device); +		if (!err) +			err = device_create_file(dev, &dev_attr_unbind_device); +		if (err) +			return err; +	} +	return tick_broadcast_init_sysfs(); +} + +static int __init clockevents_init_sysfs(void) +{ +	int err = subsys_system_register(&clockevents_subsys, NULL); + +	if (!err) +		err = tick_init_sysfs(); +	return err; +} +device_initcall(clockevents_init_sysfs); +#endif /* SYSFS */ + +#endif /* GENERIC_CLOCK_EVENTS */ | 
