diff options
Diffstat (limited to 'kernel/time/clockevents.c')
| -rw-r--r-- | kernel/time/clockevents.c | 117 | 
1 file changed, 87 insertions, 30 deletions
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 38959c86678..9c94c19f130 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -33,29 +33,64 @@ struct ce_unbind {  	int res;  }; -/** - * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds - * @latch:	value to convert - * @evt:	pointer to clock event device descriptor - * - * Math helper, returns latch value converted to nanoseconds (bound checked) - */ -u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) +static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt, +			bool ismax)  {  	u64 clc = (u64) latch << evt->shift; +	u64 rnd;  	if (unlikely(!evt->mult)) {  		evt->mult = 1;  		WARN_ON(1);  	} +	rnd = (u64) evt->mult - 1; + +	/* +	 * Upper bound sanity check. If the backwards conversion is +	 * not equal latch, we know that the above shift overflowed. +	 */ +	if ((clc >> evt->shift) != (u64)latch) +		clc = ~0ULL; + +	/* +	 * Scaled math oddities: +	 * +	 * For mult <= (1 << shift) we can safely add mult - 1 to +	 * prevent integer rounding loss. So the backwards conversion +	 * from nsec to device ticks will be correct. +	 * +	 * For mult > (1 << shift), i.e. device frequency is > 1GHz we +	 * need to be careful. Adding mult - 1 will result in a value +	 * which when converted back to device ticks can be larger +	 * than latch by up to (mult - 1) >> shift. For the min_delta +	 * calculation we still want to apply this in order to stay +	 * above the minimum device ticks limit. For the upper limit +	 * we would end up with a latch value larger than the upper +	 * limit of the device, so we omit the add to stay below the +	 * device upper boundary. +	 * +	 * Also omit the add if it would overflow the u64 boundary. 
+	 */ +	if ((~0ULL - clc > rnd) && +	    (!ismax || evt->mult <= (1U << evt->shift))) +		clc += rnd;  	do_div(clc, evt->mult); -	if (clc < 1000) -		clc = 1000; -	if (clc > KTIME_MAX) -		clc = KTIME_MAX; -	return clc; +	/* Deltas less than 1usec are pointless noise */ +	return clc > 1000 ? clc : 1000; +} + +/** + * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds + * @latch:	value to convert + * @evt:	pointer to clock event device descriptor + * + * Math helper, returns latch value converted to nanoseconds (bound checked) + */ +u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt) +{ +	return cev_delta2ns(latch, evt, false);  }  EXPORT_SYMBOL_GPL(clockevent_delta2ns); @@ -111,7 +146,8 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)  {  	/* Nothing to do if we already reached the limit */  	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) { -		printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n"); +		printk_deferred(KERN_WARNING +				"CE: Reprogramming failure. Giving up\n");  		dev->next_event.tv64 = KTIME_MAX;  		return -ETIME;  	} @@ -124,9 +160,10 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)  	if (dev->min_delta_ns > MIN_DELTA_LIMIT)  		dev->min_delta_ns = MIN_DELTA_LIMIT; -	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n", -	       dev->name ? dev->name : "?", -	       (unsigned long long) dev->min_delta_ns); +	printk_deferred(KERN_WARNING +			"CE: %s increased min_delta_ns to %llu nsec\n", +			dev->name ? 
dev->name : "?", +			(unsigned long long) dev->min_delta_ns);  	return 0;  } @@ -380,8 +417,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq)  		sec = 600;  	clockevents_calc_mult_shift(dev, freq, sec); -	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev); -	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev); +	dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false); +	dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);  }  /** @@ -404,6 +441,19 @@ void clockevents_config_and_register(struct clock_event_device *dev,  }  EXPORT_SYMBOL_GPL(clockevents_config_and_register); +int __clockevents_update_freq(struct clock_event_device *dev, u32 freq) +{ +	clockevents_config(dev, freq); + +	if (dev->mode == CLOCK_EVT_MODE_ONESHOT) +		return clockevents_program_event(dev, dev->next_event, false); + +	if (dev->mode == CLOCK_EVT_MODE_PERIODIC) +		dev->set_mode(CLOCK_EVT_MODE_PERIODIC, dev); + +	return 0; +} +  /**   * clockevents_update_freq - Update frequency and reprogram a clock event device.   * @dev:	device to modify @@ -411,17 +461,22 @@ EXPORT_SYMBOL_GPL(clockevents_config_and_register);   *   * Reconfigure and reprogram a clock event device in oneshot   * mode. Must be called on the cpu for which the device delivers per - * cpu timer events with interrupts disabled!  Returns 0 on success, - * -ETIME when the event is in the past. + * cpu timer events. If called for the broadcast device the core takes + * care of serialization. + * + * Returns 0 on success, -ETIME when the event is in the past.   
*/  int clockevents_update_freq(struct clock_event_device *dev, u32 freq)  { -	clockevents_config(dev, freq); - -	if (dev->mode != CLOCK_EVT_MODE_ONESHOT) -		return 0; +	unsigned long flags; +	int ret; -	return clockevents_program_event(dev, dev->next_event, false); +	local_irq_save(flags); +	ret = tick_broadcast_update_freq(dev, freq); +	if (ret == -ENODEV) +		ret = __clockevents_update_freq(dev, freq); +	local_irq_restore(flags); +	return ret;  }  /* @@ -489,12 +544,13 @@ void clockevents_resume(void)  #ifdef CONFIG_GENERIC_CLOCKEVENTS  /**   * clockevents_notify - notification about relevant events + * Returns 0 on success, any other value on error   */ -void clockevents_notify(unsigned long reason, void *arg) +int clockevents_notify(unsigned long reason, void *arg)  {  	struct clock_event_device *dev, *tmp;  	unsigned long flags; -	int cpu; +	int cpu, ret = 0;  	raw_spin_lock_irqsave(&clockevents_lock, flags); @@ -507,7 +563,7 @@ void clockevents_notify(unsigned long reason, void *arg)  	case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:  	case CLOCK_EVT_NOTIFY_BROADCAST_EXIT: -		tick_broadcast_oneshot_control(reason); +		ret = tick_broadcast_oneshot_control(reason);  		break;  	case CLOCK_EVT_NOTIFY_CPU_DYING: @@ -550,6 +606,7 @@ void clockevents_notify(unsigned long reason, void *arg)  		break;  	}  	raw_spin_unlock_irqrestore(&clockevents_lock, flags); +	return ret;  }  EXPORT_SYMBOL_GPL(clockevents_notify); @@ -584,7 +641,7 @@ static ssize_t sysfs_unbind_tick_dev(struct device *dev,  				     const char *buf, size_t count)  {  	char name[CS_NAME_LEN]; -	size_t ret = sysfs_get_uname(buf, name, count); +	ssize_t ret = sysfs_get_uname(buf, name, count);  	struct clock_event_device *ce;  	if (ret < 0)  | 
