diff options
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c | 49
1 file changed, 30 insertions(+), 19 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 383319bae3f..3ab28993f6e 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -46,6 +46,7 @@  #include <linux/sched.h>  #include <linux/sched/sysctl.h>  #include <linux/sched/rt.h> +#include <linux/sched/deadline.h>  #include <linux/timer.h>  #include <linux/freezer.h> @@ -167,19 +168,6 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,  	}  } - -/* - * Get the preferred target CPU for NOHZ - */ -static int hrtimer_get_target(int this_cpu, int pinned) -{ -#ifdef CONFIG_NO_HZ_COMMON -	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) -		return get_nohz_timer_target(); -#endif -	return this_cpu; -} -  /*   * With HIGHRES=y we do not migrate the timer when it is expiring   * before the next event on the target cpu because we cannot reprogram @@ -213,7 +201,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,  	struct hrtimer_clock_base *new_base;  	struct hrtimer_cpu_base *new_cpu_base;  	int this_cpu = smp_processor_id(); -	int cpu = hrtimer_get_target(this_cpu, pinned); +	int cpu = get_nohz_timer_target(pinned);  	int basenum = base->index;  again: @@ -246,6 +234,11 @@ again:  			goto again;  		}  		timer->base = new_base; +	} else { +		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) { +			cpu = this_cpu; +			goto again; +		}  	}  	return new_base;  } @@ -581,6 +574,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)  	cpu_base->expires_next.tv64 = expires_next.tv64; +	/* +	 * If a hang was detected in the last timer interrupt then we +	 * leave the hang delay active in the hardware. We want the +	 * system to make progress. That also prevents the following +	 * scenario: +	 * T1 expires 50ms from now +	 * T2 expires 5s from now +	 * +	 * T1 is removed, so this code is called and would reprogram +	 * the hardware to 5s from now. 
Any hrtimer_start after that +	will not reprogram the hardware due to hang_detected being +	set. So we'd effectively block all timers until the T2 event +	fires. +	 */ +	if (cpu_base->hang_detected) +		return; +  	if (cpu_base->expires_next.tv64 != KTIME_MAX)  		tick_program_event(cpu_base->expires_next, 1);  } @@ -980,11 +990,8 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,  	/* Remove an active timer from the queue: */  	ret = remove_hrtimer(timer, base); -	/* Switch the timer base, if necessary: */ -	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); -  	if (mode & HRTIMER_MODE_REL) { -		tim = ktime_add_safe(tim, new_base->get_time()); +		tim = ktime_add_safe(tim, base->get_time());  		/*  		 * CONFIG_TIME_LOW_RES is a temporary way for architectures  		 * to signal that they simply return xtime in @@ -999,6 +1006,9 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,  	hrtimer_set_expires_range_ns(timer, tim, delta_ns); +	/* Switch the timer base, if necessary: */ +	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); +  	timer_stats_hrtimer_set_start_info(timer);  	leftmost = enqueue_hrtimer(timer, new_base); @@ -1029,6 +1039,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,  	return ret;  } +EXPORT_SYMBOL_GPL(__hrtimer_start_range_ns);  /**   * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU @@ -1610,7 +1621,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,  	unsigned long slack;  	slack = current->timer_slack_ns; -	if (rt_task(current)) +	if (dl_task(current) || rt_task(current))  		slack = 0;  	hrtimer_init_on_stack(&t.timer, clockid, mode);  | 
