diff options
Diffstat (limited to 'kernel/time/tick-common.c')
 -rw-r--r--  kernel/time/tick-common.c | 34 ++++++++++++++++++++++---------
1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 64522ecdfe0..0a0608edeb2 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -33,6 +33,21 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);   */  ktime_t tick_next_period;  ktime_t tick_period; + +/* + * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR + * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This + * variable has two functions: + * + * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the + *    timekeeping lock all at once. Only the CPU which is assigned to do the + *    update is handling it. + * + * 2) Hand off the duty in the NOHZ idle case by setting the value to + *    TICK_DO_TIMER_NONE, i.e. a non existing CPU. So the next cpu which looks + *    at it will take over and keep the time keeping alive.  The handover + *    procedure also covers cpu hotplug. + */  int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;  /* @@ -70,6 +85,7 @@ static void tick_periodic(int cpu)  		do_timer(1);  		write_sequnlock(&jiffies_lock); +		update_wall_time();  	}  	update_process_times(user_mode(get_irq_regs())); @@ -82,18 +98,19 @@ static void tick_periodic(int cpu)  void tick_handle_periodic(struct clock_event_device *dev)  {  	int cpu = smp_processor_id(); -	ktime_t next; +	ktime_t next = dev->next_event;  	tick_periodic(cpu);  	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)  		return; -	/* -	 * Setup the next period for devices, which do not have -	 * periodic mode: -	 */ -	next = ktime_add(dev->next_event, tick_period);  	for (;;) { +		/* +		 * Setup the next period for devices, which do not have +		 * periodic mode: +		 */ +		next = ktime_add(next, tick_period); +  		if (!clockevents_program_event(dev, next, false))  			return;  		/* @@ -102,12 +119,11 @@ void tick_handle_periodic(struct clock_event_device *dev)  		 * to be sure we're using a real hardware clocksource.  		 
* Otherwise we could get trapped in an infinite  		 * loop, as the tick_periodic() increments jiffies, -		 * when then will increment time, posibly causing +		 * which then will increment time, possibly causing  		 * the loop to trigger again and again.  		 */  		if (timekeeping_valid_for_hres())  			tick_periodic(cpu); -		next = ktime_add(next, tick_period);  	}  } @@ -260,7 +276,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev,  bool tick_check_replacement(struct clock_event_device *curdev,  			    struct clock_event_device *newdev)  { -	if (tick_check_percpu(curdev, newdev, smp_processor_id())) +	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))  		return false;  	return tick_check_preferred(curdev, newdev);  | 
