diff options
Diffstat (limited to 'kernel/time/timekeeping.c')
| -rw-r--r-- | kernel/time/timekeeping.c | 71 | 
1 file changed, 37 insertions, 34 deletions
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 947ba25a95a..32d8d6aaedb 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -22,6 +22,7 @@  #include <linux/tick.h>  #include <linux/stop_machine.h>  #include <linux/pvclock_gtod.h> +#include <linux/compiler.h>  #include "tick-internal.h"  #include "ntp_internal.h" @@ -77,7 +78,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)  	tk->wall_to_monotonic = wtm;  	set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);  	tk->offs_real = timespec_to_ktime(tmp); -	tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tk->tai_offset, 0)); +	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));  }  static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t) @@ -90,8 +91,9 @@ static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)  }  /** - * timekeeper_setup_internals - Set up internals to use clocksource clock. + * tk_setup_internals - Set up internals to use clocksource clock.   * + * @tk:		The target timekeeper to setup.   * @clock:		Pointer to clocksource.   
*   * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment @@ -595,7 +597,7 @@ s32 timekeeping_get_tai_offset(void)  static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)  {  	tk->tai_offset = tai_offset; -	tk->offs_tai = ktime_sub(tk->offs_real, ktime_set(tai_offset, 0)); +	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));  }  /** @@ -610,6 +612,7 @@ void timekeeping_set_tai_offset(s32 tai_offset)  	raw_spin_lock_irqsave(&timekeeper_lock, flags);  	write_seqcount_begin(&timekeeper_seq);  	__timekeeping_set_tai_offset(tk, tai_offset); +	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);  	write_seqcount_end(&timekeeper_seq);  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);  	clock_was_set(); @@ -758,7 +761,7 @@ u64 timekeeping_max_deferment(void)   *   *  XXX - Do be sure to remove it once all arches implement it.   */ -void __attribute__((weak)) read_persistent_clock(struct timespec *ts) +void __weak read_persistent_clock(struct timespec *ts)  {  	ts->tv_sec = 0;  	ts->tv_nsec = 0; @@ -773,7 +776,7 @@ void __attribute__((weak)) read_persistent_clock(struct timespec *ts)   *   *  XXX - Do be sure to remove it once all arches implement it.   
*/ -void __attribute__((weak)) read_boot_clock(struct timespec *ts) +void __weak read_boot_clock(struct timespec *ts)  {  	ts->tv_sec = 0;  	ts->tv_nsec = 0; @@ -849,8 +852,9 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,  							struct timespec *delta)  {  	if (!timespec_valid_strict(delta)) { -		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid " -					"sleep delta value!\n"); +		printk_deferred(KERN_WARNING +				"__timekeeping_inject_sleeptime: Invalid " +				"sleep delta value!\n");  		return;  	}  	tk_xtime_add(tk, delta); @@ -1023,6 +1027,8 @@ static int timekeeping_suspend(void)  		timekeeping_suspend_time =  			timespec_add(timekeeping_suspend_time, delta_delta);  	} + +	timekeeping_update(tk, TK_MIRROR);  	write_seqcount_end(&timekeeper_seq);  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags); @@ -1130,16 +1136,6 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)  		 * we can adjust by 1.  		 */  		error >>= 2; -		/* -		 * XXX - In update_wall_time, we round up to the next -		 * nanosecond, and store the amount rounded up into -		 * the error. This causes the likely below to be unlikely. -		 * -		 * The proper fix is to avoid rounding up by using -		 * the high precision tk->xtime_nsec instead of -		 * xtime.tv_nsec everywhere. Fixing this will take some -		 * time. 
-		 */  		if (likely(error <= interval))  			adj = 1;  		else @@ -1162,7 +1158,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)  	if (unlikely(tk->clock->maxadj &&  		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) { -		printk_once(KERN_WARNING +		printk_deferred_once(KERN_WARNING  			"Adjusting %s more than 11%% (%ld vs %ld)\n",  			tk->clock->name, (long)tk->mult + adj,  			(long)tk->clock->mult + tk->clock->maxadj); @@ -1255,7 +1251,7 @@ out_adjust:  static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)  {  	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift; -	unsigned int action = 0; +	unsigned int clock_set = 0;  	while (tk->xtime_nsec >= nsecps) {  		int leap; @@ -1277,11 +1273,10 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)  			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap); -			clock_was_set_delayed(); -			action = TK_CLOCK_WAS_SET; +			clock_set = TK_CLOCK_WAS_SET;  		}  	} -	return action; +	return clock_set;  }  /** @@ -1294,7 +1289,8 @@ static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)   * Returns the unconsumed cycles.   
*/  static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, -						u32 shift) +						u32 shift, +						unsigned int *clock_set)  {  	cycle_t interval = tk->cycle_interval << shift;  	u64 raw_nsecs; @@ -1308,7 +1304,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,  	tk->cycle_last += interval;  	tk->xtime_nsec += tk->xtime_interval << shift; -	accumulate_nsecs_to_secs(tk); +	*clock_set |= accumulate_nsecs_to_secs(tk);  	/* Accumulate raw time */  	raw_nsecs = (u64)tk->raw_interval << shift; @@ -1347,7 +1343,7 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)  	tk->xtime_nsec -= remainder;  	tk->xtime_nsec += 1ULL << tk->shift;  	tk->ntp_error += remainder << tk->ntp_error_shift; - +	tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;  }  #else  #define old_vsyscall_fixup(tk) @@ -1359,14 +1355,14 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)   * update_wall_time - Uses the current clocksource to increment the wall time   *   */ -static void update_wall_time(void) +void update_wall_time(void)  {  	struct clocksource *clock;  	struct timekeeper *real_tk = &timekeeper;  	struct timekeeper *tk = &shadow_timekeeper;  	cycle_t offset;  	int shift = 0, maxshift; -	unsigned int action; +	unsigned int clock_set = 0;  	unsigned long flags;  	raw_spin_lock_irqsave(&timekeeper_lock, flags); @@ -1401,7 +1397,8 @@ static void update_wall_time(void)  	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;  	shift = min(shift, maxshift);  	while (offset >= tk->cycle_interval) { -		offset = logarithmic_accumulation(tk, offset, shift); +		offset = logarithmic_accumulation(tk, offset, shift, +							&clock_set);  		if (offset < tk->cycle_interval<<shift)  			shift--;  	} @@ -1419,7 +1416,7 @@ static void update_wall_time(void)  	 * Finally, make sure that after the rounding  	 * xtime_nsec isn't larger than NSEC_PER_SEC  	 */ -	action = accumulate_nsecs_to_secs(tk); +	clock_set |= 
accumulate_nsecs_to_secs(tk);  	write_seqcount_begin(&timekeeper_seq);  	/* Update clock->cycle_last with the new value */ @@ -1435,10 +1432,13 @@ static void update_wall_time(void)  	 * updating.  	 */  	memcpy(real_tk, tk, sizeof(*tk)); -	timekeeping_update(real_tk, action); +	timekeeping_update(real_tk, clock_set);  	write_seqcount_end(&timekeeper_seq);  out:  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags); +	if (clock_set) +		/* Have to call _delayed version, since in irq context*/ +		clock_was_set_delayed();  }  /** @@ -1583,7 +1583,6 @@ struct timespec get_monotonic_coarse(void)  void do_timer(unsigned long ticks)  {  	jiffies_64 += ticks; -	update_wall_time();  	calc_global_load(ticks);  } @@ -1613,9 +1612,10 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,   * ktime_get_update_offsets - hrtimer helper   * @offs_real:	pointer to storage for monotonic -> realtime offset   * @offs_boot:	pointer to storage for monotonic -> boottime offset + * @offs_tai:	pointer to storage for monotonic -> clock tai offset   *   * Returns current monotonic time and updates the offsets - * Called from hrtimer_interupt() or retrigger_next_event() + * Called from hrtimer_interrupt() or retrigger_next_event()   */  ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,  							ktime_t *offs_tai) @@ -1697,12 +1697,14 @@ int do_adjtimex(struct timex *txc)  	if (tai != orig_tai) {  		__timekeeping_set_tai_offset(tk, tai); -		update_pvclock_gtod(tk, true); -		clock_was_set_delayed(); +		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);  	}  	write_seqcount_end(&timekeeper_seq);  	raw_spin_unlock_irqrestore(&timekeeper_lock, flags); +	if (tai != orig_tai) +		clock_was_set(); +  	ntp_notify_cmos_timer();  	return ret; @@ -1738,4 +1740,5 @@ void xtime_update(unsigned long ticks)  	write_seqlock(&jiffies_lock);  	do_timer(ticks);  	write_sequnlock(&jiffies_lock); +	update_wall_time();  }  | 
