From deda2e81961e96be4f2c09328baca4710a2fd1a0 Mon Sep 17 00:00:00 2001
From: Jason Wessel
Date: Mon, 9 Aug 2010 14:20:09 -0700
Subject: timekeeping: Fix overflow in rawtime tv_nsec on 32 bit archs

tv_nsec is a long, and when the shifted interval is added to it, it can
wrap and become negative, which later causes looping problems in
getrawmonotonic(). The edge case occurs when the system has slept for a
short period of roughly 2 seconds.

A trace printk of the values in this patch illustrates the problem:

ftrace time stamp: log
43.716079: logarithmic_accumulation: raw: 3d0913   tv_nsec d687faa
43.718513: logarithmic_accumulation: raw: 3d0913   tv_nsec da588bd
43.722161: logarithmic_accumulation: raw: 3d0913   tv_nsec de291d0
46.349925: logarithmic_accumulation: raw: 7a122600 tv_nsec e1f9ae3
46.349930: logarithmic_accumulation: raw: 1e848980 tv_nsec 8831c0e3

The kernel starts looping at 46.349925 in getrawmonotonic() due to the
negative value that results from adding the raw interval to tv_nsec.

A simple solution is to accumulate into a u64 and then normalize it to
a timespec_t.

Signed-off-by: Jason Wessel
[ Reworked variable names and simplified some of the code. - John ]
Signed-off-by: John Stultz
Cc: Thomas Gleixner
Cc: H. Peter Anvin
Signed-off-by: Linus Torvalds
---
 kernel/time/timekeeping.c | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

(limited to 'kernel/time')

diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e14c839e9fa..e960d824263 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -690,6 +690,7 @@ static void timekeeping_adjust(s64 offset)
 static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 {
 	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
+	u64 raw_nsecs;
 
 	/* If the offset is smaller then a shifted interval, do nothing */
 	if (offset < timekeeper.cycle_interval<<shift)
@@ -706,12 +707,14 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 		second_overflow();
 	}
 
-	/* Accumulate into raw time */
-	raw_time.tv_nsec += timekeeper.raw_interval << shift;
-	while (raw_time.tv_nsec >= NSEC_PER_SEC) {
-		raw_time.tv_nsec -= NSEC_PER_SEC;
+	/* Accumulate raw time */
+	raw_nsecs = timekeeper.raw_interval << shift;
+	raw_nsecs += raw_time.tv_nsec;
+	while (raw_nsecs >= NSEC_PER_SEC) {
+		raw_nsecs -= NSEC_PER_SEC;
 		raw_time.tv_sec++;
 	}
+	raw_time.tv_nsec = raw_nsecs;
 
 	/* Accumulate error between NTP and clock interval */
 	timekeeper.ntp_error += tick_length << shift;
--
cgit v1.2.3-70-g09d2

From c7dcf87a6881bf796faee83003163eb3de41a309 Mon Sep 17 00:00:00 2001
From: John Stultz
Date: Fri, 13 Aug 2010 11:30:58 -0700
Subject: time: Workaround gcc loop optimization that causes 64bit div errors

Early 4.3 versions of gcc apparently aggressively optimize the raw
time accumulation loop, replacing it with a divide. On 32bit systems,
this causes the following link errors:

	undefined reference to `__umoddi3'
	undefined reference to `__udivdi3'

The gcc issue has been fixed in 4.4 and greater.

This patch replaces the accumulation loop with a do_div, as suggested
by Linus.

Signed-off-by: John Stultz
CC: Jason Wessel
CC: Larry Finger
CC: Ingo Molnar
CC: Linus Torvalds
Signed-off-by: Linus Torvalds
---
 kernel/time/timekeeping.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'kernel/time')

diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e960d824263..49010d822f7 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -710,9 +710,10 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 	/* Accumulate raw time */
 	raw_nsecs = timekeeper.raw_interval << shift;
 	raw_nsecs += raw_time.tv_nsec;
-	while (raw_nsecs >= NSEC_PER_SEC) {
-		raw_nsecs -= NSEC_PER_SEC;
-		raw_time.tv_sec++;
+	if (raw_nsecs >= NSEC_PER_SEC) {
+		u64 raw_secs = raw_nsecs;
+		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
+		raw_time.tv_sec += raw_secs;
 	}
 	raw_time.tv_nsec = raw_nsecs;
 
--
cgit v1.2.3-70-g09d2
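To make the failure mode in the two patches above concrete, here is a
minimal userspace sketch. This is not kernel code: the variable names
are illustrative, and the constants are lifted from the trace in the
first changelog. On a 32-bit arch tv_nsec is a 32-bit long, so adding
roughly two seconds' worth of shifted interval to it wraps negative,
reproducing the last trace line exactly:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Values from the trace entry at 46.349925 */
	uint32_t tv_nsec = 0xe1f9ae3;                /* ~237 ms */
	uint32_t raw_interval_shifted = 0x7a122600;  /* ~2.05 s of nanoseconds */

	/* Old code: accumulate straight into the 32-bit tv_nsec. */
	uint32_t wrapped = tv_nsec + raw_interval_shifted;
	printf("buggy tv_nsec: %#x (%d as a signed 32-bit long)\n",
	       (unsigned int)wrapped, (int)wrapped);  /* 0x8831c0e3, negative */

	/* Fixed code: accumulate into a u64, then normalize. */
	uint64_t raw_nsecs = (uint64_t)raw_interval_shifted + tv_nsec;
	uint64_t raw_secs = raw_nsecs / NSEC_PER_SEC;
	raw_nsecs %= NSEC_PER_SEC;
	printf("fixed: tv_sec += %llu, tv_nsec = %llu\n",
	       (unsigned long long)raw_secs,
	       (unsigned long long)raw_nsecs);
	return 0;
}

Userspace can normalize with plain / and %. A 32-bit kernel cannot:
a 64-bit division there compiles into calls to libgcc's
__udivdi3/__umoddi3, which the kernel does not link against. That is
why the follow-up patch uses do_div(), which performs the division in
place and returns the remainder in one step.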
From 8af3c153baf95374eff20a37f00c59a295b52756 Mon Sep 17 00:00:00 2001
From: Miroslav Lichvar
Date: Tue, 7 Sep 2010 16:43:46 +0200
Subject: ntp: Clamp PLL update interval

Clamp the PLL update interval to reduce the PLL gain when the sampling
rate is low (e.g. an intermittent network connection), to avoid
instability.

The clamp roughly corresponds to the loop time constant: it is 8 * poll
interval for SHIFT_PLL 2 and 32 * poll interval for SHIFT_PLL 4. This
gives good results without affecting the gain in normal conditions,
where ntpd skips only up to seven consecutive samples.

Signed-off-by: Miroslav Lichvar
Acked-by: John Stultz
LKML-Reference: <1283870626-9472-1-git-send-email-mlichvar@redhat.com>
Signed-off-by: Thomas Gleixner
---
 kernel/time/ntp.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

(limited to 'kernel/time')

diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index c63116863a8..d2321891538 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -149,10 +149,18 @@ static void ntp_update_offset(long offset)
 	time_reftime = get_seconds();
 
 	offset64    = offset;
-	freq_adj    = (offset64 * secs) <<
-			(NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));
+	freq_adj    = ntp_update_offset_fll(offset64, secs);
 
-	freq_adj    += ntp_update_offset_fll(offset64, secs);
+	/*
+	 * Clamp update interval to reduce PLL gain with low
+	 * sampling rate (e.g. intermittent network connection)
+	 * to avoid instability.
+	 */
+	if (unlikely(secs > 1 << (SHIFT_PLL + 1 + time_constant)))
+		secs = 1 << (SHIFT_PLL + 1 + time_constant);
+
+	freq_adj    += (offset64 * secs) <<
+			(NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));
 
 	freq_adj    = min(freq_adj + time_freq, MAXFREQ_SCALED);
 
--
cgit v1.2.3-70-g09d2
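For reference, the clamp added above is 1 << (SHIFT_PLL + 1 +
time_constant). Assuming the conventional NTP mapping of poll interval
= 2^time_constant seconds, that works out to 2^(SHIFT_PLL + 1) poll
intervals: 8 for SHIFT_PLL 2 and 32 for SHIFT_PLL 4, matching the
changelog. A small standalone sketch (not kernel code; SHIFT_PLL is
hard-coded to 2 here purely for illustration) tabulates the limit:

#include <stdio.h>

#define SHIFT_PLL 2	/* illustrative; the changelog quotes both 2 and 4 */

int main(void)
{
	long time_constant;

	for (time_constant = 0; time_constant <= 6; time_constant++) {
		long poll  = 1L << time_constant;	/* poll interval, seconds */
		long limit = 1L << (SHIFT_PLL + 1 + time_constant);

		printf("time_constant=%ld  poll=%lds  clamp=%lds (%ld poll intervals)\n",
		       time_constant, poll, limit, limit / poll);
	}
	return 0;
}

Because secs enters the PLL term only as a multiplier of offset64,
capping secs caps the PLL gain for stale samples while leaving updates
at normal polling rates untouched.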