Diffstat (limited to 'kernel/time/sched_clock.c')
 -rw-r--r--   kernel/time/sched_clock.c | 145
 1 file changed, 75 insertions(+), 70 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 0b479a6a22b..01d2d15aa66 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -8,25 +8,28 @@
 #include <linux/clocksource.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
+#include <linux/ktime.h>
 #include <linux/kernel.h>
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/syscore_ops.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
 #include <linux/sched_clock.h>
+#include <linux/seqlock.h>
+#include <linux/bitops.h>
 
 struct clock_data {
+	ktime_t wrap_kt;
 	u64 epoch_ns;
-	u32 epoch_cyc;
-	u32 epoch_cyc_copy;
+	u64 epoch_cyc;
+	seqcount_t seq;
 	unsigned long rate;
 	u32 mult;
 	u32 shift;
 	bool suspended;
 };
 
-static void sched_clock_poll(unsigned long wrap_ticks);
-static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
+static struct hrtimer sched_clock_timer;
 static int irqtime = -1;
 
 core_param(irqtime, irqtime, int, 0400);
@@ -35,42 +38,39 @@ static struct clock_data cd = {
 	.mult	= NSEC_PER_SEC / HZ,
 };
 
-static u32 __read_mostly sched_clock_mask = 0xffffffff;
+static u64 __read_mostly sched_clock_mask;
 
-static u32 notrace jiffy_sched_clock_read(void)
+static u64 notrace jiffy_sched_clock_read(void)
 {
-	return (u32)(jiffies - INITIAL_JIFFIES);
+	/*
+	 * We don't need to use get_jiffies_64 on 32-bit arches here
+	 * because we register with BITS_PER_LONG
+	 */
+	return (u64)(jiffies - INITIAL_JIFFIES);
 }
 
-static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
 
 static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
 	return (cyc * mult) >> shift;
 }
 
-static unsigned long long notrace sched_clock_32(void)
+unsigned long long notrace sched_clock(void)
 {
 	u64 epoch_ns;
-	u32 epoch_cyc;
-	u32 cyc;
+	u64 epoch_cyc;
+	u64 cyc;
+	unsigned long seq;
 
 	if (cd.suspended)
 		return cd.epoch_ns;
 
-	/*
-	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
-	 * ensuring that we always write epoch_cyc, epoch_ns and
-	 * epoch_cyc_copy in strict order, and read them in strict order.
-	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
-	 * the middle of an update, and we should repeat the load.
-	 */
 	do {
+		seq = raw_read_seqcount_begin(&cd.seq);
 		epoch_cyc = cd.epoch_cyc;
-		smp_rmb();
 		epoch_ns = cd.epoch_ns;
-		smp_rmb();
-	} while (epoch_cyc != cd.epoch_cyc_copy);
+	} while (read_seqcount_retry(&cd.seq, seq));
 
 	cyc = read_sched_clock();
 	cyc = (cyc - epoch_cyc) & sched_clock_mask;
@@ -83,49 +83,68 @@ static unsigned long long notrace sched_clock_32(void)
 static void notrace update_sched_clock(void)
 {
 	unsigned long flags;
-	u32 cyc;
+	u64 cyc;
 	u64 ns;
 
 	cyc = read_sched_clock();
 	ns = cd.epoch_ns +
 		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
			  cd.mult, cd.shift);
-	/*
-	 * Write epoch_cyc and epoch_ns in a way that the update is
-	 * detectable in cyc_to_fixed_sched_clock().
-	 */
+
 	raw_local_irq_save(flags);
-	cd.epoch_cyc_copy = cyc;
-	smp_wmb();
+	raw_write_seqcount_begin(&cd.seq);
 	cd.epoch_ns = ns;
-	smp_wmb();
 	cd.epoch_cyc = cyc;
+	raw_write_seqcount_end(&cd.seq);
 	raw_local_irq_restore(flags);
 }
 
-static void sched_clock_poll(unsigned long wrap_ticks)
+static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
 {
-	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
 	update_sched_clock();
+	hrtimer_forward_now(hrt, cd.wrap_kt);
+	return HRTIMER_RESTART;
 }
 
-void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
+void __init sched_clock_register(u64 (*read)(void), int bits,
+				 unsigned long rate)
 {
-	unsigned long r, w;
-	u64 res, wrap;
+	u64 res, wrap, new_mask, new_epoch, cyc, ns;
+	u32 new_mult, new_shift;
+	ktime_t new_wrap_kt;
+	unsigned long r;
 	char r_unit;
 
 	if (cd.rate > rate)
 		return;
 
-	BUG_ON(bits > 32);
 	WARN_ON(!irqs_disabled());
-	read_sched_clock = read;
-	sched_clock_mask = (1ULL << bits) - 1;
-	cd.rate = rate;
 
 	/* calculate the mult/shift to convert counter ticks to ns. */
-	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
+	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
+
+	new_mask = CLOCKSOURCE_MASK(bits);
+
+	/* calculate how many ns until we wrap */
+	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
+	new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
+
+	/* update epoch for new counter and update epoch_ns from old counter*/
+	new_epoch = read();
+	cyc = read_sched_clock();
+	ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+			  cd.mult, cd.shift);
+
+	raw_write_seqcount_begin(&cd.seq);
+	read_sched_clock = read;
+	sched_clock_mask = new_mask;
+	cd.rate = rate;
+	cd.wrap_kt = new_wrap_kt;
+	cd.mult = new_mult;
+	cd.shift = new_shift;
+	cd.epoch_cyc = new_epoch;
+	cd.epoch_ns = ns;
+	raw_write_seqcount_end(&cd.seq);
 
 	r = rate;
 	if (r >= 4000000) {
@@ -137,27 +156,11 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 	} else
 		r_unit = ' ';
 
-	/* calculate how many ns until we wrap */
-	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
-	do_div(wrap, NSEC_PER_MSEC);
-	w = wrap;
-
 	/* calculate the ns resolution of this counter */
-	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
-	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
-		bits, r, r_unit, res, w);
+	res = cyc_to_ns(1ULL, new_mult, new_shift);
 
-	/*
-	 * Start the timer to keep sched_clock() properly updated and
-	 * sets the initial epoch.
-	 */
-	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
-	update_sched_clock();
-
-	/*
-	 * Ensure that sched_clock() starts off at 0ns
-	 */
-	cd.epoch_ns = 0;
+	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
+		bits, r, r_unit, res, wrap);
 
 	/* Enable IRQ time accounting if we have a fast enough sched_clock */
 	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
@@ -166,13 +169,6 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 	pr_debug("Registered %pF as sched_clock source\n", read);
 }
 
-unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
-
-unsigned long long notrace sched_clock(void)
-{
-	return sched_clock_func();
-}
-
 void __init sched_clock_postinit(void)
 {
 	/*
@@ -180,14 +176,23 @@ void __init sched_clock_postinit(void)
 	 * make it the final one one.
 	 */
 	if (read_sched_clock == jiffy_sched_clock_read)
-		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
+		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
+
+	update_sched_clock();
 
-	sched_clock_poll(sched_clock_timer.data);
+	/*
+	 * Start the timer to keep sched_clock() properly updated and
+	 * sets the initial epoch.
+	 */
+	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	sched_clock_timer.function = sched_clock_poll;
+	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
 }
 
 static int sched_clock_suspend(void)
 {
-	sched_clock_poll(sched_clock_timer.data);
+	update_sched_clock();
+	hrtimer_cancel(&sched_clock_timer);
 	cd.suspended = true;
 	return 0;
 }
@@ -195,7 +200,7 @@ static int sched_clock_suspend(void)
 
 static void sched_clock_resume(void)
 {
 	cd.epoch_cyc = read_sched_clock();
-	cd.epoch_cyc_copy = cd.epoch_cyc;
+	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
 	cd.suspended = false;
 }
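
Usage note: the registration interface introduced above takes a 64-bit read callback together with the counter width in bits and its rate in Hz, replacing the 32-bit-only setup_sched_clock(). The sketch below shows how a platform clocksource driver might register such a counter; the driver name, MMIO layout, 56-bit width and 24 MHz rate are illustrative assumptions, not part of this patch.

/*
 * Minimal sketch for a hypothetical platform counter: a free-running
 * 56-bit counter exposed as two 32-bit MMIO words at my_counter_base,
 * clocked at an assumed 24 MHz. The core masks the returned value with
 * CLOCKSOURCE_MASK(bits) and re-arms its hrtimer before each wrap.
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *my_counter_base;	/* mapped elsewhere; hypothetical */

static u64 notrace my_counter_read(void)
{
	u32 hi, lo;

	/* hi-lo-hi read loop guards against a carry between the two words */
	do {
		hi = readl_relaxed(my_counter_base + 4);
		lo = readl_relaxed(my_counter_base);
	} while (hi != readl_relaxed(my_counter_base + 4));

	return ((u64)hi << 32) | lo;
}

static void __init my_counter_sched_clock_init(void)
{
	/* called during early init with interrupts still disabled */
	sched_clock_register(my_counter_read, 56, 24000000);
}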
