Diffstat (limited to 'arch/avr32/kernel/time.c')
-rw-r--r-- | arch/avr32/kernel/time.c | 298
1 file changed, 110 insertions, 188 deletions
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index b0e6b5855a3..d0f771be9e9 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -1,238 +1,160 @@
 /*
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * Based on MIPS implementation arch/mips/kernel/time.c
- * Copyright 2001 MontaVista Software Inc.
+ * Copyright (C) 2004-2007 Atmel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
 #include <linux/clk.h>
-#include <linux/clocksource.h>
-#include <linux/time.h>
-#include <linux/module.h>
+#include <linux/clockchips.h>
+#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/kernel_stat.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/profile.h>
-#include <linux/sysdev.h>
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/cpu.h>
 
-#include <asm/div64.h>
 #include <asm/sysreg.h>
-#include <asm/io.h>
-#include <asm/sections.h>
-
-static cycle_t read_cycle_count(void)
-{
-        return (cycle_t)sysreg_read(COUNT);
-}
-
-static struct clocksource clocksource_avr32 = {
-        .name = "avr32",
-        .rating = 350,
-        .read = read_cycle_count,
-        .mask = CLOCKSOURCE_MASK(32),
-        .shift = 16,
-        .is_continuous = 1,
-};
-
-/*
- * By default we provide the null RTC ops
- */
-static unsigned long null_rtc_get_time(void)
-{
-        return mktime(2004, 1, 1, 0, 0, 0);
-}
-static int null_rtc_set_time(unsigned long sec)
-{
-        return 0;
-}
-
-static unsigned long (*rtc_get_time)(void) = null_rtc_get_time;
-static int (*rtc_set_time)(unsigned long) = null_rtc_set_time;
-
-/* how many counter cycles in a jiffy? */
-static unsigned long cycles_per_jiffy;
-
-/* cycle counter value at the previous timer interrupt */
-static unsigned int timerhi, timerlo;
-
-/* the count value for the next timer interrupt */
-static unsigned int expirelo;
-static void avr32_timer_ack(void)
-{
-        unsigned int count;
-
-        /* Ack this timer interrupt and set the next one */
-        expirelo += cycles_per_jiffy;
-        if (expirelo == 0) {
-                printk(KERN_DEBUG "expirelo == 0\n");
-                sysreg_write(COMPARE, expirelo + 1);
-        } else {
-                sysreg_write(COMPARE, expirelo);
-        }
+#include <mach/pm.h>
 
-        /* Check to see if we have missed any timer interrupts */
-        count = sysreg_read(COUNT);
-        if ((count - expirelo) < 0x7fffffff) {
-                expirelo = count + cycles_per_jiffy;
-                sysreg_write(COMPARE, expirelo);
-        }
-}
-static unsigned int avr32_hpt_read(void)
-{
-        return sysreg_read(COUNT);
-}
-
-/*
- * Taken from MIPS c0_hpt_timer_init().
- *
- * Why is it so complicated, and what is "count"? My assumption is
- * that `count' specifies the "reference cycle", i.e. the cycle since
- * reset that should mean "zero". The reason COUNT is written twice is
- * probably to make sure we don't get any timer interrupts while we
- * are messing with the counter.
- */
-static void avr32_hpt_init(unsigned int count)
-{
-        count = sysreg_read(COUNT) - count;
-        expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
-        sysreg_write(COUNT, expirelo - cycles_per_jiffy);
-        sysreg_write(COMPARE, expirelo);
-        sysreg_write(COUNT, count);
-}
 
-/*
- * Scheduler clock - returns current time in nanosec units.
- */
-unsigned long long sched_clock(void)
+static cycle_t read_cycle_count(struct clocksource *cs)
 {
-        /* There must be better ways...? */
-        return (unsigned long long)jiffies * (1000000000 / HZ);
+        return (cycle_t)sysreg_read(COUNT);
 }
 
 /*
- * local_timer_interrupt() does profiling and process accounting on a
- * per-CPU basis.
- *
- * In UP mode, it is invoked from the (global) timer_interrupt.
+ * The architectural cycle count registers are a fine clocksource unless
+ * the system idle loop use sleep states like "idle": the CPU cycles
+ * measured by COUNT (and COMPARE) don't happen during sleep states.
+ * Their duration also changes if cpufreq changes the CPU clock rate.
+ * So we rate the clocksource using COUNT as very low quality.
  */
-static void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-        if (current->pid)
-                profile_tick(CPU_PROFILING, regs);
-        update_process_times(user_mode(regs));
-}
+static struct clocksource counter = {
+        .name = "avr32_counter",
+        .rating = 50,
+        .read = read_cycle_count,
+        .mask = CLOCKSOURCE_MASK(32),
+        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
 
-static irqreturn_t
-timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
-        unsigned int count;
+        struct clock_event_device *evdev = dev_id;
 
-        /* ack timer interrupt and try to set next interrupt */
-        count = avr32_hpt_read();
-        avr32_timer_ack();
-
-        /* Update timerhi/timerlo for intra-jiffy calibration */
-        timerhi += count < timerlo;     /* Wrap around */
-        timerlo = count;
+        if (unlikely(!(intc_get_pending(0) & 1)))
+                return IRQ_NONE;
 
         /*
-         * Call the generic timer interrupt handler
+         * Disable the interrupt until the clockevent subsystem
+         * reprograms it.
          */
-        write_seqlock(&xtime_lock);
-        do_timer(regs);
-        write_sequnlock(&xtime_lock);
-
-        /*
-         * In UP mode, we call local_timer_interrupt() to do profiling
-         * and process accounting.
-         *
-         * SMP is not supported yet.
-         */
-        local_timer_interrupt(irq, dev_id, regs);
+        sysreg_write(COMPARE, 0);
+        evdev->event_handler(evdev);
 
         return IRQ_HANDLED;
 }
 
 static struct irqaction timer_irqaction = {
         .handler = timer_interrupt,
-        .flags = IRQF_DISABLED,
-        .name = "timer",
+        /* Oprofile uses the same irq as the timer, so allow it to be shared */
+        .flags = IRQF_TIMER | IRQF_SHARED,
+        .name = "avr32_comparator",
 };
 
-void __init time_init(void)
+static int comparator_next_event(unsigned long delta,
+                struct clock_event_device *evdev)
 {
-        unsigned long mult, shift, count_hz;
-        int ret;
+        unsigned long flags;
 
-        xtime.tv_sec = rtc_get_time();
-        xtime.tv_nsec = 0;
+        raw_local_irq_save(flags);
 
-        set_normalized_timespec(&wall_to_monotonic,
-                                -xtime.tv_sec, -xtime.tv_nsec);
+        /* The time to read COUNT then update COMPARE must be less
+         * than the min_delta_ns value for this clockevent source.
+         */
+        sysreg_write(COMPARE, (sysreg_read(COUNT) + delta) ? : 1);
 
-        printk("Before time_init: count=%08lx, compare=%08lx\n",
-               (unsigned long)sysreg_read(COUNT),
-               (unsigned long)sysreg_read(COMPARE));
+        raw_local_irq_restore(flags);
 
-        count_hz = clk_get_rate(boot_cpu_data.clk);
-        shift = clocksource_avr32.shift;
-        mult = clocksource_hz2mult(count_hz, shift);
-        clocksource_avr32.mult = mult;
+        return 0;
+}
 
-        printk("Cycle counter: mult=%lu, shift=%lu\n", mult, shift);
+static void comparator_mode(enum clock_event_mode mode,
+                struct clock_event_device *evdev)
+{
+        switch (mode) {
+        case CLOCK_EVT_MODE_ONESHOT:
+                pr_debug("%s: start\n", evdev->name);
+                /* FALLTHROUGH */
+        case CLOCK_EVT_MODE_RESUME:
+                /*
+                 * If we're using the COUNT and COMPARE registers we
+                 * need to force idle poll.
+                 */
+                cpu_idle_poll_ctrl(true);
+                break;
+        case CLOCK_EVT_MODE_UNUSED:
+        case CLOCK_EVT_MODE_SHUTDOWN:
+                sysreg_write(COMPARE, 0);
+                pr_debug("%s: stop\n", evdev->name);
+                if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
+                    evdev->mode == CLOCK_EVT_MODE_RESUME) {
+                        /*
+                         * Only disable idle poll if we have forced that
+                         * in a previous call.
+                         */
+                        cpu_idle_poll_ctrl(false);
+                }
+                break;
+        default:
+                BUG();
+        }
+}
 
-        {
-                u64 tmp;
+static struct clock_event_device comparator = {
+        .name = "avr32_comparator",
+        .features = CLOCK_EVT_FEAT_ONESHOT,
+        .shift = 16,
+        .rating = 50,
+        .set_next_event = comparator_next_event,
+        .set_mode = comparator_mode,
+};
 
-                tmp = TICK_NSEC;
-                tmp <<= shift;
-                tmp += mult / 2;
-                do_div(tmp, mult);
+void read_persistent_clock(struct timespec *ts)
+{
+        ts->tv_sec = mktime(2007, 1, 1, 0, 0, 0);
+        ts->tv_nsec = 0;
+}
 
-                cycles_per_jiffy = tmp;
-        }
+void __init time_init(void)
+{
+        unsigned long counter_hz;
+        int ret;
 
-        /* This sets up the high precision timer for the first interrupt. */
-        avr32_hpt_init(avr32_hpt_read());
+        /* figure rate for counter */
+        counter_hz = clk_get_rate(boot_cpu_data.clk);
+        ret = clocksource_register_hz(&counter, counter_hz);
+        if (ret)
+                pr_debug("timer: could not register clocksource: %d\n", ret);
 
-        printk("After time_init: count=%08lx, compare=%08lx\n",
-               (unsigned long)sysreg_read(COUNT),
-               (unsigned long)sysreg_read(COMPARE));
+        /* setup COMPARE clockevent */
+        comparator.mult = div_sc(counter_hz, NSEC_PER_SEC, comparator.shift);
+        comparator.max_delta_ns = clockevent_delta2ns((u32)~0, &comparator);
+        comparator.min_delta_ns = clockevent_delta2ns(50, &comparator) + 1;
+        comparator.cpumask = cpumask_of(0);
 
-        ret = clocksource_register(&clocksource_avr32);
-        if (ret)
-                printk(KERN_ERR
-                       "timer: could not register clocksource: %d\n", ret);
+        sysreg_write(COMPARE, 0);
+        timer_irqaction.dev_id = &comparator;
 
         ret = setup_irq(0, &timer_irqaction);
         if (ret)
-                printk("timer: could not request IRQ 0: %d\n", ret);
-}
-
-static struct sysdev_class timer_class = {
-        set_kset_name("timer"),
-};
+                pr_debug("timer: could not request IRQ 0: %d\n", ret);
+        else {
+                clockevents_register_device(&comparator);
 
-static struct sys_device timer_device = {
-        .id = 0,
-        .cls = &timer_class,
-};
-
-static int __init init_timer_sysfs(void)
-{
-        int err = sysdev_class_register(&timer_class);
-        if (!err)
-                err = sysdev_register(&timer_device);
-        return err;
+                pr_info("%s: irq 0, %lu.%03lu MHz\n", comparator.name,
+                        ((counter_hz + 500) / 1000) / 1000,
+                        ((counter_hz + 500) / 1000) % 1000);
+        }
 }
-
-device_initcall(init_timer_sysfs);
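Editor's note on comparator_next_event() in the new code: on AVR32, a COMPARE value of 0 disables the compare interrupt (the diff itself uses sysreg_write(COMPARE, 0) for exactly that), so the expression "(sysreg_read(COUNT) + delta) ? : 1" (a GNU C extension) substitutes 1 whenever the 32-bit sum wraps to exactly zero, firing one cycle late rather than never. The standalone sketch below is not kernel code; the fake_count/fake_compare variables and program_next_event() helper are made up to stand in for the real system registers.

/*
 * Userspace sketch of the COMPARE programming guard used above.
 * Assumption: writing 0 to COMPARE disables the compare interrupt,
 * so the next-event value must never be 0.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_count;    /* stand-in for sysreg_read(COUNT) */
static uint32_t fake_compare;  /* stand-in for sysreg_write(COMPARE, x) */

static void program_next_event(uint32_t delta)
{
        uint32_t next = fake_count + delta;     /* wraps modulo 2^32 */

        /* same effect as "(sysreg_read(COUNT) + delta) ? : 1" */
        fake_compare = next ? next : 1;
}

int main(void)
{
        /* Normal case: COMPARE lands delta cycles in the future. */
        fake_count = 1000;
        program_next_event(500);
        printf("compare = %u\n", fake_compare);         /* 1500 */

        /* Wrap case: the sum is exactly 0, which would disable the
         * interrupt; the guard programs 1 instead. */
        fake_count = 0xfffffffeu;
        program_next_event(2);
        printf("compare = %u\n", fake_compare);         /* 1, not 0 */
        return 0;
}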
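Editor's note on the clockevent setup at the end of time_init(): div_sc() computes mult = (counter_hz << shift) / NSEC_PER_SEC, and clockevent_delta2ns() converts a tick count back into nanoseconds, which is how max_delta_ns and min_delta_ns are derived from the 32-bit counter range and a 50-cycle floor. The sketch below reproduces that fixed-point arithmetic in plain C so the values can be checked outside the kernel; the example_* helpers are close approximations, not the kernel functions (the real clockevent_delta2ns adds clamping and slightly different rounding), and the 66 MHz rate is a hypothetical example.

/*
 * Userspace sketch of the mult/shift conversion used for the avr32
 * comparator clockevent.  Assumes shift = 16 as in the diff.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* roughly div_sc(counter_hz, NSEC_PER_SEC, shift) */
static uint32_t example_div_sc(uint64_t ticks_per_sec, uint64_t nsec_per_sec,
                               unsigned int shift)
{
        return (uint32_t)((ticks_per_sec << shift) / nsec_per_sec);
}

/* roughly clockevent_delta2ns(): ticks -> nanoseconds, rounded up */
static uint64_t example_delta2ns(uint32_t ticks, uint32_t mult,
                                 unsigned int shift)
{
        return (((uint64_t)ticks << shift) + mult - 1) / mult;
}

/* ns -> ticks, as the clockevents core does before set_next_event() */
static uint64_t example_ns2ticks(uint64_t ns, uint32_t mult, unsigned int shift)
{
        return (ns * mult) >> shift;
}

int main(void)
{
        unsigned int shift = 16;            /* same shift as the comparator */
        uint64_t counter_hz = 66000000;     /* hypothetical 66 MHz CPU clock */
        uint32_t mult = example_div_sc(counter_hz, NSEC_PER_SEC, shift);

        printf("mult = %u\n", mult);
        printf("max_delta_ns = %llu\n", (unsigned long long)
               example_delta2ns(0xffffffffu, mult, shift));
        printf("min_delta_ns = %llu\n", (unsigned long long)
               example_delta2ns(50, mult, shift) + 1);
        printf("1 ms -> %llu ticks\n", (unsigned long long)
               example_ns2ticks(1000000, mult, shift));
        return 0;
}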
