Diffstat (limited to 'arch/parisc/kernel/time.c')
-rw-r--r--  arch/parisc/kernel/time.c | 71 ++++++++++++++++++++++++++++++++++++++++-------------------------------
1 file changed, 40 insertions(+), 31 deletions(-)
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index b3496b592a2..bad7d1eb62b 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -34,25 +34,39 @@
static unsigned long clocktick __read_mostly; /* timer cycles per tick */
-#ifdef CONFIG_SMP
-extern void smp_do_timer(struct pt_regs *regs);
-#endif
-
-irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+/*
+ * We keep time on PA-RISC Linux by using the Interval Timer which is
+ * a pair of registers; one is read-only and one is write-only; both
+ * accessed through CR16. The read-only register is 32 or 64 bits wide,
+ * and increments by 1 every CPU clock tick. The architecture only
+ * guarantees us a rate between 0.5 and 2, but all implementations use a
+ * rate of 1. The write-only register is 32-bits wide. When the lowest
+ * 32 bits of the read-only register compare equal to the write-only
+ * register, it raises a maskable external interrupt. Each processor has
+ * an Interval Timer of its own and they are not synchronised.
+ *
+ * We want to generate an interrupt every 1/HZ seconds. So we program
+ * CR16 to interrupt every @clocktick cycles. The it_value in cpu_data
+ * is programmed with the intended time of the next tick. We can be
+ * held off for an arbitrarily long period of time by interrupts being
+ * disabled, so we may miss one or more ticks.
+ */
+irqreturn_t timer_interrupt(int irq, void *dev_id)
{
unsigned long now;
unsigned long next_tick;
- unsigned long cycles_elapsed;
+ unsigned long cycles_elapsed, ticks_elapsed;
unsigned long cycles_remainder;
unsigned int cpu = smp_processor_id();
+ struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu];
/* gcc can optimize for "read-only" case with a local clocktick */
unsigned long cpt = clocktick;
- profile_tick(CPU_PROFILING, regs);
+ profile_tick(CPU_PROFILING);
/* Initialize next_tick to the expected tick time. */
- next_tick = cpu_data[cpu].it_value;
+ next_tick = cpuinfo->it_value;
/* Get current interval timer.
* CR16 reads as 64 bits in CPU wide mode.
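The comment block above describes the whole programming model: a free-running
read-only counter and a 32-bit write-only comparison register, both behind
CR16, with a maskable interrupt raised on a low-32-bit match. A minimal
userspace sketch of that match rule (plain C with illustrative names, not the
kernel's mfctl/mtctl interface):

#include <stdint.h>
#include <stdio.h>

static uint64_t counter;  /* models the read side of CR16 (64-bit wide mode) */
static uint32_t compare;  /* models the 32-bit write-only side of CR16 */

/* The IT raises its interrupt when the low 32 bits of the counter
 * compare equal to the write-only register. */
static int it_fires(void)
{
	return (uint32_t)counter == compare;
}

int main(void)
{
	compare = 0x10;
	for (counter = 0xfffffff0ULL; counter <= 0x100000020ULL; counter++)
		if (it_fires())
			printf("interrupt at counter=0x%llx\n",
			       (unsigned long long)counter);
	return 0;
}

Running it prints a single hit at 0x100000010: the upper half of the counter
never participates in the comparison, which is why the code further down
writes only the bottom 32 bits of next_tick to cr16.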
@@ -67,11 +81,14 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* of the more expensive div/mul method
*/
cycles_remainder = cycles_elapsed;
+ ticks_elapsed = 1;
while (cycles_remainder > cpt) {
cycles_remainder -= cpt;
+ ticks_elapsed++;
}
} else {
cycles_remainder = cycles_elapsed % cpt;
+ ticks_elapsed = 1 + cycles_elapsed / cpt;
}
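This branch picks between two ways of reducing cycles_elapsed: repeated
subtraction for the common short-delay case (what the comment above calls
avoiding "the more expensive div/mul method") and div/mod for long delays.
A standalone sketch showing the two paths agree; the guard that chooses
between them sits outside this hunk, so the 10 * cpt threshold below is made
up for illustration. Note the loop condition is > cpt, not >= cpt, so an
exact multiple of cpt would leave cpt in the remainder rather than 0; the
sketch avoids that edge case.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long cpt = 2500000;	/* illustrative cycles-per-tick */
	unsigned long cycles_elapsed = 3 * cpt + 1234;
	unsigned long cycles_remainder = cycles_elapsed;
	unsigned long ticks_elapsed = 1;

	if (cycles_elapsed < 10 * cpt) {	/* hypothetical threshold */
		while (cycles_remainder > cpt) {
			cycles_remainder -= cpt;
			ticks_elapsed++;
		}
	} else {
		cycles_remainder = cycles_elapsed % cpt;
		ticks_elapsed = 1 + cycles_elapsed / cpt;
	}

	/* Both strategies must land on the same answer. */
	assert(cycles_remainder == cycles_elapsed % cpt);
	assert(ticks_elapsed == 1 + cycles_elapsed / cpt);
	printf("ticks=%lu remainder=%lu\n", ticks_elapsed, cycles_remainder);
	return 0;
}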
/* Can we differentiate between "early CR16" (aka Scenario 1) and
@@ -81,18 +98,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* cycles after the IT fires. But it's arbitrary how much time passes
* before we call it "late". I've picked one second.
*/
-/* aproximate HZ with shifts. Intended math is "(elapsed/clocktick) > HZ" */
-#if HZ == 1000
- if (cycles_elapsed > (cpt << 10) )
-#elif HZ == 250
- if (cycles_elapsed > (cpt << 8) )
-#elif HZ == 100
- if (cycles_elapsed > (cpt << 7) )
-#else
-#warn WTF is HZ set to anyway?
- if (cycles_elapsed > (HZ * cpt) )
-#endif
- {
+ if (ticks_elapsed > HZ) {
/* Scenario 3: very long delay? bad in any case */
printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
" cycles %lX rem %lX "
@@ -111,7 +117,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
*/
next_tick = now + cycles_remainder;
- cpu_data[cpu].it_value = next_tick;
+ cpuinfo->it_value = next_tick;
/* Skip one clocktick on purpose if we are likely to miss next_tick.
* We want to avoid the new next_tick being less than CR16.
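The hazard this comment defends against follows from the low-32-bit match
rule: if the value written to the comparison register is already behind the
counter, the match has passed and the next interrupt only arrives after a
full 32-bit wrap. A sketch of the cost of missing (illustrative numbers, not
from the patch):

#include <stdint.h>
#include <stdio.h>

/* Cycles until the IT fires if 'compare' is programmed while the counter
 * already reads 'now'; all arithmetic is modulo 2^32, as in the hardware. */
static uint32_t cycles_until_match(uint32_t now, uint32_t compare)
{
	return compare - now;	/* unsigned wraparound does the modulo */
}

int main(void)
{
	uint32_t now = 1000;

	printf("target ahead:  %u cycles\n", cycles_until_match(now, 1500));
	printf("target behind: %u cycles\n", cycles_until_match(now, 500));
	return 0;
}

A target a mere 500 cycles in the past costs 4294966796 cycles of silence,
i.e. seconds of lost ticks at typical clock rates, which is why the code
below pays one deliberately skipped tick instead.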
@@ -122,21 +128,22 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
next_tick += cpt;
/* Program the IT when to deliver the next interrupt. */
- /* Only bottom 32-bits of next_tick are written to cr16. */
+ /* Only bottom 32-bits of next_tick are written to cr16. */
mtctl(next_tick, 16);
/* Done mucking with unreliable delivery of interrupts.
* Go do system house keeping.
*/
-#ifdef CONFIG_SMP
- smp_do_timer(regs);
-#else
- update_process_times(user_mode(regs));
-#endif
+
+ if (!--cpuinfo->prof_counter) {
+ cpuinfo->prof_counter = cpuinfo->prof_multiplier;
+ update_process_times(user_mode(get_irq_regs()));
+ }
+
if (cpu == 0) {
write_seqlock(&xtime_lock);
- do_timer(regs);
+ do_timer(ticks_elapsed);
write_sequnlock(&xtime_lock);
}
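The new prof_counter/prof_multiplier pair is a countdown that reloads from
the multiplier, so update_process_times() runs once every prof_multiplier
interrupts (a multiplier of 1 means every tick). A small model of the
cadence, with a made-up multiplier for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int prof_multiplier = 4;	/* illustrative; 1 is the common case */
	unsigned int prof_counter = prof_multiplier;

	for (int irq = 1; irq <= 12; irq++) {
		/* Pre-decrement and reload, mirroring the hunk above. */
		if (!--prof_counter) {
			prof_counter = prof_multiplier;
			printf("interrupt %2d: would call update_process_times()\n",
			       irq);
		}
	}
	return 0;
}

With a multiplier of 4 the body fires on interrupts 4, 8 and 12; the
pre-decrement makes even the first firing wait a full period.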
@@ -310,13 +317,15 @@ void __init time_init(void)
start_cpu_itimer(); /* get CPU 0 started */
- if(pdc_tod_read(&tod_data) == 0) {
- write_seqlock_irq(&xtime_lock);
+ if (pdc_tod_read(&tod_data) == 0) {
+ unsigned long flags;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
xtime.tv_sec = tod_data.tod_sec;
xtime.tv_nsec = tod_data.tod_usec * 1000;
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
- write_sequnlock_irq(&xtime_lock);
+ write_sequnlock_irqrestore(&xtime_lock, flags);
} else {
printk(KERN_ERR "Error reading tod clock\n");
xtime.tv_sec = 0;
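The switch from write_seqlock_irq() to write_seqlock_irqsave() matters
because the _irq variants unconditionally re-enable interrupts on unlock,
while _irqsave/_irqrestore put back whatever state the caller had, which is
the usual reason for preferring _irqsave in init paths. A toy model of that
difference, with a plain flag standing in for the interrupt state; none of
this is the kernel API:

#include <stdbool.h>
#include <stdio.h>

static bool irqs_on;

static void lock_irq(void)            { irqs_on = false; }
static void unlock_irq(void)          { irqs_on = true; }  /* always re-enables */
static void lock_irqsave(bool *f)     { *f = irqs_on; irqs_on = false; }
static void unlock_irqrestore(bool f) { irqs_on = f; }

int main(void)
{
	bool flags;

	irqs_on = false;	/* caller already runs with IRQs off */
	lock_irq();
	unlock_irq();
	printf("_irq:     irqs %s (caller's off state lost)\n",
	       irqs_on ? "on" : "off");

	irqs_on = false;
	lock_irqsave(&flags);
	unlock_irqrestore(flags);
	printf("_irqsave: irqs %s (caller's state preserved)\n",
	       irqs_on ? "on" : "off");
	return 0;
}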