Diffstat (limited to 'arch/ia64/kernel/time.c')
 -rw-r--r--  arch/ia64/kernel/time.c | 192
 1 file changed, 141 insertions(+), 51 deletions(-)
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 17fda5293c6..71c52bc7c28 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -19,23 +19,22 @@
 #include <linux/interrupt.h>
 #include <linux/efi.h>
 #include <linux/timex.h>
-#include <linux/clocksource.h>
+#include <linux/timekeeper_internal.h>
+#include <linux/platform_device.h>
 
 #include <asm/machvec.h>
 #include <asm/delay.h>
 #include <asm/hw_irq.h>
+#include <asm/paravirt.h>
 #include <asm/ptrace.h>
 #include <asm/sal.h>
 #include <asm/sections.h>
-#include <asm/system.h>
 
 #include "fsyscall_gtod_data.h"
 
-static cycle_t itc_get_cycles(void);
+static cycle_t itc_get_cycles(struct clocksource *cs);
 
-struct fsyscall_gtod_data_t fsyscall_gtod_data = {
-	.lock = SEQLOCK_UNLOCKED,
-};
+struct fsyscall_gtod_data_t fsyscall_gtod_data;
 
 struct itc_jitter_data_t itc_jitter_data;
 
@@ -48,23 +47,110 @@ EXPORT_SYMBOL(last_cli_ip);
 
 #endif
 
+#ifdef CONFIG_PARAVIRT
+/* We need to define a real function for sched_clock, to override the
+ weak default version */
+unsigned long long sched_clock(void)
+{
+	return paravirt_sched_clock();
+}
+#endif
+
+#ifdef CONFIG_PARAVIRT
+static void
+paravirt_clocksource_resume(struct clocksource *cs)
+{
+	if (pv_time_ops.clocksource_resume)
+		pv_time_ops.clocksource_resume();
+}
+#endif
+
 static struct clocksource clocksource_itc = {
 	.name           = "itc",
 	.rating         = 350,
 	.read           = itc_get_cycles,
 	.mask           = CLOCKSOURCE_MASK(64),
-	.mult           = 0, /*to be calculated*/
-	.shift          = 16,
 	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+#ifdef CONFIG_PARAVIRT
+	.resume         = paravirt_clocksource_resume,
+#endif
 };
 static struct clocksource *itc_clocksource;
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+
+#include <linux/kernel_stat.h>
+
+extern cputime_t cycle_to_cputime(u64 cyc);
+
+void vtime_account_user(struct task_struct *tsk)
+{
+	cputime_t delta_utime;
+	struct thread_info *ti = task_thread_info(tsk);
+
+	if (ti->ac_utime) {
+		delta_utime = cycle_to_cputime(ti->ac_utime);
+		account_user_time(tsk, delta_utime, delta_utime);
+		ti->ac_utime = 0;
+	}
+}
+
+/*
+ * Called from the context switch with interrupts disabled, to charge all
+ * accumulated times to the current process, and to prepare accounting on
+ * the next process.
+ */
+void arch_vtime_task_switch(struct task_struct *prev)
+{
+	struct thread_info *pi = task_thread_info(prev);
+	struct thread_info *ni = task_thread_info(current);
+
+	pi->ac_stamp = ni->ac_stamp;
+	ni->ac_stime = ni->ac_utime = 0;
+}
+
+/*
+ * Account time for a transition between system, hard irq or soft irq state.
+ * Note that this function is called with interrupts enabled.
+ */
+static cputime_t vtime_delta(struct task_struct *tsk)
+{
+	struct thread_info *ti = task_thread_info(tsk);
+	cputime_t delta_stime;
+	__u64 now;
+
+	WARN_ON_ONCE(!irqs_disabled());
+
+	now = ia64_get_itc();
+
+	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
+	ti->ac_stime = 0;
+	ti->ac_stamp = now;
+
+	return delta_stime;
+}
+
+void vtime_account_system(struct task_struct *tsk)
+{
+	cputime_t delta = vtime_delta(tsk);
+
+	account_system_time(tsk, 0, delta, delta);
+}
+EXPORT_SYMBOL_GPL(vtime_account_system);
+
+void vtime_account_idle(struct task_struct *tsk)
+{
+	account_idle_time(vtime_delta(tsk));
+}
+
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
 {
 	unsigned long new_itm;
 
-	if (unlikely(cpu_is_offline(smp_processor_id()))) {
+	if (cpu_is_offline(smp_processor_id())) {
 		return IRQ_HANDLED;
 	}
 
@@ -78,24 +164,18 @@ timer_interrupt (int irq, void *dev_id)
 
 	profile_tick(CPU_PROFILING);
 
+	if (paravirt_do_steal_accounting(&new_itm))
+		goto skip_process_time_accounting;
+
 	while (1) {
 		update_process_times(user_mode(get_irq_regs()));
 
 		new_itm += local_cpu_data->itm_delta;
 
-		if (smp_processor_id() == time_keeper_id) {
-			/*
-			 * Here we are in the timer irq handler. We have irqs locally
-			 * disabled, but we don't know if the timer_bh is running on
-			 * another CPU. We need to avoid to SMP race by acquiring the
-			 * xtime_lock.
-			 */
-			write_seqlock(&xtime_lock);
-			do_timer(1);
-			local_cpu_data->itm_next = new_itm;
-			write_sequnlock(&xtime_lock);
-		} else
-			local_cpu_data->itm_next = new_itm;
+		if (smp_processor_id() == time_keeper_id)
+			xtime_update(1);
+
+		local_cpu_data->itm_next = new_itm;
 
 		if (time_after(new_itm, ia64_get_itc()))
 			break;
@@ -107,13 +187,15 @@ timer_interrupt (int irq, void *dev_id)
 		local_irq_disable();
 	}
 
+skip_process_time_accounting:
+
 	do {
 		/*
 		 * If we're too close to the next clock tick for
 		 * comfort, we increase the safety margin by
 		 * intentionally dropping the next tick(s). We do NOT
 		 * update itm.next because that would force us to call
-		 * do_timer() which in turn would let our clock run
+		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotony of time).
		 */
@@ -162,8 +244,7 @@ static int __init nojitter_setup(char *str)
 
 __setup("nojitter", nojitter_setup);
 
-void __devinit
-ia64_init_itm (void)
+void ia64_init_itm(void)
 {
 	unsigned long platform_base_freq, itc_freq;
 	struct pal_freq_ratio itc_ratio, proc_ratio;
@@ -256,22 +337,24 @@ ia64_init_itm (void)
 	 */
 	clocksource_itc.rating = 50;
 
+	paravirt_init_missing_ticks_accounting(smp_processor_id());
+
+	/* avoid softlock up message when cpu is unplug and plugged again. */
+	touch_softlockup_watchdog();
+
 	/* Setup the CPU local timer tick */
 	ia64_cpu_local_tick();
 
 	if (!itc_clocksource) {
-		/* Sort out mult/shift values: */
-		clocksource_itc.mult =
-			clocksource_hz2mult(local_cpu_data->itc_freq,
-						clocksource_itc.shift);
-		clocksource_register(&clocksource_itc);
+		clocksource_register_hz(&clocksource_itc,
+						local_cpu_data->itc_freq);
 		itc_clocksource = &clocksource_itc;
 	}
 }
 
-static cycle_t itc_get_cycles(void)
+static cycle_t itc_get_cycles(struct clocksource *cs)
 {
-	u64 lcycle, now, ret;
+	unsigned long lcycle, now, ret;
 
 	if (!itc_jitter_data.itc_jitter)
 		return get_cycles();
@@ -297,27 +380,35 @@ static cycle_t itc_get_cycles(void)
 
 static struct irqaction timer_irqaction = {
 	.handler =	timer_interrupt,
-	.flags =	IRQF_DISABLED | IRQF_IRQPOLL,
+	.flags =	IRQF_IRQPOLL,
 	.name =		"timer"
 };
 
-void __devinit ia64_disable_timer(void)
+static struct platform_device rtc_efi_dev = {
+	.name = "rtc-efi",
+	.id = -1,
+};
+
+static int __init rtc_init(void)
+{
+	if (platform_device_register(&rtc_efi_dev) < 0)
+		printk(KERN_ERR "unable to register rtc device...\n");
+
+	/* not necessarily an error */
+	return 0;
+}
+module_init(rtc_init);
+
+void read_persistent_clock(struct timespec *ts)
 {
-	ia64_set_itv(1 << 16);
+	efi_gettimeofday(ts);
 }
 
 void __init
 time_init (void)
 {
 	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
-	efi_gettimeofday(&xtime);
 	ia64_init_itm();
-
-	/*
-	 * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
-	 * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
-	 */
-	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
 }
 
 /*
@@ -349,25 +440,24 @@ void update_vsyscall_tz(void)
 {
 }
 
-void update_vsyscall(struct timespec *wall, struct clocksource *c)
+void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
+			 struct clocksource *c, u32 mult)
 {
-	unsigned long flags;
-
-	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+	write_seqcount_begin(&fsyscall_gtod_data.seq);
 
 	/* copy fsyscall clock data */
 	fsyscall_gtod_data.clk_mask = c->mask;
-	fsyscall_gtod_data.clk_mult = c->mult;
+	fsyscall_gtod_data.clk_mult = mult;
 	fsyscall_gtod_data.clk_shift = c->shift;
-	fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
+	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
 	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
 
 	/* copy kernel time structures */
 	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
 	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
-	fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
+	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
 							+ wall->tv_sec;
-	fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
+	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
 							+ wall->tv_nsec;
 
 	/* normalize */
@@ -376,6 +466,6 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c)
 		fsyscall_gtod_data.monotonic_time.tv_sec++;
 	}
 
-	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+	write_seqcount_end(&fsyscall_gtod_data.seq);
 }
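Note on the CONFIG_VIRT_CPU_ACCOUNTING_NATIVE hunk: per-task time is kept as raw ITC cycles in thread_info (ac_stime/ac_stamp) and only converted to cputime when it is charged. The following is a toy user-space model of that drain step, not kernel code; ti_model and drain_stime_cycles are hypothetical stand-ins for thread_info and vtime_delta().

    #include <stdio.h>

    /* hypothetical stand-in for the ac_* fields of ia64 thread_info */
    struct ti_model {
            unsigned long long ac_stamp;  /* ITC value at the last snapshot */
            unsigned long long ac_stime;  /* not-yet-charged system-time cycles */
    };

    /* mirrors vtime_delta(): charge everything since the last stamp, then reset */
    static unsigned long long drain_stime_cycles(struct ti_model *ti,
                                                 unsigned long long now)
    {
            unsigned long long cycles = ti->ac_stime + (now - ti->ac_stamp);

            ti->ac_stime = 0;
            ti->ac_stamp = now;
            return cycles;  /* the kernel converts this via cycle_to_cputime() */
    }

    int main(void)
    {
            struct ti_model ti = { .ac_stamp = 1000, .ac_stime = 250 };

            /* pretend the ITC now reads 5000: 250 + (5000 - 1000) cycles owed */
            printf("%llu cycles to account\n", drain_stime_cycles(&ti, 5000));
            return 0;
    }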
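Note on the ia64_init_itm() hunk: the deleted lines computed clocksource_itc.mult with clocksource_hz2mult() against the struct's hard-coded shift of 16, while clocksource_register_hz() now derives both factors internally. A minimal user-space sketch of the fixed-point math either way boils down to, ns = (cycles * mult) >> shift with mult = (NSEC_PER_SEC << shift) / hz; the 400 MHz ITC frequency below is made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* mult = (NSEC_PER_SEC << shift) / hz, as clocksource_hz2mult() computes */
    static uint32_t hz2mult(uint32_t hz, uint32_t shift)
    {
            uint64_t tmp = NSEC_PER_SEC << shift;

            tmp += hz / 2;                  /* round to nearest */
            return (uint32_t)(tmp / hz);
    }

    int main(void)
    {
            uint32_t shift = 16;            /* the old hard-coded .shift */
            uint32_t hz = 400000000;        /* hypothetical 400 MHz ITC */
            uint32_t mult = hz2mult(hz, shift);
            uint64_t cycles = 4000;         /* 10 us worth of cycles at 400 MHz */

            /* ns = (cycles * mult) >> shift, the core clocksource conversion */
            printf("mult=%u: %llu cycles -> %llu ns\n", mult,
                   (unsigned long long)cycles,
                   (unsigned long long)((cycles * mult) >> shift));
            return 0;
    }

At 400 MHz this prints mult=163840 and converts 4000 cycles to exactly 10000 ns.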
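Note on the final hunk: update_vsyscall_old() now guards fsyscall_gtod_data with a bare seqcount instead of a seqlock, which is enough because the timekeeping core already serializes the single writer and readers never block. A sketch of the matching lockless read side is below; the real consumer is the hand-written fsyscall gettimeofday path in arch/ia64/kernel/fsys.S, read_gtod_wall_time is a hypothetical name, and the snippet assumes wall_time is a struct timespec, as the field copies in the diff suggest.

    #include <linux/seqlock.h>
    #include "fsyscall_gtod_data.h"

    /* retry loop pairing with write_seqcount_begin()/write_seqcount_end() */
    static void read_gtod_wall_time(struct timespec *ts)
    {
            unsigned seq;

            do {
                    seq = read_seqcount_begin(&fsyscall_gtod_data.seq);
                    *ts = fsyscall_gtod_data.wall_time;
            } while (read_seqcount_retry(&fsyscall_gtod_data.seq, seq));
    }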
