Diffstat (limited to 'arch/ia64/kernel/time.c')
-rw-r--r--  arch/ia64/kernel/time.c  142
1 file changed, 54 insertions, 88 deletions
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index a35c661e5e8..71c52bc7c28 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -19,7 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/efi.h>
 #include <linux/timex.h>
-#include <linux/clocksource.h>
+#include <linux/timekeeper_internal.h>
 #include <linux/platform_device.h>
 
 #include <asm/machvec.h>
@@ -29,15 +29,12 @@
 #include <asm/ptrace.h>
 #include <asm/sal.h>
 #include <asm/sections.h>
-#include <asm/system.h>
 
 #include "fsyscall_gtod_data.h"
 
 static cycle_t itc_get_cycles(struct clocksource *cs);
 
-struct fsyscall_gtod_data_t fsyscall_gtod_data = {
-	.lock = SEQLOCK_UNLOCKED,
-};
+struct fsyscall_gtod_data_t fsyscall_gtod_data;
 
 struct itc_jitter_data_t itc_jitter_data;
 
@@ -61,7 +58,7 @@ unsigned long long sched_clock(void)
 
 #ifdef CONFIG_PARAVIRT
 static void
-paravirt_clocksource_resume(void)
+paravirt_clocksource_resume(struct clocksource *cs)
 {
 	if (pv_time_ops.clocksource_resume)
 		pv_time_ops.clocksource_resume();
@@ -73,8 +70,6 @@ static struct clocksource clocksource_itc = {
 	.rating         = 350,
 	.read           = itc_get_cycles,
 	.mask           = CLOCKSOURCE_MASK(64),
-	.mult           = 0, /*to be calculated*/
-	.shift          = 16,
 	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 #ifdef CONFIG_PARAVIRT
 	.resume		= paravirt_clocksource_resume,
@@ -82,38 +77,35 @@ static struct clocksource clocksource_itc = {
 };
 static struct clocksource *itc_clocksource;
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 
 #include <linux/kernel_stat.h>
 
 extern cputime_t cycle_to_cputime(u64 cyc);
 
+void vtime_account_user(struct task_struct *tsk)
+{
+	cputime_t delta_utime;
+	struct thread_info *ti = task_thread_info(tsk);
+
+	if (ti->ac_utime) {
+		delta_utime = cycle_to_cputime(ti->ac_utime);
+		account_user_time(tsk, delta_utime, delta_utime);
+		ti->ac_utime = 0;
+	}
+}
+
 /*
  * Called from the context switch with interrupts disabled, to charge all
  * accumulated times to the current process, and to prepare accounting on
  * the next process.
  */
-void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
+void arch_vtime_task_switch(struct task_struct *prev)
 {
 	struct thread_info *pi = task_thread_info(prev);
-	struct thread_info *ni = task_thread_info(next);
-	cputime_t delta_stime, delta_utime;
-	__u64 now;
-
-	now = ia64_get_itc();
+	struct thread_info *ni = task_thread_info(current);
 
-	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
-	if (idle_task(smp_processor_id()) != prev)
-		account_system_time(prev, 0, delta_stime, delta_stime);
-	else
-		account_idle_time(delta_stime);
-
-	if (pi->ac_utime) {
-		delta_utime = cycle_to_cputime(pi->ac_utime);
-		account_user_time(prev, delta_utime, delta_utime);
-	}
-
-	pi->ac_stamp = ni->ac_stamp = now;
+	pi->ac_stamp = ni->ac_stamp;
 	ni->ac_stime = ni->ac_utime = 0;
 }
 
@@ -121,54 +113,44 @@ void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
  * Account time for a transition between system, hard irq or soft irq state.
  * Note that this function is called with interrupts enabled.
  */
-void account_system_vtime(struct task_struct *tsk)
+static cputime_t vtime_delta(struct task_struct *tsk)
 {
 	struct thread_info *ti = task_thread_info(tsk);
-	unsigned long flags;
 	cputime_t delta_stime;
 	__u64 now;
 
-	local_irq_save(flags);
+	WARN_ON_ONCE(!irqs_disabled());
 
 	now = ia64_get_itc();
 
 	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
-	if (irq_count() || idle_task(smp_processor_id()) != tsk)
-		account_system_time(tsk, 0, delta_stime, delta_stime);
-	else
-		account_idle_time(delta_stime);
 	ti->ac_stime = 0;
-
 	ti->ac_stamp = now;
 
-	local_irq_restore(flags);
+	return delta_stime;
 }
-EXPORT_SYMBOL_GPL(account_system_vtime);
 
-/*
- * Called from the timer interrupt handler to charge accumulated user time
- * to the current process.  Must be called with interrupts disabled.
- */
-void account_process_tick(struct task_struct *p, int user_tick)
+void vtime_account_system(struct task_struct *tsk)
 {
-	struct thread_info *ti = task_thread_info(p);
-	cputime_t delta_utime;
+	cputime_t delta = vtime_delta(tsk);
 
-	if (ti->ac_utime) {
-		delta_utime = cycle_to_cputime(ti->ac_utime);
-		account_user_time(p, delta_utime, delta_utime);
-		ti->ac_utime = 0;
-	}
+	account_system_time(tsk, 0, delta, delta);
+}
+EXPORT_SYMBOL_GPL(vtime_account_system);
+
+void vtime_account_idle(struct task_struct *tsk)
+{
+	account_idle_time(vtime_delta(tsk));
 }
 
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
 {
 	unsigned long new_itm;
 
-	if (unlikely(cpu_is_offline(smp_processor_id()))) {
+	if (cpu_is_offline(smp_processor_id())) {
 		return IRQ_HANDLED;
 	}
 
@@ -190,19 +172,10 @@ timer_interrupt (int irq, void *dev_id)
 
 		new_itm += local_cpu_data->itm_delta;
 
-		if (smp_processor_id() == time_keeper_id) {
-			/*
-			 * Here we are in the timer irq handler. We have irqs locally
-			 * disabled, but we don't know if the timer_bh is running on
-			 * another CPU. We need to avoid to SMP race by acquiring the
-			 * xtime_lock.
-			 */
-			write_seqlock(&xtime_lock);
-			do_timer(1);
-			local_cpu_data->itm_next = new_itm;
-			write_sequnlock(&xtime_lock);
-		} else
-			local_cpu_data->itm_next = new_itm;
+		if (smp_processor_id() == time_keeper_id)
+			xtime_update(1);
+
+		local_cpu_data->itm_next = new_itm;
 
 		if (time_after(new_itm, ia64_get_itc()))
 			break;
@@ -222,7 +195,7 @@ skip_process_time_accounting:
 		 * comfort, we increase the safety margin by
 		 * intentionally dropping the next tick(s).  We do NOT
 		 * update itm.next because that would force us to call
-		 * do_timer() which in turn would let our clock run
+		 * xtime_update() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotony of time).
		 */
@@ -271,8 +244,7 @@ static int __init nojitter_setup(char *str)
 
 __setup("nojitter", nojitter_setup);
 
-void __devinit
-ia64_init_itm (void)
+void ia64_init_itm(void)
 {
 	unsigned long platform_base_freq, itc_freq;
 	struct pal_freq_ratio itc_ratio, proc_ratio;
@@ -374,11 +346,8 @@ ia64_init_itm (void)
 	ia64_cpu_local_tick();
 
 	if (!itc_clocksource) {
-		/* Sort out mult/shift values: */
-		clocksource_itc.mult =
-			clocksource_hz2mult(local_cpu_data->itc_freq,
-						clocksource_itc.shift);
-		clocksource_register(&clocksource_itc);
+		clocksource_register_hz(&clocksource_itc,
+					local_cpu_data->itc_freq);
 		itc_clocksource = &clocksource_itc;
 	}
 }
@@ -411,7 +380,7 @@ static cycle_t itc_get_cycles(struct clocksource *cs)
 
 static struct irqaction timer_irqaction = {
 	.handler =	timer_interrupt,
-	.flags =	IRQF_DISABLED | IRQF_IRQPOLL,
+	.flags =	IRQF_IRQPOLL,
 	.name =		"timer"
 };
 
@@ -430,18 +399,16 @@ static int __init rtc_init(void)
 }
 module_init(rtc_init);
 
+void read_persistent_clock(struct timespec *ts)
+{
+	efi_gettimeofday(ts);
+}
+
 void __init
 time_init (void)
 {
 	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
-	efi_gettimeofday(&xtime);
 	ia64_init_itm();
-
-	/*
-	 * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
-	 * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
-	 */
-	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
 }
 
 /*
@@ -473,25 +440,24 @@ void update_vsyscall_tz(void)
 {
 }
 
-void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
+void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
+			 struct clocksource *c, u32 mult)
 {
-	unsigned long flags;
-
-	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+	write_seqcount_begin(&fsyscall_gtod_data.seq);
 
 	/* copy fsyscall clock data */
 	fsyscall_gtod_data.clk_mask = c->mask;
 	fsyscall_gtod_data.clk_mult = mult;
 	fsyscall_gtod_data.clk_shift = c->shift;
-	fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
+	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
 	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
 
 	/* copy kernel time structures */
 	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
 	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
-	fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
+	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
 							+ wall->tv_sec;
-	fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
+	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
 							+ wall->tv_nsec;
 
 	/* normalize */
@@ -500,6 +466,6 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
 		fsyscall_gtod_data.monotonic_time.tv_sec++;
 	}
 
-	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+	write_seqcount_end(&fsyscall_gtod_data.seq);
 }
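Side note on the clocksource change above: the diff drops the hand-computed mult/shift pair and lets the timekeeping core derive both from the counter frequency via clocksource_register_hz(). A minimal sketch of the two registration styles follows; the demo_* names and the 1 MHz frequency are illustrative, not part of the patch:

#include <linux/clocksource.h>
#include <linux/init.h>

/* Stand-in for reading a free-running 64-bit hardware counter. */
static cycle_t demo_read(struct clocksource *cs)
{
	return 0;	/* a real driver returns the current counter value */
}

static struct clocksource demo_cs = {
	.name	= "demo",
	.rating	= 100,
	.read	= demo_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init demo_cs_init(void)
{
	u32 freq_hz = 1000000;	/* illustrative counter frequency */

	/*
	 * Old style (what the diff removes): choose a shift by hand and
	 * derive mult from the frequency before registering:
	 *
	 *	demo_cs.shift = 16;
	 *	demo_cs.mult  = clocksource_hz2mult(freq_hz, demo_cs.shift);
	 *	return clocksource_register(&demo_cs);
	 *
	 * New style (what the diff adds): the core picks mult/shift itself.
	 */
	return clocksource_register_hz(&demo_cs, freq_hz);
}
device_initcall(demo_cs_init);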

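Likewise, update_vsyscall_old() now publishes the gtod snapshot under a bare seqcount instead of a seqlock with irqsave: the timekeeping core already calls it with writers serialized and interrupts off. A minimal sketch of that writer/reader pattern, with illustrative snap_* names; a static seqcount_t is usable zero-initialized, which is why the patch can also drop the explicit SEQLOCK_UNLOCKED initializer on fsyscall_gtod_data:

#include <linux/seqlock.h>

static seqcount_t snap_seq;	/* static storage => zero-initialized, ready to use */
static u64 snap_cycle_last;

/* Writer side: the caller must already serialize writers with irqs off,
 * as the timekeeper does before invoking update_vsyscall_old(). */
static void snap_update(u64 cycle_last)
{
	write_seqcount_begin(&snap_seq);
	snap_cycle_last = cycle_last;
	write_seqcount_end(&snap_seq);
}

/* Reader side: lockless; retries until it sees a consistent snapshot. */
static u64 snap_read(void)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&snap_seq);
		val = snap_cycle_last;
	} while (read_seqcount_retry(&snap_seq, seq));

	return val;
}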