Diffstat (limited to 'arch/mips/kernel/sync-r4k.c')
| -rw-r--r-- | arch/mips/kernel/sync-r4k.c | 77 |
1 file changed, 25 insertions, 52 deletions
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 9021108eb9c..2242bdd4370 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -1,60 +1,49 @@
 /*
  * Count register synchronisation.
  *
- * All CPUs will have their count registers synchronised to the CPU0 expirelo
+ * All CPUs will have their count registers synchronised to the CPU0 next time
  * value. This can cause a small timewarp for CPU0. All other CPU's should
  * not have done anything significant (but they may have had interrupts
  * enabled briefly - prom_smp_finish() should not be responsible for enabling
  * interrupts...)
- *
- * FIXME: broken for SMTC
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/irqflags.h>
-#include <linux/r4k-timer.h>
+#include <linux/cpumask.h>
 
-#include <asm/atomic.h>
+#include <asm/r4k-timer.h>
+#include <linux/atomic.h>
 #include <asm/barrier.h>
-#include <asm/cpumask.h>
 #include <asm/mipsregs.h>
 
-static atomic_t __initdata count_start_flag = ATOMIC_INIT(0);
-static atomic_t __initdata count_count_start = ATOMIC_INIT(0);
-static atomic_t __initdata count_count_stop = ATOMIC_INIT(0);
+static atomic_t count_start_flag = ATOMIC_INIT(0);
+static atomic_t count_count_start = ATOMIC_INIT(0);
+static atomic_t count_count_stop = ATOMIC_INIT(0);
+static atomic_t count_reference = ATOMIC_INIT(0);
 
-#define COUNTON 100
+#define COUNTON 100
 #define NR_LOOPS 5
 
-void __init synchronise_count_master(void)
+void synchronise_count_master(int cpu)
 {
 	int i;
 	unsigned long flags;
 	unsigned int initcount;
-	int nslaves;
-
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC needs to synchronise per VPE, not per CPU
-	 * ignore for now
-	 */
-	return;
-#endif
 
-	pr_info("Checking COUNT synchronization across %u CPUs: ",
-		num_online_cpus());
+	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
 
 	local_irq_save(flags);
 
 	/*
 	 * Notify the slaves that it's time to start
 	 */
-	atomic_set(&count_start_flag, 1);
+	atomic_set(&count_reference, read_c0_count());
+	atomic_set(&count_start_flag, cpu);
 	smp_wmb();
 
-	/* Count will be initialised to expirelo for all CPU's */
-	initcount = expirelo;
+	/* Count will be initialised to current timer for all CPU's */
+	initcount = read_c0_count();
 
 	/*
 	 * We loop a few times to get a primed instruction cache,
@@ -67,10 +56,9 @@ void __init synchronise_count_master(void)
 	 * two CPUs.
 	 */
 
-	nslaves = num_online_cpus()-1;
 	for (i = 0; i < NR_LOOPS; i++) {
-		/* slaves loop on '!= ncpus' */
-		while (atomic_read(&count_count_start) != nslaves)
+		/* slaves loop on '!= 2' */
+		while (atomic_read(&count_count_start) != 1)
 			mb();
 		atomic_set(&count_count_stop, 0);
 		smp_wmb();
@@ -87,7 +75,7 @@ void __init synchronise_count_master(void)
 		/*
 		 * Wait for all slaves to leave the synchronization point:
 		 */
-		while (atomic_read(&count_count_stop) != nslaves)
+		while (atomic_read(&count_count_stop) != 1)
 			mb();
 		atomic_set(&count_count_start, 0);
 		smp_wmb();
@@ -95,6 +83,7 @@ void __init synchronise_count_master(void)
 	}
 	/* Arrange for an interrupt in a short while */
 	write_c0_compare(read_c0_count() + COUNTON);
+	atomic_set(&count_start_flag, 0);
 
 	local_irq_restore(flags);
 
@@ -106,38 +95,25 @@ void __init synchronise_count_master(void)
 	printk("done.\n");
 }
 
-void __init synchronise_count_slave(void)
+void synchronise_count_slave(int cpu)
 {
 	int i;
-	unsigned long flags;
 	unsigned int initcount;
-	int ncpus;
-
-#ifdef CONFIG_MIPS_MT_SMTC
-	/*
-	 * SMTC needs to synchronise per VPE, not per CPU
-	 * ignore for now
-	 */
-	return;
-#endif
-
-	local_irq_save(flags);
 
 	/*
 	 * Not every cpu is online at the time this gets called,
 	 * so we first wait for the master to say everyone is ready
 	 */
 
-	while (!atomic_read(&count_start_flag))
+	while (atomic_read(&count_start_flag) != cpu)
 		mb();
 
-	/* Count will be initialised to expirelo for all CPU's */
-	initcount = expirelo;
+	/* Count will be initialised to next expire for all CPU's */
+	initcount = atomic_read(&count_reference);
 
-	ncpus = num_online_cpus();
 	for (i = 0; i < NR_LOOPS; i++) {
 		atomic_inc(&count_count_start);
-		while (atomic_read(&count_count_start) != ncpus)
+		while (atomic_read(&count_count_start) != 2)
 			mb();
 
 		/*
@@ -147,13 +123,10 @@ void __init synchronise_count_slave(void)
 		write_c0_count(initcount);
 
 		atomic_inc(&count_count_stop);
-		while (atomic_read(&count_count_stop) != ncpus)
+		while (atomic_read(&count_count_stop) != 2)
 			mb();
 	}
 	/* Arrange for an interrupt in a short while */
 	write_c0_compare(read_c0_count() + COUNTON);
-
-	local_irq_restore(flags);
 }
 #undef NR_LOOPS
-#endif
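
The change above moves from an all-CPUs rendezvous to synchronising one slave CPU at a time: the master snapshots its count into count_reference, names the target slave in count_start_flag, and then runs a two-counter handshake (count_count_start / count_count_stop) so that both sides write the count register in the same loop iteration. The following user-space sketch models that handshake with C11 atomics and pthreads; it is an illustration only, not kernel code, and the names fake_count, sync_master, sync_slave and slave_thread are invented here, with a plain array element standing in for each CPU's CP0 count register.

/*
 * Illustrative sketch only -- a user-space model of the pairwise
 * master/slave rendezvous in the diff above. fake_count[] stands in
 * for each CPU's count register; all other names are invented for
 * this example. Build with: cc -pthread -std=c11 sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_LOOPS 5

static atomic_int count_start_flag;   /* which slave CPU may proceed (0 = none) */
static atomic_int count_count_start;  /* phase 1 of the handshake */
static atomic_int count_count_stop;   /* phase 2 of the handshake */
static atomic_int count_reference;    /* master's counter snapshot */

static int fake_count[2];             /* fake_count[cpu]: stand-in count register */

static void sync_master(int cpu)      /* cpu = id of the slave being synchronised */
{
	int initcount = fake_count[0];
	int i;

	atomic_store(&count_reference, initcount);
	atomic_store(&count_start_flag, cpu);      /* release exactly one slave */

	for (i = 0; i < NR_LOOPS; i++) {
		while (atomic_load(&count_count_start) != 1)
			;                          /* wait for the slave to arrive */
		atomic_store(&count_count_stop, 0);
		atomic_fetch_add(&count_count_start, 1); /* now 2: slave may go on */
		if (i == NR_LOOPS - 1)
			fake_count[0] = initcount; /* both sides write on the last pass */
		while (atomic_load(&count_count_stop) != 1)
			;
		atomic_store(&count_count_start, 0);
		atomic_fetch_add(&count_count_stop, 1);
	}
	atomic_store(&count_start_flag, 0);        /* ready for the next CPU */
}

static void sync_slave(int cpu)
{
	int initcount, i;

	while (atomic_load(&count_start_flag) != cpu)
		;                                  /* wait until it is our turn */
	initcount = atomic_load(&count_reference);

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_fetch_add(&count_count_start, 1);
		while (atomic_load(&count_count_start) != 2)
			;
		if (i == NR_LOOPS - 1)
			fake_count[cpu] = initcount;
		atomic_fetch_add(&count_count_stop, 1);
		while (atomic_load(&count_count_stop) != 2)
			;
	}
}

static void *slave_thread(void *arg)
{
	sync_slave((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t;

	fake_count[0] = 12345;                     /* pretend the master's counter reads this */
	pthread_create(&t, NULL, slave_thread, (void *)1L);
	sync_master(1);
	pthread_join(t, NULL);
	printf("master=%d slave=%d\n", fake_count[0], fake_count[1]);
	return 0;
}

Running the sketch prints the same value for both counters, mirroring how the final loop iteration has master and slave write their count registers back to back, while the earlier iterations only serve to warm the instruction cache so that the last pass is tightly aligned.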
