author		David S. Miller <davem@davemloft.net>	2008-10-08 14:56:41 -0700
committer	David S. Miller <davem@davemloft.net>	2008-10-08 14:56:41 -0700
commit		4dd565134ece7e5d528d4c5288879310c54419e9 (patch)
tree		e08910d2d0feae0c030f8f01acc9b03eb760ad9a /arch
parent		071d7ab6649eb34a873a53e71635186e9117101d (diff)
parent		69849375d6b13e94d08cdc94b49b11fbab454a0e (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/netdev.c
Diffstat (limited to 'arch')
36 files changed, 866 insertions, 457 deletions
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h
index f6679989103..1a873b36a4a 100644
--- a/arch/ia64/include/asm/sections.h
+++ b/arch/ia64/include/asm/sections.h
@@ -11,6 +11,9 @@
 #include <asm-generic/sections.h>
 
 extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
+#ifdef CONFIG_SMP
+extern char __cpu0_per_cpu[];
+#endif
 extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
 extern char __start___rse_patchlist[], __end___rse_patchlist[];
 extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 8bdea8eb62e..66e491d8baa 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -367,16 +367,17 @@ start_ap:
 	;;
 #else
 (isAP)	br.few 2f
-	mov r20=r19
-	sub r19=r19,r18
+	movl r20=__cpu0_per_cpu
 	;;
 	shr.u r18=r18,3
 1:
-	ld8 r21=[r20],8;;
-	st8 [r19]=r21,8
+	ld8 r21=[r19],8;;
+	st8 [r20]=r21,8
 	adds r18=-1,r18;;
 	cmp4.lt p7,p6=0,r18
 (p7)	br.cond.dptk.few 1b
+	mov r19=r20
+	;;
 2:
 #endif
 	tpa r19=r19
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index de71da811cd..10a7d47e851 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -215,9 +215,6 @@ SECTIONS
   /* Per-cpu data: */
   percpu : { } :percpu
   . = ALIGN(PERCPU_PAGE_SIZE);
-#ifdef CONFIG_SMP
-  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
-#endif
   __phys_per_cpu_start = .;
   .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
     {
@@ -233,6 +230,11 @@ SECTIONS
   data : { } :data
   .data : AT(ADDR(.data) - LOAD_OFFSET)
     {
+#ifdef CONFIG_SMP
+  . = ALIGN(PERCPU_PAGE_SIZE);
+  __cpu0_per_cpu = .;
+  . = . + PERCPU_PAGE_SIZE;	/* cpu0 per-cpu space */
+#endif
 	DATA_DATA
 	*(.data1)
 	*(.gnu.linkonce.d*)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index e566ff43884..0ee085efbe2 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -163,7 +163,7 @@ per_cpu_init (void)
 	 * get_zeroed_page().
 	 */
 	if (first_time) {
-		void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+		void *cpu0_data = __cpu0_per_cpu;
 
 		first_time=0;
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 78026aabaa7..d8c5fcd89e5 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -144,7 +144,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 
 	for_each_possible_early_cpu(cpu) {
 		if (cpu == 0) {
-			void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE;
+			void *cpu0_data = __cpu0_per_cpu;
 			__per_cpu_offset[cpu] = (char*)cpu0_data -
 				__per_cpu_start;
 		} else if (node == node_cpuid[cpu].nid) {
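The net effect of the ia64 hunks above: cpu0's per-cpu area stops being an anonymous page placed in front of __phys_per_cpu_start and becomes a named, linker-reserved page (__cpu0_per_cpu) inside .data. A minimal sketch of the offset computation that contig.c and discontig.c now share; the helper name is hypothetical, and the symbols come from the modified vmlinux.lds.S:

	/* Sketch only: deriving cpu0's per-cpu offset from the new symbol. */
	extern char __cpu0_per_cpu[];	/* page reserved inside .data (vmlinux.lds.S) */
	extern char __per_cpu_start[];	/* start of the per-cpu data template */

	static unsigned long cpu0_percpu_offset(void)	/* hypothetical helper */
	{
		/* Old: void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE; */
		void *cpu0_data = __cpu0_per_cpu;

		return (unsigned long)((char *)cpu0_data - __per_cpu_start);
	}

Keeping the page inside .data also lets the boot-time copy loop in head.S address it with a simple movl of the symbol, as the new assembly above does.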
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 49896a2a1d7..1e06d233fa8 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -211,6 +211,7 @@ config MIPS_MALTA
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select SYS_SUPPORTS_MIPS_CMP if BROKEN	# because SYNC_R4K is broken
 	select SYS_SUPPORTS_MULTITHREADING
 	select SYS_SUPPORTS_SMARTMIPS
 	help
@@ -1403,7 +1404,6 @@ config MIPS_MT_SMTC
 	depends on CPU_MIPS32_R2
 	#depends on CPU_MIPS64_R2		# once there is hardware ...
 	depends on SYS_SUPPORTS_MULTITHREADING
-	select GENERIC_CLOCKEVENTS_BROADCAST
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select MIPS_MT
@@ -1451,32 +1451,17 @@ config MIPS_VPE_LOADER
 	  Includes a loader for loading an elf relocatable object
 	  onto another VPE and running it.
 
-config MIPS_MT_SMTC_INSTANT_REPLAY
-	bool "Low-latency Dispatch of Deferred SMTC IPIs"
-	depends on MIPS_MT_SMTC && !PREEMPT
-	default y
-	help
-	  SMTC pseudo-interrupts between TCs are deferred and queued
-	  if the target TC is interrupt-inhibited (IXMT). In the first
-	  SMTC prototypes, these queued IPIs were serviced on return
-	  to user mode, or on entry into the kernel idle loop. The
-	  INSTANT_REPLAY option dispatches them as part of local_irq_restore()
-	  processing, which adds runtime overhead (hence the option to turn
-	  it off), but ensures that IPIs are handled promptly even under
-	  heavy I/O interrupt load.
-
 config MIPS_MT_SMTC_IM_BACKSTOP
 	bool "Use per-TC register bits as backstop for inhibited IM bits"
 	depends on MIPS_MT_SMTC
-	default y
+	default n
 	help
 	  To support multiple TC microthreads acting as "CPUs" within
 	  a VPE, VPE-wide interrupt mask bits must be specially manipulated
 	  during interrupt handling. To support legacy drivers and interrupt
 	  controller management code, SMTC has a "backstop" to track and
 	  if necessary restore the interrupt mask. This has some performance
-	  impact on interrupt service overhead. Disable it only if you know
-	  what you are doing.
+	  impact on interrupt service overhead.
 
 config MIPS_MT_SMTC_IRQAFF
 	bool "Support IRQ affinity API"
@@ -1486,10 +1471,8 @@ config MIPS_MT_SMTC_IRQAFF
 	  Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
 	  for SMTC Linux kernel. Requires platform support, of which
 	  an example can be found in the MIPS kernel i8259 and Malta
-	  platform code.  It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
-	  be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to
-	  interrupt dispatch, and should be used only if you know what
-	  you are doing.
+	  platform code.  Adds some overhead to interrupt dispatch, and
+	  should be used only if you know what you are doing.
 
 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
@@ -1517,6 +1500,18 @@ config MIPS_APSP_KSPD
 	  "exit" syscall notifying other kernel modules the SP program is
 	  exiting.  You probably want to say yes here.
 
+config MIPS_CMP
+	bool "MIPS CMP framework support"
+	depends on SYS_SUPPORTS_MIPS_CMP
+	select SYNC_R4K if BROKEN
+	select SYS_SUPPORTS_SMP
+	select SYS_SUPPORTS_SCHED_SMT if SMP
+	select WEAK_ORDERING
+	default n
+	help
+	  This is a placeholder option for the GCMP work. It will need to
+	  be handled differently...
+
 config SB1_PASS_1_WORKAROUNDS
 	bool
 	depends on CPU_SB1_PASS_1
@@ -1693,6 +1688,9 @@ config SMP
 config SMP_UP
 	bool
 
+config SYS_SUPPORTS_MIPS_CMP
+	bool
+
 config SYS_SUPPORTS_SMP
 	bool
 
@@ -1740,17 +1738,6 @@ config NR_CPUS
 	  performance should round up your number of processors to the next
 	  power of two.
 
-config MIPS_CMP
-	bool "MIPS CMP framework support"
-	depends on SMP
-	select SYNC_R4K
-	select SYS_SUPPORTS_SCHED_SMT
-	select WEAK_ORDERING
-	default n
-	help
-	  This is a placeholder option for the GCMP work. It will need to
-	  be handled differently...
-
 source "kernel/time/Kconfig"
 
 #
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 706f9397479..25775cb5400 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -10,6 +10,7 @@ obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
+obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)	+= cevt-sb1250.o
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0..4a4c59f2737 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,6 +12,14 @@
 #include <asm/smtc_ipi.h>
 #include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * The SMTC Kernel for the 34K, 1004K, et al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+
+#ifndef CONFIG_MIPS_MT_SMTC
 
 static int mips_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
 	unsigned int cnt;
 	int res;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	{
-	unsigned long flags, vpflags;
-	local_irq_save(flags);
-	vpflags = dvpe();
-#endif
 	cnt = read_c0_count();
 	cnt += delta;
 	write_c0_compare(cnt);
 	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
-	evpe(vpflags);
-	local_irq_restore(flags);
-	}
-#endif
 	return res;
 }
 
-static void mips_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+			 struct clock_event_device *evt)
 {
 	/* Nothing to do ...  */
 }
 
-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
 
-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
-	write_c0_compare(read_c0_compare());
-}
+#ifndef CONFIG_MIPS_MT_SMTC
 
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
-	/*
-	 * The performance counter overflow interrupt may be shared with the
-	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-	 * and we can't reliably determine if a counter interrupt has also
-	 * happened (!r2) then don't check for a timer interrupt.
-	 */
-	return (cp0_perfcount_irq < 0) &&
-		perf_irq() == IRQ_HANDLED &&
-		!r2;
-}
-
-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
 	const int r2 = cpu_has_mips_r2;
 	struct clock_event_device *cd;
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 	 * interrupt.  Being the paranoiacs we are, we check anyway.
 	 */
 	if (!r2 || (read_c0_cause() & (1 << 30))) {
-		c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
-		if (cpu_data[cpu].vpe_id)
-			goto out;
-		cpu = 0;
-#endif
+		/* Clear Count/Compare Interrupt */
+		write_c0_compare(read_c0_compare());
 		cd = &per_cpu(mips_clockevent_device, cpu);
 		cd->event_handler(cd);
 	}
@@ -107,65 +78,16 @@ out:
 	return IRQ_HANDLED;
 }
 
-static struct irqaction c0_compare_irqaction = {
+#endif /* Not CONFIG_MIPS_MT_SMTC */
+
+struct irqaction c0_compare_irqaction = {
 	.handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
-	.flags = IRQF_DISABLED,
-#else
 	.flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
 	.name = "timer",
 };
 
-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
-{
-}
-
-static void mips_broadcast(cpumask_t mask)
-{
-	unsigned int cpu;
-
-	for_each_cpu_mask(cpu, mask)
-		smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
-	//uint64_t mips_freq = mips_hpt_frequency;
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *cd;
-
-	cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
-	cd->name		= "SMTC";
-	cd->features		= CLOCK_EVT_FEAT_DUMMY;
-
-	/* Calculate the min / max delta */
-	cd->mult		= 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
-	cd->shift		= 0; //32;
-	cd->max_delta_ns	= 0; //clockevent_delta2ns(0x7fffffff, cd);
-	cd->min_delta_ns	= 0; //clockevent_delta2ns(0x30, cd);
-
-	cd->rating		= 200;
-	cd->irq			= 17; //-1;
-//	if (cpu)
-//		cd->cpumask	= CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-//	else
-	cd->cpumask		= cpumask_of_cpu(cpu);
-
-	cd->set_mode		= smtc_set_mode;
-
-	cd->broadcast		= mips_broadcast;
-
-	clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
+void mips_event_handler(struct clock_event_device *dev)
 {
 }
 
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
 	return (read_c0_cause() >> cp0_compare_irq) & 0x100;
 }
 
-static int c0_compare_int_usable(void)
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register.  4 times the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
+ */
+
+#define compare_change_hazard() \
+	do { \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+	} while (0)
+
+int c0_compare_int_usable(void)
 {
 	unsigned int delta;
 	unsigned int cnt;
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
 	 */
 	if (c0_compare_int_pending()) {
 		write_c0_compare(read_c0_count());
-		irq_disable_hazard();
+		compare_change_hazard();
 		if (c0_compare_int_pending())
 			return 0;
 	}
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
 		cnt = read_c0_count();
 		cnt += delta;
 		write_c0_compare(cnt);
-		irq_disable_hazard();
+		compare_change_hazard();
 		if ((int)(read_c0_count() - cnt) < 0)
 		    break;
 		/* increase delta if the timer was already expired */
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
 	while ((int)(read_c0_count() - cnt) <= 0)
 		;	/* Wait for expiry  */
 
+	compare_change_hazard();
 	if (!c0_compare_int_pending())
 		return 0;
 
 	write_c0_compare(read_c0_count());
-	irq_disable_hazard();
+	compare_change_hazard();
 	if (c0_compare_int_pending())
 		return 0;
 
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
 	return 1;
 }
 
+#ifndef CONFIG_MIPS_MT_SMTC
+
 int __cpuinit mips_clockevent_init(void)
 {
 	uint64_t mips_freq = mips_hpt_frequency;
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
 	if (!cpu_has_counter || !mips_hpt_frequency)
 		return -ENXIO;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	setup_smtc_dummy_clockevent_device();
-
-	/*
-	 * On SMTC we only register VPE0's compare interrupt as clockevent
-	 * device.
-	 */
-	if (cpu)
-		return 0;
-#endif
-
 	if (!c0_compare_int_usable())
 		return -ENXIO;
 
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
 	cd->rating		= 300;
 	cd->irq			= irq;
-#ifdef CONFIG_MIPS_MT_SMTC
-	cd->cpumask		= CPU_MASK_ALL;
-#else
 	cd->cpumask		= cpumask_of_cpu(cpu);
-#endif
 	cd->set_next_event	= mips_next_event;
-	cd->set_mode		= mips_set_mode;
+	cd->set_mode		= mips_set_clock_mode;
 	cd->event_handler	= mips_event_handler;
 
 	clockevents_register_device(cd);
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)
 
 	cp0_timer_irq_installed = 1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
-	setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
 	setup_irq(irq, &c0_compare_irqaction);
-#endif
 
 	return 0;
 }
+
+#endif /* Not CONFIG_MIPS_MT_SMTC */
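A detail worth pausing on in c0_compare_int_usable() above: every expiry test is written as a signed comparison on the unsigned difference of Count and Compare, e.g. (int)(read_c0_count() - cnt) < 0, which stays correct when the 32-bit Count register wraps. The following standalone sketch is illustrative only; the variables stand in for the coprocessor registers:

	#include <stdio.h>

	/*
	 * Illustrative demo of the wraparound-safe expiry test used in
	 * c0_compare_int_usable(): the sign of the unsigned difference,
	 * not a direct comparison, decides "before or after".
	 */
	int main(void)
	{
		unsigned int count = 0xfffffff0u;	/* stand-in for read_c0_count() */
		unsigned int compare = count + 0x40;	/* 0x40 ticks ahead; wraps to 0x30 */

		/* Not yet expired: signed difference is negative.  A naive
		 * 'count >= compare' would wrongly claim expiry here, because
		 * Compare has wrapped while Count has not. */
		printf("expired? %d\n", (int)(count - compare) >= 0);	/* prints 0 */

		count += 0x80;	/* time passes; Count wraps past Compare */

		/* Expired: signed difference is now non-negative. */
		printf("expired? %d\n", (int)(count - compare) >= 0);	/* prints 1 */
		return 0;
	}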
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
new file mode 100644
index 00000000000..5162fe4b595
--- /dev/null
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -0,0 +1,321 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+
+#include <asm/smtc_ipi.h>
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * Variant clock event timer support for SMTC on MIPS 34K, 1004K
+ * or other MIPS MT cores.
+ *
+ * Notes on SMTC Support:
+ *
+ * SMTC has multiple microthread TCs pretending to be Linux CPUs.
+ * But there's only one Count/Compare pair per VPE, and Compare
+ * interrupts are taken opportunistically by available TCs
+ * bound to the VPE with the Count register.  The new timer
+ * framework provides for global broadcasts, but we really
+ * want VPE-level multicasts for best behavior.
+ * So instead of invoking the high-level clock-event broadcast
+ * code, this version of SMTC support uses the historical SMTC
+ * multicast mechanisms "under the hood", appearing to the
+ * generic clock layer as if the interrupts are per-CPU.
+ *
+ * The approach taken here is to maintain a set of NR_CPUS
+ * virtual timers, and track which "CPU" needs to be alerted
+ * at each event.
+ *
+ * It's unlikely that we'll see a MIPS MT core with more than
+ * 2 VPEs, but we *know* that we won't need to handle more
+ * VPEs than we have "CPUs".  So NR_CPUS arrays of NR_CPUS
+ * elements are always going to be overkill, but always going
+ * to be enough.
+ */
+
+unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
+static int smtc_nextinvpe[NR_CPUS];
+
+/*
+ * Timestamps stored are absolute values to be programmed
+ * into Count register.  Valid timestamps will never be zero.
+ * If a Zero Count value is actually calculated, it is converted
+ * to be a 1, which will introduce one or two CPU cycles of error
+ * roughly once every four billion events, which at 1000 HZ means
+ * about once every 50 days.  If that's actually a problem, one
+ * could alternate squashing 0 to 1 and to -1.
+ */
+
+#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
+#define ISVALID(x) ((x) != 0L)
+
+/*
+ * Time comparison is subtle, as it's really truncated
+ * modular arithmetic.
+ */
+
+#define IS_SOONER(a, b, reference) \
+    (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
+
+/*
+ * CATCHUP_INCREMENT, used when the function falls behind the counter.
+ * Could be an increasing function instead of a constant.
+ */
+
+#define CATCHUP_INCREMENT 64
+
+static int mips_next_event(unsigned long delta,
+			   struct clock_event_device *evt)
+{
+	unsigned long flags;
+	unsigned int mtflags;
+	unsigned long timestamp, reference, previous;
+	unsigned long nextcomp = 0L;
+	int vpe = current_cpu_data.vpe_id;
+	int cpu = smp_processor_id();
+	local_irq_save(flags);
+	mtflags = dmt();
+
+	/*
+	 * Maintain the per-TC virtual timer
+	 * and program the per-VPE shared Count register
+	 * as appropriate here...
+	 */
+	reference = (unsigned long)read_c0_count();
+	timestamp = MAKEVALID(reference + delta);
+	/*
+	 * To really model the clock, we have to catch the case
+	 * where the current next-in-VPE timestamp is the old
+	 * timestamp for the calling CPU, but the new value is
+	 * in fact later.  In that case, we have to do a full
+	 * scan and discover the new next-in-VPE CPU id and
+	 * timestamp.
+	 */
+	previous = smtc_nexttime[vpe][cpu];
+	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
+	    && IS_SOONER(previous, timestamp, reference)) {
+		int i;
+		int soonest = cpu;
+
+		/*
+		 * Update timestamp array here, so that new
+		 * value gets considered along with those of
+		 * other virtual CPUs on the VPE.
+		 */
+		smtc_nexttime[vpe][cpu] = timestamp;
+		for_each_online_cpu(i) {
+			if (ISVALID(smtc_nexttime[vpe][i])
+			    && IS_SOONER(smtc_nexttime[vpe][i],
+				smtc_nexttime[vpe][soonest], reference)) {
+				soonest = i;
+			}
+		}
+		smtc_nextinvpe[vpe] = soonest;
+		nextcomp = smtc_nexttime[vpe][soonest];
+	/*
+	 * Otherwise, we don't have to process the whole array rank,
+	 * we just have to see if the event horizon has gotten closer.
+	 */
+	} else {
+		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
+		    IS_SOONER(timestamp,
+			smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
+			smtc_nextinvpe[vpe] = cpu;
+			nextcomp = timestamp;
+		}
+		/*
+		 * Since next-in-VPE may be the same as the executing
+		 * virtual CPU, we update the array *after* checking
+		 * its value.
+		 */
+		smtc_nexttime[vpe][cpu] = timestamp;
+	}
+
+	/*
+	 * It may be that, in fact, we don't need to update Compare,
+	 * but if we do, we want to make sure we didn't fall into
+	 * a crack just behind Count.
+	 */
+	if (ISVALID(nextcomp)) {
+		write_c0_compare(nextcomp);
+		ehb();
+		/*
+		 * We never return an error, we just make sure
+		 * that we trigger the handlers as quickly as
+		 * we can if we fell behind.
+		 */
+		while ((nextcomp - (unsigned long)read_c0_count())
+			> (unsigned long)LONG_MAX) {
+			nextcomp += CATCHUP_INCREMENT;
+			write_c0_compare(nextcomp);
+			ehb();
+		}
+	}
+	emt(mtflags);
+	local_irq_restore(flags);
+	return 0;
+}
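IS_SOONER() above is the crux of the virtual-timer bookkeeping: both timestamps are rebased against the current Count value ("reference"), so ordering is decided in truncated modular arithmetic and survives wraparound. A self-contained sketch follows; the 32-bit restatement of the macro and the sample values are illustrative assumptions, chosen to mirror the width of the Count register:

	#include <stdio.h>

	/* 32-bit restatement of IS_SOONER() from cevt-smtc.c above,
	 * so the demo wraps at the same width as the Count register. */
	#define IS_SOONER32(a, b, ref) \
		(((a) - (unsigned int)(ref)) < ((b) - (unsigned int)(ref)))

	int main(void)
	{
		unsigned int reference = 0xfffffff0u;	/* current Count, near wrap */
		unsigned int a = 0xfffffff8u;	/* 8 ticks away, not yet wrapped */
		unsigned int b = 0x00000010u;	/* 0x20 ticks away, already wrapped */

		/* Rebased against reference: a - ref = 8, b - ref = 0x20,
		 * so a is correctly found to be sooner ... */
		printf("IS_SOONER: %d\n", IS_SOONER32(a, b, reference));	/* prints 1 */

		/* ... while a naive absolute comparison gets it backwards. */
		printf("naive a<b: %d\n", a < b);	/* prints 0 */
		return 0;
	}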
+
+
+void smtc_distribute_timer(int vpe)
+{
+	unsigned long flags;
+	unsigned int mtflags;
+	int cpu;
+	struct clock_event_device *cd;
+	unsigned long nextstamp = 0L;
+	unsigned long reference;
+
+
+repeat:
+	for_each_online_cpu(cpu) {
+		/*
+		 * Find virtual CPUs within the current VPE who have
+		 * unserviced timer requests whose time is now past.
+		 */
+		local_irq_save(flags);
+		mtflags = dmt();
+		if (cpu_data[cpu].vpe_id == vpe &&
+		    ISVALID(smtc_nexttime[vpe][cpu])) {
+			reference = (unsigned long)read_c0_count();
+			if ((smtc_nexttime[vpe][cpu] - reference)
+			    > (unsigned long)LONG_MAX) {
+				smtc_nexttime[vpe][cpu] = 0L;
+				emt(mtflags);
+				local_irq_restore(flags);
+				/*
+				 * We don't send IPIs to ourself.
+				 */
+				if (cpu != smp_processor_id()) {
+					smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+				} else {
+					cd = &per_cpu(mips_clockevent_device, cpu);
+					cd->event_handler(cd);
+				}
+			} else {
+				/* Local to VPE but Valid Time not yet reached. */
+				if (!ISVALID(nextstamp) ||
+				    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
+				    reference)) {
+					smtc_nextinvpe[vpe] = cpu;
+					nextstamp = smtc_nexttime[vpe][cpu];
+				}
+				emt(mtflags);
+				local_irq_restore(flags);
+			}
+		} else {
+			emt(mtflags);
+			local_irq_restore(flags);
+
+		}
+	}
+	/* Reprogram for interrupt at next soonest timestamp for VPE */
+	if (ISVALID(nextstamp)) {
+		write_c0_compare(nextstamp);
+		ehb();
+		if ((nextstamp - (unsigned long)read_c0_count())
+		    > (unsigned long)LONG_MAX)
+			goto repeat;
+	}
+}
+
+
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+	int cpu = smp_processor_id();
+
+	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
+	handle_perf_irq(1);
+
+	if (read_c0_cause() & (1 << 30)) {
+		/* Clear Count/Compare Interrupt */
+		write_c0_compare(read_c0_compare());
+		smtc_distribute_timer(cpu_data[cpu].vpe_id);
+	}
+	return IRQ_HANDLED;
+}
+
+
+int __cpuinit mips_clockevent_init(void)
+{
+	uint64_t mips_freq = mips_hpt_frequency;
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *cd;
+	unsigned int irq;
+	int i;
+	int j;
+
+	if (!cpu_has_counter || !mips_hpt_frequency)
+		return -ENXIO;
+	if (cpu == 0) {
+		for (i = 0; i < num_possible_cpus(); i++) {
+			smtc_nextinvpe[i] = 0;
+			for (j = 0; j < num_possible_cpus(); j++)
+				smtc_nexttime[i][j] = 0L;
+		}
+		/*
+		 * SMTC also can't have the usability test
+		 * run by secondary TCs once Compare is in use.
+		 */
+		if (!c0_compare_int_usable())
+			return -ENXIO;
+	}
+
+	/*
+	 * With vectored interrupts things are getting platform specific.
+	 * get_c0_compare_int is a hook to allow a platform to return the
+	 * interrupt number of its liking.
+	 */
+	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+	if (get_c0_compare_int)
+		irq = get_c0_compare_int();
+
+	cd = &per_cpu(mips_clockevent_device, cpu);
+
+	cd->name		= "MIPS";
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+
+	/* Calculate the min / max delta */
+	cd->mult		= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+	cd->shift		= 32;
+	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
+	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);
+
+	cd->rating		= 300;
+	cd->irq			= irq;
+	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->set_next_event	= mips_next_event;
+	cd->set_mode		= mips_set_clock_mode;
+	cd->event_handler	= mips_event_handler;
+
+	clockevents_register_device(cd);
+
+	/*
+	 * On SMTC we only want to do the data structure
+	 * initialization and IRQ setup once.
+	 */
+	if (cpu)
+		return 0;
+	/*
+	 * And we need the hwmask associated with the c0_compare
+	 * vector to be initialized.
+	 */
+	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
+	if (cp0_timer_irq_installed)