Diffstat (limited to 'arch/blackfin/kernel/ipipe.c')
-rw-r--r--	arch/blackfin/kernel/ipipe.c	275
1 file changed, 164 insertions(+), 111 deletions(-)
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index a5de8d45424..f657b38163e 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -27,13 +27,12 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/bitops.h>
-#include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/kthread.h>
-#include <asm/unistd.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <asm/io.h>
+#include <linux/unistd.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <asm/irq_handler.h>
 
 DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
 
@@ -52,7 +51,7 @@ EXPORT_SYMBOL(__ipipe_freq_scale);
 
 atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
 
-unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
 EXPORT_SYMBOL(__ipipe_irq_lvmask);
 
 static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
@@ -90,6 +89,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
 	struct ipipe_domain *this_domain, *next_domain;
 	struct list_head *head, *pos;
+	struct ipipe_irqdesc *idesc;
 	int m_ack, s = -1;
 
 	/*
@@ -99,18 +99,21 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	 * interrupt.
 	 */
 	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
-	this_domain = ipipe_current_domain;
+	this_domain = __ipipe_current_domain;
+	idesc = &this_domain->irqs[irq];
 
-	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
+	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &idesc->control)))
 		head = &this_domain->p_link;
 	else {
 		head = __ipipe_pipeline.next;
 		next_domain = list_entry(head, struct ipipe_domain, p_link);
-		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
-			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
-				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
+		idesc = &next_domain->irqs[irq];
+		if (likely(test_bit(IPIPE_WIRED_FLAG, &idesc->control))) {
+			if (!m_ack && idesc->acknowledge != NULL)
+				idesc->acknowledge(irq, irq_to_desc(irq));
 			if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
-				s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
+				s = __test_and_set_bit(IPIPE_STALL_FLAG,
						       &p->status);
 			__ipipe_dispatch_wired(next_domain, irq);
 			goto out;
 		}
@@ -121,14 +124,15 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	pos = head;
 	while (pos != &__ipipe_pipeline) {
 		next_domain = list_entry(pos, struct ipipe_domain, p_link);
-		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
+		idesc = &next_domain->irqs[irq];
+		if (test_bit(IPIPE_HANDLE_FLAG, &idesc->control)) {
 			__ipipe_set_irq_pending(next_domain, irq);
-			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
-				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
+			if (!m_ack && idesc->acknowledge != NULL) {
+				idesc->acknowledge(irq, irq_to_desc(irq));
 				m_ack = 1;
 			}
 		}
-		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
+		if (!test_bit(IPIPE_PASS_FLAG, &idesc->control))
 			break;
 		pos = next_domain->p_link.next;
 	}
@@ -150,7 +154,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	 * pending for it.
 	 */
 	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
-	    ipipe_head_cpudom_var(irqpend_himask) == 0)
+	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
 		goto out;
 
 	__ipipe_walk_pipeline(head);
@@ -159,15 +163,10 @@ out:
 		__clear_bit(IPIPE_STALL_FLAG, &p->status);
 }
 
-int __ipipe_check_root(void)
-{
-	return ipipe_root_domain_p;
-}
-
 void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	int prio = desc->ic_prio;
+	int prio = __ipipe_get_irq_priority(irq);
 
 	desc->depth = 0;
 	if (ipd != &ipipe_root &&
@@ -178,8 +177,7 @@ EXPORT_SYMBOL(__ipipe_enable_irqdesc);
 
 void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	int prio = desc->ic_prio;
+	int prio = __ipipe_get_irq_priority(irq);
 
 	if (ipd != &ipipe_root &&
 	    atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
@@ -187,88 +185,59 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 }
 EXPORT_SYMBOL(__ipipe_disable_irqdesc);
 
-void __ipipe_stall_root_raw(void)
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
 {
-	/*
-	 * This code is called by the ins{bwl} routines (see
-	 * arch/blackfin/lib/ins.S), which are heavily used by the
-	 * network stack. It masks all interrupts but those handled by
-	 * non-root domains, so that we keep decent network transfer
-	 * rates for Linux without inducing pathological jitter for
-	 * the real-time domain.
-	 */
-	__asm__ __volatile__ ("sti %0;" : : "d"(__ipipe_irq_lvmask));
+	struct ipipe_percpu_domain_data *p;
+	void (*hook)(void);
+	int ret;
 
-	__set_bit(IPIPE_STALL_FLAG,
-		  &ipipe_root_cpudom_var(status));
-}
-
-void __ipipe_unstall_root_raw(void)
-{
-	__clear_bit(IPIPE_STALL_FLAG,
-		    &ipipe_root_cpudom_var(status));
-
-	__asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));
-}
-
-int __ipipe_syscall_root(struct pt_regs *regs)
-{
-	unsigned long flags;
+	WARN_ON_ONCE(irqs_disabled_hw());
 
 	/*
-	 * We need to run the IRQ tail hook whenever we don't
-	 * propagate a syscall to higher domains, because we know that
-	 * important operations might be pending there (e.g. Xenomai
-	 * deferred rescheduling).
+	 * We need to run the IRQ tail hook each time we intercept a
+	 * syscall, because we know that important operations might be
+	 * pending there (e.g. Xenomai deferred rescheduling).
 	 */
-
-	if (regs->orig_p0 < NR_syscalls) {
-		void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
-		hook();
-		if ((current->flags & PF_EVNOTIFY) == 0)
-			return 0;
-	}
+	hook = (__typeof__(hook))__ipipe_irq_tail_hook;
+	hook();
 
 	/*
 	 * This routine either returns:
 	 * 0 -- if the syscall is to be passed to Linux;
-	 * 1 -- if the syscall should not be passed to Linux, and no
+	 * >0 -- if the syscall should not be passed to Linux, and no
 	 * tail work should be performed;
-	 * -1 -- if the syscall should not be passed to Linux but the
+	 * <0 -- if the syscall should not be passed to Linux but the
 	 * tail work has to be performed (for handling signals etc).
 	 */
 
-	if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
-	    __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
-		if (ipipe_root_domain_p && !in_atomic()) {
-			/*
-			 * Sync pending VIRQs before _TIF_NEED_RESCHED
-			 * is tested.
-			 */
-			local_irq_save_hw(flags);
-			if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
-				__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
-			local_irq_restore_hw(flags);
-			return -1;
-		}
-		return 1;
-	}
+	if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
+	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+		return 0;
 
-	return 0;
-}
+	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
 
-unsigned long ipipe_critical_enter(void (*syncfn) (void))
-{
-	unsigned long flags;
+	hard_local_irq_disable();
 
-	local_irq_save_hw(flags);
+	/*
+	 * This is the end of the syscall path, so we may
+	 * safely assume a valid Linux task stack here.
+	 */
+	if (current->ipipe_flags & PF_EVTRET) {
+		current->ipipe_flags &= ~PF_EVTRET;
+		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
+	}
 
-	return flags;
-}
+	if (!__ipipe_root_domain_p)
+		ret = -1;
+	else {
+		p = ipipe_root_cpudom_ptr();
+		if (__ipipe_ipending_p(p))
+			__ipipe_sync_pipeline();
+	}
 
-void ipipe_critical_exit(unsigned long flags)
-{
-	local_irq_restore_hw(flags);
+	hard_local_irq_enable();
+
+	return -ret;
 }
 
 static void __ipipe_no_irqtail(void)
@@ -277,10 +246,11 @@ static void __ipipe_no_irqtail(void)
 
 int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
 {
-	info->ncpus = num_online_cpus();
-	info->cpufreq = ipipe_cpu_freq();
-	info->archdep.tmirq = IPIPE_TIMER_IRQ;
-	info->archdep.tmfreq = info->cpufreq;
+	info->sys_nr_cpus = num_online_cpus();
+	info->sys_cpu_freq = ipipe_cpu_freq();
+	info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
+	info->sys_hrtimer_freq = __ipipe_core_clock;
+	info->sys_hrclock_freq = __ipipe_core_clock;
 
 	return 0;
 }
@@ -301,44 +271,127 @@ int ipipe_trigger_irq(unsigned irq)
 		return -EINVAL;
 #endif
 
-	local_irq_save_hw(flags);
+	flags = hard_local_irq_save();
 	__ipipe_handle_irq(irq, NULL);
-	local_irq_restore_hw(flags);
+	hard_local_irq_restore(flags);
 
 	return 1;
 }
 
 asmlinkage void __ipipe_sync_root(void)
 {
+	void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+	struct ipipe_percpu_domain_data *p;
 	unsigned long flags;
 
 	BUG_ON(irqs_disabled());
 
-	local_irq_save_hw(flags);
+	flags = hard_local_irq_save();
+
+	if (irq_tail_hook)
+		irq_tail_hook();
 
 	clear_thread_flag(TIF_IRQ_SYNC);
 
-	if (ipipe_root_cpudom_var(irqpend_himask) != 0)
-		__ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
+	p = ipipe_root_cpudom_ptr();
+	if (__ipipe_ipending_p(p))
+		__ipipe_sync_pipeline();
 
-	local_irq_restore_hw(flags);
+	hard_local_irq_restore(flags);
 }
 
-void ___ipipe_sync_pipeline(unsigned long syncmask)
+void ___ipipe_sync_pipeline(void)
 {
-	struct ipipe_domain *ipd = ipipe_current_domain;
+	if (__ipipe_root_domain_p &&
+	    test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
+		return;
 
-	if (ipd == ipipe_root_domain) {
-		if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
-			return;
-	}
+	__ipipe_sync_stage();
+}
 
-	__ipipe_sync_stage(syncmask);
+void __ipipe_disable_root_irqs_hw(void)
+{
+	/*
+	 * This code is called by the ins{bwl} routines (see
+	 * arch/blackfin/lib/ins.S), which are heavily used by the
+	 * network stack. It masks all interrupts but those handled by
+	 * non-root domains, so that we keep decent network transfer
+	 * rates for Linux without inducing pathological jitter for
+	 * the real-time domain.
+	 */
+	bfin_sti(__ipipe_irq_lvmask);
+	__set_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
 }
-EXPORT_SYMBOL(show_stack);
+
+void __ipipe_enable_root_irqs_hw(void)
+{
+	__clear_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+	bfin_sti(bfin_irq_flags);
+}
+
+/*
+ * We could use standard atomic bitops in the following root status
+ * manipulation routines, but let's prepare for SMP support in the
+ * same move, preventing CPU migration as required.
+ */
+void __ipipe_stall_root(void)
+{
+	unsigned long *p, flags;
+
+	flags = hard_local_irq_save();
+	p = &__ipipe_root_status;
+	__set_bit(IPIPE_STALL_FLAG, p);
+	hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__ipipe_stall_root);
 
-#ifdef CONFIG_IPIPE_TRACE_MCOUNT
-void notrace _mcount(void);
-EXPORT_SYMBOL(_mcount);
-#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
+unsigned long __ipipe_test_and_stall_root(void)
+{
+	unsigned long *p, flags;
+	int x;
+
+	flags = hard_local_irq_save();
+	p = &__ipipe_root_status;
+	x = __test_and_set_bit(IPIPE_STALL_FLAG, p);
+	hard_local_irq_restore(flags);
+
+	return x;
+}
+EXPORT_SYMBOL(__ipipe_test_and_stall_root);
+
+unsigned long __ipipe_test_root(void)
+{
+	const unsigned long *p;
+	unsigned long flags;
+	int x;
+
+	flags = hard_local_irq_save_smp();
+	p = &__ipipe_root_status;
+	x = test_bit(IPIPE_STALL_FLAG, p);
+	hard_local_irq_restore_smp(flags);
+
+	return x;
+}
+EXPORT_SYMBOL(__ipipe_test_root);
+
+void __ipipe_lock_root(void)
+{
+	unsigned long *p, flags;
+
+	flags = hard_local_irq_save();
+	p = &__ipipe_root_status;
+	__set_bit(IPIPE_SYNCDEFER_FLAG, p);
+	hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__ipipe_lock_root);
+
+void __ipipe_unlock_root(void)
+{
+	unsigned long *p, flags;
+
+	flags = hard_local_irq_save();
+	p = &__ipipe_root_status;
+	__clear_bit(IPIPE_SYNCDEFER_FLAG, p);
+	hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__ipipe_unlock_root);
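
A note on the root status helpers added at the tail of this diff (__ipipe_stall_root(), __ipipe_test_and_stall_root(), __ipipe_lock_root(), __ipipe_unlock_root()): they all share one pattern, masking hardware interrupts around a cheap non-atomic bitop on __ipipe_root_status rather than paying for an atomic bitop, which also keeps the whole sequence on one CPU, as the in-diff comment about SMP preparation explains. Below is a minimal user-space sketch of that pattern; every name in it (fake_hard_irq_save() and so on) is a hypothetical stand-in, since a plain C program cannot mask real interrupts, so an integer flag models the CPU interrupt state.

/*
 * Minimal sketch of the pattern behind __ipipe_stall_root() and
 * __ipipe_test_and_stall_root() above: mask "hardware interrupts",
 * flip a bit with a cheap non-atomic operation, restore the mask.
 * All names here are hypothetical stand-ins, not kernel API.
 */
#include <stdio.h>

#define STALL_FLAG 0			/* bit 0, like IPIPE_STALL_FLAG */

static unsigned long root_status;	/* stands in for __ipipe_root_status */
static int irqs_enabled = 1;		/* models the hardware irq mask */

static unsigned long fake_hard_irq_save(void)
{
	unsigned long flags = irqs_enabled;
	irqs_enabled = 0;		/* "cli": nothing can preempt us now */
	return flags;
}

static void fake_hard_irq_restore(unsigned long flags)
{
	irqs_enabled = (int)flags;	/* "sti" only if previously enabled */
}

/* Same shape as __ipipe_test_and_stall_root() in the diff. */
static unsigned long test_and_stall_root(void)
{
	unsigned long flags, was_set;

	flags = fake_hard_irq_save();
	was_set = (root_status >> STALL_FLAG) & 1UL; /* __test_and_set_bit() */
	root_status |= 1UL << STALL_FLAG;
	fake_hard_irq_restore(flags);

	return was_set;
}

int main(void)
{
	printf("first call, was stalled:  %lu\n", test_and_stall_root()); /* 0 */
	printf("second call, was stalled: %lu\n", test_and_stall_root()); /* 1 */
	return 0;
}

With interrupts masked, the read-modify-write on the status word cannot be interrupted on the local CPU, so no atomic instruction is needed; returning the saved flags lets nested critical sections restore whatever masking state their caller had, exactly as the hard_local_irq_save()/hard_local_irq_restore() pairs do in the diff.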
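
The return convention of the reworked __ipipe_syscall_root() is also easy to misread, because the dispatcher's verdict is negated on the way out (return -ret) and a non-root current domain forces ret to -1 first. The sketch below mirrors only that arithmetic; syscall_root(), dispatch_result and in_root_domain are hypothetical stand-ins for the real function, the __ipipe_dispatch_event() result and __ipipe_root_domain_p.

/*
 * Sketch of the value flow at the end of __ipipe_syscall_root() above.
 * dispatch_result > 0 models an event handler that absorbed the
 * syscall; the names are stand-ins, not kernel API.
 */
#include <stdio.h>

static int syscall_root(int dispatch_result, int in_root_domain)
{
	int ret = dispatch_result;

	if (!in_root_domain)
		ret = -1;	/* never run Linux tail work off-root */

	/*
	 * Negated, matching the convention documented in the diff:
	 *    0 -> pass the syscall on to Linux;
	 *   >0 -> swallow it, skip the tail work;
	 *   <0 -> swallow it, but run the tail work (signals etc).
	 */
	return -ret;
}

int main(void)
{
	printf("unhandled, root domain: %d\n", syscall_root(0, 1)); /*  0 */
	printf("handled, root domain:   %d\n", syscall_root(1, 1)); /* -1 */
	printf("handled, non-root:      %d\n", syscall_root(1, 0)); /*  1 */
	return 0;
}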
