Diffstat (limited to 'arch/blackfin/kernel/ipipe.c')
 -rw-r--r--  arch/blackfin/kernel/ipipe.c | 395
 1 file changed, 182 insertions(+), 213 deletions(-)
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 339be5a3ae6..f657b38163e 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -27,22 +27,15 @@
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
-#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kthread.h>
-#include <asm/unistd.h>
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <asm/io.h>
-
-static int create_irq_threads;
+#include <linux/unistd.h>
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <asm/irq_handler.h>
DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
-static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);
-
-static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);
-
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
static void __ipipe_no_irqtail(void);
@@ -58,7 +51,7 @@ EXPORT_SYMBOL(__ipipe_freq_scale);
atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
-unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
EXPORT_SYMBOL(__ipipe_irq_lvmask);
static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
@@ -93,8 +86,10 @@ void __ipipe_enable_pipeline(void)
*/
void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
{
+ struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
struct ipipe_domain *this_domain, *next_domain;
struct list_head *head, *pos;
+ struct ipipe_irqdesc *idesc;
int m_ack, s = -1;
/*
@@ -104,59 +99,41 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
* interrupt.
*/
m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
+ this_domain = __ipipe_current_domain;
+ idesc = &this_domain->irqs[irq];
- this_domain = ipipe_current_domain;
-
- if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
+ if (unlikely(test_bit(IPIPE_STICKY_FLAG, &idesc->control)))
head = &this_domain->p_link;
else {
head = __ipipe_pipeline.next;
next_domain = list_entry(head, struct ipipe_domain, p_link);
- if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
- if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
- next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
- if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
+ idesc = &next_domain->irqs[irq];
+ if (likely(test_bit(IPIPE_WIRED_FLAG, &idesc->control))) {
+ if (!m_ack && idesc->acknowledge != NULL)
+ idesc->acknowledge(irq, irq_to_desc(irq));
+ if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
s = __test_and_set_bit(IPIPE_STALL_FLAG,
- &ipipe_root_cpudom_var(status));
+ &p->status);
__ipipe_dispatch_wired(next_domain, irq);
- goto finalize;
- return;
+ goto out;
}
}
/* Ack the interrupt. */
pos = head;
-
while (pos != &__ipipe_pipeline) {
next_domain = list_entry(pos, struct ipipe_domain, p_link);
- /*
- * For each domain handling the incoming IRQ, mark it
- * as pending in its log.
- */
- if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
- /*
- * Domains that handle this IRQ are polled for
- * acknowledging it by decreasing priority
- * order. The interrupt must be made pending
- * _first_ in the domain's status flags before
- * the PIC is unlocked.
- */
+ idesc = &next_domain->irqs[irq];
+ if (test_bit(IPIPE_HANDLE_FLAG, &idesc->control)) {
__ipipe_set_irq_pending(next_domain, irq);
-
- if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
- next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+ if (!m_ack && idesc->acknowledge != NULL) {
+ idesc->acknowledge(irq, irq_to_desc(irq));
m_ack = 1;
}
}
-
- /*
- * If the domain does not want the IRQ to be passed
- * down the interrupt pipe, exit the loop now.
- */
- if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
+ if (!test_bit(IPIPE_PASS_FLAG, &idesc->control))
break;
-
pos = next_domain->p_link.next;
}
@@ -166,29 +143,30 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
* immediately to the current domain if the interrupt has been
* marked as 'sticky'. This search does not go beyond the
* current domain in the pipeline. We also enforce the
- * additional root stage lock (blackfin-specific). */
+ * additional root stage lock (blackfin-specific).
+ */
+ if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
+ s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
- if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
- s = __test_and_set_bit(IPIPE_STALL_FLAG,
- &ipipe_root_cpudom_var(status));
-finalize:
+ /*
+ * If the interrupt preempted the head domain, then do not
+ * even try to walk the pipeline, unless an interrupt is
+ * pending for it.
+ */
+ if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
+ !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
+ goto out;
__ipipe_walk_pipeline(head);
-
+out:
if (!s)
- __clear_bit(IPIPE_STALL_FLAG,
- &ipipe_root_cpudom_var(status));
-}
-
-int __ipipe_check_root(void)
-{
- return ipipe_root_domain_p;
+ __clear_bit(IPIPE_STALL_FLAG, &p->status);
}
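
The control bits tested in this walk (IPIPE_HANDLE_FLAG, IPIPE_PASS_FLAG, IPIPE_WIRED_FLAG, IPIPE_STICKY_FLAG) are normally set when a domain registers for an IRQ through the generic I-pipe core. A minimal sketch, assuming the legacy ipipe_virtualize_irq() API and its IPIPE_*_MASK mode bits, which live outside this file:

	/* Sketch only: assumes <linux/ipipe.h> and the legacy I-pipe API. */
	static void demo_irq_handler(unsigned irq, void *cookie)
	{
		/* Runs when the owning domain's IRQ log is synchronized. */
	}

	static int demo_attach(struct ipipe_domain *ipd, unsigned irq)
	{
		/* HANDLE: log the IRQ for this domain; PASS: let the walk in
		 * __ipipe_handle_irq() continue down the pipeline. */
		return ipipe_virtualize_irq(ipd, irq, demo_irq_handler, NULL,
					    NULL, IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
	}
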
void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
- struct irq_desc *desc = irq_desc + irq;
- int prio = desc->ic_prio;
+ struct irq_desc *desc = irq_to_desc(irq);
+ int prio = __ipipe_get_irq_priority(irq);
desc->depth = 0;
if (ipd != &ipipe_root &&
@@ -199,8 +177,7 @@ EXPORT_SYMBOL(__ipipe_enable_irqdesc);
void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
{
- struct irq_desc *desc = irq_desc + irq;
- int prio = desc->ic_prio;
+ int prio = __ipipe_get_irq_priority(irq);
if (ipd != &ipipe_root &&
atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
@@ -208,85 +185,59 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
}
EXPORT_SYMBOL(__ipipe_disable_irqdesc);
-void __ipipe_stall_root_raw(void)
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
{
- /*
- * This code is called by the ins{bwl} routines (see
- * arch/blackfin/lib/ins.S), which are heavily used by the
- * network stack. It masks all interrupts but those handled by
- * non-root domains, so that we keep decent network transfer
- * rates for Linux without inducing pathological jitter for
- * the real-time domain.
- */
- __asm__ __volatile__ ("sti %0;" : : "d"(__ipipe_irq_lvmask));
+ struct ipipe_percpu_domain_data *p;
+ void (*hook)(void);
+ int ret;
- __set_bit(IPIPE_STALL_FLAG,
- &ipipe_root_cpudom_var(status));
-}
+ WARN_ON_ONCE(irqs_disabled_hw());
-void __ipipe_unstall_root_raw(void)
-{
- __clear_bit(IPIPE_STALL_FLAG,
- &ipipe_root_cpudom_var(status));
-
- __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));
-}
-
-int __ipipe_syscall_root(struct pt_regs *regs)
-{
- unsigned long flags;
-
- /* We need to run the IRQ tail hook whenever we don't
- * propagate a syscall to higher domains, because we know that
- * important operations might be pending there (e.g. Xenomai
- * deferred rescheduling). */
-
- if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) {
- void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
- hook();
- return 0;
- }
+ /*
+ * We need to run the IRQ tail hook each time we intercept a
+ * syscall, because we know that important operations might be
+ * pending there (e.g. Xenomai deferred rescheduling).
+ */
+ hook = (__typeof__(hook))__ipipe_irq_tail_hook;
+ hook();
/*
* This routine either returns:
* 0 -- if the syscall is to be passed to Linux;
- * 1 -- if the syscall should not be passed to Linux, and no
+ * >0 -- if the syscall should not be passed to Linux, and no
* tail work should be performed;
- * -1 -- if the syscall should not be passed to Linux but the
+ * <0 -- if the syscall should not be passed to Linux but the
* tail work has to be performed (for handling signals etc).
*/
- if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
- __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
- if (ipipe_root_domain_p && !in_atomic()) {
- /*
- * Sync pending VIRQs before _TIF_NEED_RESCHED
- * is tested.
- */
- local_irq_save_hw(flags);
- if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
- __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
- local_irq_restore_hw(flags);
- return -1;
- }
- return 1;
- }
+ if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
+ !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+ return 0;
- return 0;
-}
+ ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
-unsigned long ipipe_critical_enter(void (*syncfn) (void))
-{
- unsigned long flags;
+ hard_local_irq_disable();
- local_irq_save_hw(flags);
+ /*
+ * This is the end of the syscall path, so we may
+ * safely assume a valid Linux task stack here.
+ */
+ if (current->ipipe_flags & PF_EVTRET) {
+ current->ipipe_flags &= ~PF_EVTRET;
+ __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
+ }
- return flags;
-}
+ if (!__ipipe_root_domain_p)
+ ret = -1;
+ else {
+ p = ipipe_root_cpudom_ptr();
+ if (__ipipe_ipending_p(p))
+ __ipipe_sync_pipeline();
+ }
-void ipipe_critical_exit(unsigned long flags)
-{
- local_irq_restore_hw(flags);
+ hard_local_irq_enable();
+
+ return -ret;
}
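
The convention above is consumed by the Blackfin syscall entry path; a hedged sketch of what that dispatch amounts to (the demo_* helpers are hypothetical stand-ins for work the real entry code in arch/blackfin/mach-common/entry.S does in assembly):

	static long demo_do_linux_syscall(struct pt_regs *regs);	/* hypothetical */
	static void demo_do_tail_work(struct pt_regs *regs);		/* hypothetical */

	static long demo_syscall_gate(struct pt_regs *regs)
	{
		int r = __ipipe_syscall_root(regs);

		if (r == 0)
			return demo_do_linux_syscall(regs);	/* pass to Linux */
		if (r < 0)
			demo_do_tail_work(regs);	/* signals etc. */
		return regs->r0;	/* swallowed; r0 carries the result */
	}
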
static void __ipipe_no_irqtail(void)
@@ -295,10 +246,11 @@ static void __ipipe_no_irqtail(void)
int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
{
- info->ncpus = num_online_cpus();
- info->cpufreq = ipipe_cpu_freq();
- info->archdep.tmirq = IPIPE_TIMER_IRQ;
- info->archdep.tmfreq = info->cpufreq;
+ info->sys_nr_cpus = num_online_cpus();
+ info->sys_cpu_freq = ipipe_cpu_freq();
+ info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
+ info->sys_hrtimer_freq = __ipipe_core_clock;
+ info->sys_hrclock_freq = __ipipe_core_clock;
return 0;
}
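
A minimal usage sketch for the accessor above, using only the fields this patch fills in (the printk casts are an assumption about the field types):

	static void demo_show_sysinfo(void)
	{
		struct ipipe_sysinfo info;

		if (ipipe_get_sysinfo(&info) == 0)
			printk(KERN_INFO "ipipe: %d cpu(s), hrtimer irq %d\n",
			       (int)info.sys_nr_cpus, (int)info.sys_hrtimer_irq);
	}
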
@@ -312,117 +264,134 @@ int ipipe_trigger_irq(unsigned irq)
{
unsigned long flags;
+#ifdef CONFIG_IPIPE_DEBUG
if (irq >= IPIPE_NR_IRQS ||
(ipipe_virtual_irq_p(irq)
&& !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
return -EINVAL;
+#endif
- local_irq_save_hw(flags);
-
+ flags = hard_local_irq_save();
__ipipe_handle_irq(irq, NULL);
-
- local_irq_restore_hw(flags);
+ hard_local_irq_restore(flags);
return 1;
}
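
ipipe_trigger_irq() is typically exercised with a virtual IRQ; a hedged sketch, assuming the I-pipe core's ipipe_alloc_virq() allocator (declared elsewhere, taken here to return 0 on failure):

	static unsigned demo_virq;

	static void demo_post_event(void)
	{
		if (!demo_virq)
			demo_virq = ipipe_alloc_virq();
		if (demo_virq)
			/* ends up in __ipipe_handle_irq(demo_virq, NULL) */
			ipipe_trigger_irq(demo_virq);
	}
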
-/* Move Linux IRQ to threads. */
+asmlinkage void __ipipe_sync_root(void)
+{
+ void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+ struct ipipe_percpu_domain_data *p;
+ unsigned long flags;
+
+ BUG_ON(irqs_disabled());
+
+ flags = hard_local_irq_save();
+
+ if (irq_tail_hook)
+ irq_tail_hook();
+
+ clear_thread_flag(TIF_IRQ_SYNC);
-static int do_irqd(void *__desc)
+ p = ipipe_root_cpudom_ptr();
+ if (__ipipe_ipending_p(p))
+ __ipipe_sync_pipeline();
+
+ hard_local_irq_restore(flags);
+}
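
Both __ipipe_sync_root() and __ipipe_syscall_root() call through __ipipe_irq_tail_hook, an integer-typed variable (note the casts above) that a co-kernel such as Xenomai points at its deferred-work routine. A sketch of an installer, with the assignment style an assumption:

	static void demo_irq_tail_hook(void)
	{
		/* deferred co-kernel work, e.g. rescheduling, goes here */
	}

	static void demo_install_tail_hook(void)
	{
		__ipipe_irq_tail_hook = (unsigned long)demo_irq_tail_hook;
	}
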
+
+void ___ipipe_sync_pipeline(void)
{
- struct irq_desc *desc = __desc;
- unsigned irq = desc - irq_desc;
- int thrprio = desc->thr_prio;
- int thrmask = 1 << thrprio;
- int cpu = smp_processor_id();
- cpumask_t cpumask;
-
- sigfillset(&current->blocked);
- current->flags |= PF_NOFREEZE;
- cpumask = cpumask_of_cpu(cpu);
- set_cpus_allowed(current, cpumask);
- ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);
-
- while (!kthread_should_stop()) {
- local_irq_disable();
- if (!(desc->status & IRQ_SCHEDULED)) {
- set_current_state(TASK_INTERRUPTIBLE);
-resched:
- local_irq_enable();
- schedule();
- local_irq_disable();
- }
- __set_current_state(TASK_RUNNING);
- /*
- * If higher priority interrupt servers are ready to
- * run, reschedule immediately. We need this for the
- * GPIO demux IRQ handler to unmask the interrupt line
- * _last_, after all GPIO IRQs have run.
- */
- if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
- goto resched;
- if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
- per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
- desc->status &= ~IRQ_SCHEDULED;
- desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
- local_irq_enable();
- }
- __set_current_state(TASK_RUNNING);
- return 0;
+ if (__ipipe_root_domain_p &&
+ test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
+ return;
+
+ __ipipe_sync_stage();
}
-static void kick_irqd(unsigned irq, void *cookie)
+void __ipipe_disable_root_irqs_hw(void)
{
- struct irq_desc *desc = irq_desc + irq;
- int thrprio = desc->thr_prio;
- int thrmask = 1 << thrprio;
- int cpu = smp_processor_id();
-
- if (!(desc->status & IRQ_SCHEDULED)) {
- desc->status |= IRQ_SCHEDULED;
- per_cpu(pending_irqthread_mask, cpu) |= thrmask;
- ++per_cpu(pending_irq_count[thrprio], cpu);
- wake_up_process(desc->thread);
- }
+ /*
+ * This code is called by the ins{bwl} routines (see
+ * arch/blackfin/lib/ins.S), which are heavily used by the
+ * network stack. It masks all interrupts but those handled by
+ * non-root domains, so that we keep decent network transfer
+ * rates for Linux without inducing pathological jitter for
+ * the real-time domain.
+ */
+ bfin_sti(__ipipe_irq_lvmask);
+ __set_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
}
-int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc)
+void __ipipe_enable_root_irqs_hw(void)
{
- if (desc->thread || !create_irq_threads)
- return 0;
+ __clear_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+ bfin_sti(bfin_irq_flags);
+}
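
A sketch of the bracketing the comment above describes; the real callers are the assembly ins{bwl} loops in arch/blackfin/lib/ins.S, so the C loop below is a stand-in:

	static void demo_ins_loop(void)
	{
		__ipipe_disable_root_irqs_hw();	/* only non-root levels stay open */
		/* ... tight string-I/O loop; the real-time domain can still preempt ... */
		__ipipe_enable_root_irqs_hw();	/* reopen via bfin_irq_flags */
	}
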
- desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
- if (desc->thread == NULL) {
- printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
- return -ENOMEM;
- }
+/*
+ * We could use standard atomic bitops in the following root status
+ * manipulation routines, but let's prepare for SMP support in the
+ * same move, preventing CPU migration as required.
+ */
+void __ipipe_stall_root(void)
+{
+ unsigned long *p, flags;
- wake_up_process(desc->thread);
+ flags = hard_local_irq_save();
+ p = &__ipipe_root_status;
+ __set_bit(IPIPE_STALL_FLAG, p);
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__ipipe_stall_root);
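
For comparison, the standard-bitops form the comment above alludes to collapses to a single call; it is equivalent on UP but gives no protection against CPU migration, which is why the hard-irq bracket is kept:

	static void demo_stall_root_atomic(void)
	{
		set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);	/* UP-only sketch */
	}
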
- desc->thr_handler = ipipe_root_domain->irqs[irq].handler;
- ipipe_root_domain->irqs[irq].handler = &kick_irqd;
+unsigned long __ipipe_test_and_stall_root(void)
+{
+ unsigned long *p, flags;
+ int x;
- return 0;
+ flags = hard_local_irq_save();
+ p = &__ipipe_root_status;
+ x = __test_and_set_bit(IPIPE_STALL_FLAG, p);
+ hard_local_irq_restore(flags);
+
+ return x;
}
+EXPORT_SYMBOL(__ipipe_test_and_stall_root);
-void __init ipipe_init_irq_threads(void)
+unsigned long __ipipe_test_root(void)
{
- unsigned irq;
- struct irq_desc *desc;
+ const unsigned long *p;
+ unsigned long flags;
+ int x;
- create_irq_threads = 1;
+ flags = hard_local_irq_save_smp();
+ p = &__ipipe_root_status;
+ x = test_bit(IPIPE_STALL_FLAG, p);
+ hard_local_irq_restore_smp(flags);
- for (irq = 0; irq < NR_IRQS; irq++) {
- desc = irq_desc + irq;
- if (desc->action != NULL ||
- (desc->status & IRQ_NOREQUEST) != 0)
- ipipe_start_irq_thread(irq, desc);
- }
+ return x;
}
+EXPORT_SYMBOL(__ipipe_test_root);
-EXPORT_SYMBOL(show_stack);
+void __ipipe_lock_root(void)
+{
+ unsigned long *p, flags;
-#ifdef CONFIG_IPIPE_TRACE_MCOUNT
-void notrace _mcount(void);
-EXPORT_SYMBOL(_mcount);
-#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
+ flags = hard_local_irq_save();
+ p = &__ipipe_root_status;
+ __set_bit(IPIPE_SYNCDEFER_FLAG, p);
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__ipipe_lock_root);
+
+void __ipipe_unlock_root(void)
+{
+ unsigned long *p, flags;
+
+ flags = hard_local_irq_save();
+ p = &__ipipe_root_status;
+ __clear_bit(IPIPE_SYNCDEFER_FLAG, p);
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__ipipe_unlock_root);
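
A usage sketch pairing the two calls; while IPIPE_SYNCDEFER_FLAG is set, ___ipipe_sync_pipeline() above returns early for the root stage, so root IRQs are logged but not played back until after the unlock:

	static void demo_root_sync_defer(void)
	{
		__ipipe_lock_root();
		/* root-stage IRQs accumulate in the per-CPU log here */
		__ipipe_unlock_root();
		/* the next sync point flushes whatever was deferred */
	}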