Diffstat (limited to 'arch/powerpc/platforms/cell/spu_base.c')
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c  1013
1 file changed, 577 insertions, 436 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index d75ae03df68..f85db3a69b4 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -25,39 +25,117 @@
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/module.h>
-#include <linux/poll.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
-
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/semaphore.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/linux_logo.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/spu.h>
-#include <asm/mmu_context.h>
+#include <asm/spu_priv1.h>
+#include <asm/spu_csa.h>
+#include <asm/xmon.h>
+#include <asm/prom.h>
+#include <asm/kexec.h>
+
+const struct spu_management_ops *spu_management_ops;
+EXPORT_SYMBOL_GPL(spu_management_ops);
+
+const struct spu_priv1_ops *spu_priv1_ops;
+EXPORT_SYMBOL_GPL(spu_priv1_ops);
+
+struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
+EXPORT_SYMBOL_GPL(cbe_spu_info);
+
+/*
+ * The spufs fault-handling code needs to call force_sig_info to raise signals
+ * on DMA errors. Export it here to avoid general kernel-wide access to this
+ * function
+ */
+EXPORT_SYMBOL_GPL(force_sig_info);
+
+/*
+ * Protects cbe_spu_info and spu->number.
+ */
+static DEFINE_SPINLOCK(spu_lock);
+
+/*
+ * List of all spus in the system.
+ *
+ * This list is iterated by callers from irq context and callers that
+ * want to sleep.  Thus modifications need to be done with both
+ * spu_full_list_lock and spu_full_list_mutex held, while iterating
+ * through it requires either of these locks.
+ *
+ * In addition spu_full_list_lock protects all assignments to
+ * spu->mm.
+ */
+static LIST_HEAD(spu_full_list);
+static DEFINE_SPINLOCK(spu_full_list_lock);
+static DEFINE_MUTEX(spu_full_list_mutex);
 
-#include "interrupt.h"
+struct spu_slb {
+        u64 esid, vsid;
+};
 
-static int __spu_trap_invalid_dma(struct spu *spu)
+void spu_invalidate_slbs(struct spu *spu)
 {
-        pr_debug("%s\n", __FUNCTION__);
-        force_sig(SIGBUS, /* info, */ current);
-        return 0;
+        struct spu_priv2 __iomem *priv2 = spu->priv2;
+        unsigned long flags;
+
+        spin_lock_irqsave(&spu->register_lock, flags);
+        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
+                out_be64(&priv2->slb_invalidate_all_W, 0UL);
+        spin_unlock_irqrestore(&spu->register_lock, flags);
 }
+EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
 
-static int __spu_trap_dma_align(struct spu *spu)
+/* This is called by the MM core when a segment size is changed, to
+ * request a flush of all the SPEs using a given mm
+ */
+void spu_flush_all_slbs(struct mm_struct *mm)
 {
-        pr_debug("%s\n", __FUNCTION__);
-        force_sig(SIGBUS, /* info, */ current);
-        return 0;
+        struct spu *spu;
+        unsigned long flags;
+
+        spin_lock_irqsave(&spu_full_list_lock, flags);
+        list_for_each_entry(spu, &spu_full_list, full_list) {
+                if (spu->mm == mm)
+                        spu_invalidate_slbs(spu);
+        }
+        spin_unlock_irqrestore(&spu_full_list_lock, flags);
 }
 
-static int __spu_trap_error(struct spu *spu)
+/* The hack below stinks... try to do something better one of
+ * these days... Does it even work properly with NR_CPUS == 1 ?
+ */
+static inline void mm_needs_global_tlbie(struct mm_struct *mm)
 {
-        pr_debug("%s\n", __FUNCTION__);
-        force_sig(SIGILL, /* info, */ current);
-        return 0;
+        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
+
+        /* Global TLBIE broadcast required with SPEs. */
+        bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
+}
+
+void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&spu_full_list_lock, flags);
+        spu->mm = mm;
+        spin_unlock_irqrestore(&spu_full_list_lock, flags);
+        if (mm)
+                mm_needs_global_tlbie(mm);
+}
+EXPORT_SYMBOL_GPL(spu_associate_mm);
+
+int spu_64k_pages_available(void)
+{
+        return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
 }
+EXPORT_SYMBOL_GPL(spu_64k_pages_available);
 
 static void spu_restart_dma(struct spu *spu)
 {
@@ -65,167 +143,212 @@ static void spu_restart_dma(struct spu *spu)
 
         if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
                 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+        else {
+                set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
+                mb();
+        }
 }
 
-static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
 {
         struct spu_priv2 __iomem *priv2 = spu->priv2;
-        struct mm_struct *mm = spu->mm;
-        u64 esid, vsid;
 
-        pr_debug("%s\n", __FUNCTION__);
+        pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
+                        __func__, slbe, slb->vsid, slb->esid);
 
-        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
-                /* SLBs are pre-loaded for context switch, so
-                 * we should never get here!
-                 */
-                printk("%s: invalid access during switch!\n", __func__);
-                return 1;
-        }
-        if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
+        out_be64(&priv2->slb_index_W, slbe);
+        /* set invalid before writing vsid */
+        out_be64(&priv2->slb_esid_RW, 0);
+        /* now it's safe to write the vsid */
+        out_be64(&priv2->slb_vsid_RW, slb->vsid);
+        /* setting the new esid makes the entry valid again */
+        out_be64(&priv2->slb_esid_RW, slb->esid);
+}
+
+static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
+{
+        struct mm_struct *mm = spu->mm;
+        struct spu_slb slb;
+        int psize;
+
+        pr_debug("%s\n", __func__);
+
+        slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
+
+        switch(REGION_ID(ea)) {
+        case USER_REGION_ID:
+#ifdef CONFIG_PPC_MM_SLICES
+                psize = get_slice_psize(mm, ea);
+#else
+                psize = mm->context.user_psize;
+#endif
+                slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
+                                << SLB_VSID_SHIFT) | SLB_VSID_USER;
+                break;
+        case VMALLOC_REGION_ID:
+                if (ea < VMALLOC_END)
+                        psize = mmu_vmalloc_psize;
+                else
+                        psize = mmu_io_psize;
+                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
+                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
+                break;
+        case KERNEL_REGION_ID:
+                psize = mmu_linear_psize;
+                slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
+                                << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
+                break;
+        default:
                 /* Future: support kernel segments so that drivers
                  * can use SPUs.
                  */
                 pr_debug("invalid region access at %016lx\n", ea);
                 return 1;
         }
+        slb.vsid |= mmu_psize_defs[psize].sllp;
 
-        esid = (ea & ESID_MASK) | SLB_ESID_V;
-        vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
-        if (in_hugepage_area(mm->context, ea))
-                vsid |= SLB_VSID_L;
-
-        out_be64(&priv2->slb_index_W, spu->slb_replace);
-        out_be64(&priv2->slb_vsid_RW, vsid);
-        out_be64(&priv2->slb_esid_RW, esid);
+        spu_load_slb(spu, spu->slb_replace, &slb);
 
         spu->slb_replace++;
         if (spu->slb_replace >= 8)
                 spu->slb_replace = 0;
 
         spu_restart_dma(spu);
-
+        spu->stats.slb_flt++;
         return 0;
 }
 
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
-        pr_debug("%s\n", __FUNCTION__);
-
-        /* Handle kernel space hash faults immediately.
-           User hash faults need to be deferred to process context. */
-        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
-            && REGION_ID(ea) != USER_REGION_ID
-            && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
-                spu_restart_dma(spu);
-                return 0;
-        }
+        int ret;
 
-        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
-                printk("%s: invalid access during switch!\n", __func__);
-                return 1;
+        pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);
+
+        /*
+         * Handle kernel space hash faults immediately. User hash
+         * faults need to be deferred to process context.
+         */
+        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
+            (REGION_ID(ea) != USER_REGION_ID)) {
+
+                spin_unlock(&spu->register_lock);
+                ret = hash_page(ea, _PAGE_PRESENT, 0x300);
+                spin_lock(&spu->register_lock);
+
+                if (!ret) {
+                        spu_restart_dma(spu);
+                        return 0;
+                }
         }
 
-        spu->dar = ea;
-        spu->dsisr = dsisr;
-        mb();
-        if (spu->stop_callback)
-                spu->stop_callback(spu);
-        return 0;
-}
+        spu->class_1_dar = ea;
+        spu->class_1_dsisr = dsisr;
 
-static int __spu_trap_mailbox(struct spu *spu)
-{
-        if (spu->ibox_callback)
-                spu->ibox_callback(spu);
+        spu->stop_callback(spu, 1);
 
-        /* atomically disable SPU mailbox interrupts */
-        spin_lock(&spu->register_lock);
-        spu_int_mask_and(spu, 2, ~0x1);
-        spin_unlock(&spu->register_lock);
-        return 0;
-}
+        spu->class_1_dar = 0;
+        spu->class_1_dsisr = 0;
 
-static int __spu_trap_stop(struct spu *spu)
-{
-        pr_debug("%s\n", __FUNCTION__);
-        spu->stop_code = in_be32(&spu->problem->spu_status_R);
-        if (spu->stop_callback)
-                spu->stop_callback(spu);
         return 0;
 }
 
-static int __spu_trap_halt(struct spu *spu)
+static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
 {
-        pr_debug("%s\n", __FUNCTION__);
-        spu->stop_code = in_be32(&spu->problem->spu_status_R);
-        if (spu->stop_callback)
-                spu->stop_callback(spu);
-        return 0;
-}
+        unsigned long ea = (unsigned long)addr;
+        u64 llp;
 
-static int __spu_trap_tag_group(struct spu *spu)
-{
-        pr_debug("%s\n", __FUNCTION__);
-        /* wake_up(&spu->dma_wq); */
-        return 0;
+        if (REGION_ID(ea) == KERNEL_REGION_ID)
+                llp = mmu_psize_defs[mmu_linear_psize].sllp;
+        else
+                llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+
+        slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
+                SLB_VSID_KERNEL | llp;
+        slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
 }
 
-static int __spu_trap_spubox(struct spu *spu)
+/**
+ * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
+ * address @new_addr is present.
+ */
+static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
+                void *new_addr)
 {
-        if (spu->wbox_callback)
-                spu->wbox_callback(spu);
+        unsigned long ea = (unsigned long)new_addr;
+        int i;
+
+        for (i = 0; i < nr_slbs; i++)
+                if (!((slbs[i].esid ^ ea) & ESID_MASK))
+                        return 1;
 
-        /* atomically disable SPU mailbox interrupts */
-        spin_lock(&spu->register_lock);
-        spu_int_mask_and(spu, 2, ~0x10);
-        spin_unlock(&spu->register_lock);
         return 0;
 }
 
-static irqreturn_t
-spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
+/**
+ * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
+ * need to map both the context save area, and the save/restore code.
+ *
+ * Because the lscsa and code may cross segment boundaries, we check to see
+ * if mappings are required for the start and end of each range. We currently
+ * assume that the mappings are smaller than one segment - if not, something
+ * is seriously wrong.
+ */
+void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
+                void *code, int code_size)
 {
-        struct spu *spu;
+        struct spu_slb slbs[4];
+        int i, nr_slbs = 0;
+        /* start and end addresses of both mappings */
+        void *addrs[] = {
+                lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
+                code, code + code_size - 1
+        };
 
-        spu = data;
-        spu->class_0_pending = 1;
-        if (spu->stop_callback)
-                spu->stop_callback(spu);
+        /* check the set of addresses, and create a new entry in the slbs array
+         * if there isn't already a SLB for that address */
+        for (i = 0; i < ARRAY_SIZE(addrs); i++) {
+                if (__slb_present(slbs, nr_slbs, addrs[i]))
+                        continue;
 
-        return IRQ_HANDLED;
+                __spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
+                nr_slbs++;
+        }
+
+        spin_lock_irq(&spu->register_lock);
+        /* Add the set of SLBs */
+        for (i = 0; i < nr_slbs; i++)
+                spu_load_slb(spu, i, &slbs[i]);
+        spin_unlock_irq(&spu->register_lock);
 }
+EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
 
-int
-spu_irq_class_0_bottom(struct spu *spu)
+static irqreturn_t
+spu_irq_class_0(int irq, void *data)
 {
+        struct spu *spu;
         unsigned long stat, mask;
 
-        spu->class_0_pending = 0;
+        spu = data;
 
+        spin_lock(&spu->register_lock);
         mask = spu_int_mask_get(spu, 0);
-        stat = spu_int_stat_get(spu, 0);
+        stat = spu_int_stat_get(spu, 0) & mask;
 
-        stat &= mask;
-
-        if (stat & 1) /* invalid MFC DMA */
-                __spu_trap_invalid_dma(spu);
-
-        if (stat & 2) /* invalid DMA alignment */
-                __spu_trap_dma_align(spu);
-
-        if (stat & 4) /* error on SPU */
-                __spu_trap_error(spu);
+        spu->class_0_pending |= stat;
+        spu->class_0_dar = spu_mfc_dar_get(spu);
+        spu->stop_callback(spu, 0);
+        spu->class_0_pending = 0;
+        spu->class_0_dar = 0;
 
         spu_int_stat_clear(spu, 0, stat);
+        spin_unlock(&spu->register_lock);
 
-        return (stat & 0x7) ? -EIO : 0;
+        return IRQ_HANDLED;
 }
-EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);
 
 static irqreturn_t
-spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
+spu_irq_class_1(int irq, void *data)
 {
         struct spu *spu;
         unsigned long stat, mask, dar, dsisr;
@@ -238,113 +361,130 @@ spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
         stat = spu_int_stat_get(spu, 1) & mask;
         dar = spu_mfc_dar_get(spu);
         dsisr = spu_mfc_dsisr_get(spu);
-        if (stat & 2) /* mapping fault */
+        if (stat & CLASS1_STORAGE_FAULT_INTR)
                 spu_mfc_dsisr_set(spu, 0ul);
         spu_int_stat_clear(spu, 1, stat);
-        spin_unlock(&spu->register_lock);
 
-        if (stat & 1) /* segment fault */
+        pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
+                        dar, dsisr);
+
+        if (stat & CLASS1_SEGMENT_FAULT_INTR)
                 __spu_trap_data_seg(spu, dar);
 
-        if (stat & 2) { /* mapping fault */
+        if (stat & CLASS1_STORAGE_FAULT_INTR)
                 __spu_trap_data_map(spu, dar, dsisr);
-        }
 
-        if (stat & 4) /* ls compare & suspend on get */
+        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
                 ;
 
-        if (stat & 8) /* ls compare & suspend on put */
+        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
                 ;
 
+        spu->class_1_dsisr = 0;
+        spu->class_1_dar = 0;
+
+        spin_unlock(&spu->register_lock);
+
         return stat ? IRQ_HANDLED : IRQ_NONE;
 }
-EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);
 
 static irqreturn_t
-spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
+spu_irq_class_2(int irq, void *data)
 {
         struct spu *spu;
         unsigned long stat;
         unsigned long mask;
+        const int mailbox_intrs =
+                CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;
 
         spu = data;
+        spin_lock(&spu->register_lock);
         stat = spu_int_stat_get(spu, 2);
         mask = spu_int_mask_get(spu, 2);
+        /* ignore interrupts we're not waiting for */
+        stat &= mask;
+        /* mailbox interrupts are level triggered. mask them now before
+         * acknowledging */
+        if (stat & mailbox_intrs)
+                spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
+        /* acknowledge all interrupts before the callbacks */
+        spu_int_stat_clear(spu, 2, stat);
 
         pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
 
-        stat &= mask;
+        if (stat & CLASS2_MAILBOX_INTR)
+                spu->ibox_callback(spu);
 
-        if (stat & 1) /* PPC core mailbox */
-                __spu_trap_mailbox(spu);
+        if (stat & CLASS2_SPU_STOP_INTR)
+                spu->stop_callback(spu, 2);
 
-        if (stat & 2) /* SPU stop-and-signal */
-                __spu_trap_stop(spu);
+        if (stat & CLASS2_SPU_HALT_INTR)
+                spu->stop_callback(spu, 2);
 
-        if (stat & 4) /* SPU halted */
-                __spu_trap_halt(spu);
+        if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
+                spu->mfc_callback(spu);
 
-        if (stat & 8) /* DMA tag group complete */
-                __spu_trap_tag_group(spu);
+        if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
+                spu->wbox_callback(spu);
 
-        if (stat & 0x10) /* SPU mailbox threshold */
-                __spu_trap_spubox(spu);
+        spu->stats.class2_intr++;
+
+        spin_unlock(&spu->register_lock);
 
-        spu_int_stat_clear(spu, 2, stat);
         return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
-static int
-spu_request_irqs(struct spu *spu)
+static int spu_request_irqs(struct spu *spu)
 {
-        int ret;
-        int irq_base;
-
-        irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
-
-        snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
-        ret = request_irq(irq_base + spu->isrc,
-                 spu_irq_class_0, 0, spu->irq_c0, spu);
-        if (ret)
-                goto out;
-
-        snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
-        ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
-                 spu_irq_class_1, 0, spu->irq_c1, spu);
-        if (ret)
-                goto out1;
+        int ret = 0;
 
-        snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
-        ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
-                 spu_irq_class_2, 0, spu->irq_c2, spu);
-        if (ret)
-                goto out2;
-        goto out;
+        if (spu->irqs[0] != NO_IRQ) {
+                snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
+                         spu->number);
+                ret = request_irq(spu->irqs[0], spu_irq_class_0,
+                                  0, spu->irq_c0, spu);
+                if (ret)
+                        goto bail0;
+        }
+        if (spu->irqs[1] != NO_IRQ) {
+                snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
+                         spu->number);
+                ret = request_irq(spu->irqs[1], spu_irq_class_1,
+                                  0, spu->irq_c1, spu);
+                if (ret)
+                        goto bail1;
+        }
+        if (spu->irqs[2] != NO_IRQ) {
+                snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
+                         spu->number);
+                ret = request_irq(spu->irqs[2], spu_irq_class_2,
+                                  0, spu->irq_c2, spu);
+                if (ret)
+                        goto bail2;
+        }
+        return 0;
 
-out2:
-        free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
-out1:
-        free_irq(irq_base + spu->isrc, spu);
-out:
+bail2:
+        if (spu->irqs[1] != NO_IRQ)
+                free_irq(spu->irqs[1], spu);
bail1:
+        if (spu->irqs[0] != NO_IRQ)
+                free_irq(spu->irqs[0], spu);
bail0:
         return ret;
 }
 
-static void
-spu_free_irqs(struct spu *spu)
+static void spu_free_irqs(struct spu *spu)
 {
-        int irq_base;
-
-        irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;
-
-        free_irq(irq_base + spu->isrc, spu);
-        free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
-        free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
+        if (spu->irqs[0] != NO_IRQ)
+                free_irq(spu->irqs[0], spu);
+        if (spu->irqs[1] != NO_IRQ)
+                free_irq(spu->irqs[1], spu);
+        if (spu->irqs[2] != NO_IRQ)
+                free_irq(spu->irqs[2], spu);
 }
 
-static LIST_HEAD(spu_list);
-static DECLARE_MUTEX(spu_mutex);
-
-static void spu_init_channels(struct spu *spu)
+void spu_init_channels(struct spu *spu)
 {
         static const struct {
                  unsigned channel;
@@ -377,332 +517,333 @@ static void spu_init_channels(struct spu *spu)
                 out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
         }
 }
+EXPORT_SYMBOL_GPL(spu_init_channels);
 
-struct spu *spu_alloc(void)
+static struct bus_type spu_subsys = {
+        .name = "spu",
+        .dev_name = "spu",
+};
+
+int spu_add_dev_attr(struct device_attribute *attr)
 {
         struct spu *spu;
 
-        down(&spu_mutex);
-        if (!list_empty(&spu_list)) {
-                spu = list_entry(spu_list.next, struct spu, list);
-                list_del_init(&spu->list);
-                pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
-        } else {
-                pr_debug("No SPU left\n");
-                spu = NULL;
-        }
-        up(&spu_mutex);
-
-        if (spu)
-                spu_init_channels(spu);
+        mutex_lock(&spu_full_list_mutex);
+        list_for_each_entry(spu, &spu_full_list, full_list)
+                device_create_file(&spu->dev, attr);
+        mutex_unlock(&spu_full_list_mutex);
 
-        return spu;
+        return 0;
 }
-EXPORT_SYMBOL_GPL(spu_alloc);
+EXPORT_SYMBOL_GPL(spu_add_dev_attr);
 
-void spu_free(struct spu *spu)
+int spu_add_dev_attr_group(struct attribute_group *attrs)
 {
-        down(&spu_mutex);
-        list_add_tail(&spu->list, &spu_list);
-        up(&spu_mutex);
-}
-EXPORT_SYMBOL_GPL(spu_free);
+        struct spu *spu;
+        int rc = 0;
 
-static int spu_handle_mm_fault(struct spu *spu)
-{
-        struct mm_struct *mm = spu->mm;
-        struct vm_area_struct *vma;
-        u64 ea, dsisr, is_write;
-        int ret;
+        mutex_lock(&spu_full_list_mutex);
+        list_for_each_entry(spu, &spu_full_list, full_list) {
+                rc = sysfs_create_group(&spu->dev.kobj, attrs);
 
-        ea = spu->dar;
-        dsisr = spu->dsisr;
-#if 0
-        if (!IS_VALID_EA(ea)) {
-                return -EFAULT;
-        }
-#endif /* XXX */
-        if (mm == NULL) {
-                return -EFAULT;
-        }
-        if (mm->pgd == NULL) {
-                return -EFAULT;
-        }
+                /* we're in trouble here, but try unwinding anyway */
+                if (rc) {
+                        printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
+                                        __func__, attrs->name);
 
-        down_read(&mm->mmap_sem);
-        vma = find_vma(mm, ea);
-        if (!vma)
-                goto bad_area;
-        if (vma->vm_start <= ea)
-                goto good_area;
-        if (!(vma->vm_flags & VM_GROWSDOWN))
-                goto bad_area;
-#if 0
-        if (expand_stack(vma, ea))
-                goto bad_area;
-#endif /* XXX */
-good_area:
-        is_write = dsisr & MFC_DSISR_ACCESS_PUT;
-        if (is_write) {
-                if (!(vma->vm_flags & VM_WRITE))
-                        goto bad_area;
-        } else {
-                if (dsisr & MFC_DSISR_ACCESS_DENIED)
-                        goto bad_area;
-                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
-                        goto bad_area;
-        }
-        ret = 0;
-        switch (handle_mm_fault(mm, vma, ea, is_write)) {
-        case VM_FAULT_MINOR:
-                current->min_flt++;
-                break;
-        case VM_FAULT_MAJOR:
-                current->maj_flt++;
-                break;
-        case VM_FAULT_SIGBUS:
-                ret = -EFAULT;
-                goto bad_area;
-        case VM_FAULT_OOM:
-                ret = -ENOMEM;
-                goto bad_area;
-        default:
-                BUG();
+                        list_for_each_entry_continue_reverse(spu,
+                                        &spu_full_list, full_list)
+                                sysfs_remove_group(&spu->dev.kobj, attrs);
+                        break;
+                }
         }
-        up_read(&mm->mmap_sem);
-        return ret;
 
-bad_area:
-        up_read(&mm->mmap_sem);
-        return -EFAULT;
+        mutex_unlock(&spu_full_list_mutex);
+
+        return rc;
 }
+EXPORT_SYMBOL_GPL(spu_add_dev_attr_group);
 
-int spu_irq_class_1_bottom(struct spu *spu)
+
+void spu_remove_dev_attr(struct device_attribute *attr)
 {
-        u64 ea, dsisr, access, error = 0UL;
-        int ret = 0;
+        struct spu *spu;
 
-        ea = spu->dar;
-        dsisr = spu->dsisr;
-        if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
-                access = (_PAGE_PRESENT | _PAGE_USER);
-                access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
-                if (hash_page(ea, access, 0x300) != 0)
-                        error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
-        }
-        if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
-            (dsisr & MFC_DSISR_ACCESS_DENIED)) {
-                if ((ret = spu_handle_mm_fault(spu)) != 0)
-                        error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
-                else
-                        error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
-        }
-        spu->dar = 0UL;
-        spu->dsisr = 0UL;
-        if (!error) {
-                spu_restart_dma(spu);
-        } else {
-                __spu_trap_invalid_dma(spu);
-        }
-        return ret;
+        mutex_lock(&spu_full_list_mutex);
+        list_for_each_entry(spu, &spu_full_list, full_list)
+                device_remove_file(&spu->dev, attr);
+        mutex_unlock(&spu_full_list_mutex);
 }
+EXPORT_SYMBOL_GPL(spu_remove_dev_attr);
 
-void spu_irq_setaffinity(struct spu *spu, int cpu)
+void spu_remove_dev_attr_group(struct attribute_group *attrs)
 {
-        u64 target = iic_get_target_id(cpu);
-        u64 route = target << 48 | target << 32 | target << 16;
-        spu_int_route_set(spu, route);
+        struct spu *spu;
+
+        mutex_lock(&spu_full_list_mutex);
+        list_for_each_entry(spu, &spu_full_list, full_list)
+                sysfs_remove_group(&spu->dev.kobj, attrs);
+        mutex_unlock(&spu_full_list_mutex);
 }
-EXPORT_SYMBOL_GPL(spu_irq_setaffinity);
+EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);
 
-static void __iomem * __init map_spe_prop(struct device_node *n,
-                const char *name)
+static int spu_create_dev(struct spu *spu)
 {
-        struct address_prop {
-                unsigned long address;
-                unsigned int len;
-        } __attribute__((packed)) *prop;
-
-        void *p;
-        int proplen;
-
-        p = get_property(n, name, &proplen);
-        if (proplen != sizeof (struct address_prop))
-                return NULL;
+        int ret;
 
-        prop = p;
+        spu->dev.id = spu->number;
+        spu->dev.bus = &spu_subsys;
+        ret = device_register(&spu->dev);
+        if (ret) {
+                printk(KERN_ERR "Can't register SPU %d with sysfs\n",
+                                spu->number);
+                return ret;
+        }
 
-        return ioremap(prop->address, prop->len);
-}
+        sysfs_add_device_to_node(&spu->dev, spu->node);
 
-static void spu_unmap(struct spu *spu)
-{
-        iounmap(spu->priv2);
-        iounmap(spu->priv1);
-        iounmap(spu->problem);
-        iounmap((u8 __iomem *)spu->local_store);
+        return 0;
 }
 
-static int __init spu_map_device(struct spu *spu, struct device_node *spe)
+static int __init create_spu(void *data)
 {
-        char *prop;
+        struct spu *spu;
         int ret;
+        static int number;
+        unsigned long flags;
+        struct timespec ts;
 
-        ret = -ENODEV;
-        prop = get_property(spe, "isrc", NULL);
-        if (!prop)
+        ret = -ENOMEM;
+        spu = kzalloc(sizeof (*spu), GFP_KERNEL);
+        if (!spu)
                 goto out;
-        spu->isrc = *(unsigned int *)prop;
 
-        spu->name = get_property(spe, "name", NULL);
-        if (!spu->name)
-                goto out;
+        spu->alloc_state = SPU_FREE;
 
-        prop = get_property(spe, "local-store", NULL);
-        if (!prop)
-                goto out;
-        spu->local_store_phys = *(unsigned long *)prop;
+        spin_lock_init(&spu->register_lock);
+        spin_lock(&spu_lock);
+        spu->number = number++;
+        spin_unlock(&spu_lock);
 
-        /* we use local store as ram, not io memory */
-        spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
-        if (!spu->local_store)
-                goto out;
+        ret = spu_create_spu(spu, data);
+
+        if (ret)
+                goto out_free;
+
+        spu_mfc_sdr_setup(spu);
+        spu_mfc_sr1_set(spu, 0x33);
+        ret = spu_request_irqs(spu);
+        if (ret)
+                goto out_destroy;
+
+        ret = spu_create_dev(spu);
+        if (ret)
+                goto out_free_irqs;
+
+        mutex_lock(&cbe_spu_info[spu->node].list_mutex);
+        list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
+        cbe_spu_info[spu->node].n_spus++;
+        mutex_unlock(&cbe_spu_info[spu->node].list_mutex);
 
-        spu->problem= map_spe_prop(spe, "problem");
-        if (!spu->problem)
-                goto out_unmap;
+        mutex_lock(&spu_full_list_mutex);
+        spin_lock_irqsave(&spu_full_list_lock, flags);
+        list_add(&spu->full_list, &spu_full_list);
+        spin_unlock_irqrestore(&spu_full_list_lock, flags);
+        mutex_unlock(&spu_full_list_mutex);
 
-        spu->priv1= map_spe_prop(spe, "priv1");
-        /* priv1 is not available on a hypervisor */
+        spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
+        ktime_get_ts(&ts);
+        spu->stats.tstamp = timespec_to_ns(&ts);
+
+        INIT_LIST_HEAD(&spu->aff_list);
 
-        spu->priv2= map_spe_prop(spe, "priv2");
-        if (!spu->priv2)
-                goto out_unmap;
-        ret = 0;
         goto out;
 
-out_unmap:
-        spu_unmap(spu);
+out_free_irqs:
+        spu_free_irqs(spu);
+out_destroy:
+        spu_destroy_spu(spu);
+out_free:
+        kfree(spu);
 out:
         return ret;
 }
 
-static int __init find_spu_node_id(struct device_node *spe)
+static const char *spu_state_names[] = {
+        "user", "system", "iowait", "idle"
+};
+
+static unsigned long long spu_acct_time(struct spu *spu,
+                enum spu_utilization_state state)
 {
-        unsigned int *id;
-        struct device_node *cpu;
+        struct timespec ts;
+        unsigned long long time = spu->stats.times[state];
+
+        /*
+         * If the spu is idle or the context is stopped, utilization
+         * statistics are not updated.  Apply the time delta from the
+         * last recorded state of the spu.
+         */
+        if (spu->stats.util_state == state) {
+                ktime_get_ts(&ts);
+                time += timespec_to_ns(&ts) - spu->stats.tstamp;
+        }
+
+        return time / NSEC_PER_MSEC;
+}
 
-        cpu = spe->parent->parent;
-        id = (unsigned int *)get_property(cpu, "node-id", NULL);
+static ssize_t spu_stat_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+        struct spu *spu = container_of(dev, struct spu, dev);
 
-        return id ? *id : 0;
+        return sprintf(buf, "%s %llu %llu %llu %llu "
+                "%llu %llu %llu %llu %llu %llu %llu %llu\n",
+                spu_state_names[spu->stats.util_state],
+                spu_acct_time(spu, SPU_UTIL_USER),
+                spu_acct_time(spu, SPU_UTIL_SYSTEM),
+                spu_acct_time(spu, SPU_UTIL_IOWAIT),
+                spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
+                spu->stats.vol_ctx_switch,
+                spu->stats.invol_ctx_switch,
+                spu->stats.slb_flt,
+                spu->stats.hash_flt,
+                spu->stats.min_flt,
+                spu->stats.maj_flt,
+                spu->stats.class2_intr,
+                spu->stats.libassist);
 }
 
-static int __init create_spu(struct device_node *spe)
+static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);
+
+#ifdef CONFIG_KEXEC
+
+struct crash_spu_info {
+        struct spu *spu;
+        u32 saved_spu_runcntl_RW;
+        u32 saved_spu_status_R;
+        u32 saved_spu_npc_RW;
+        u64 saved_mfc_sr1_RW;
+        u64 saved_mfc_dar;
+        u64 saved_mfc_dsisr;
+};
+
+#define CRASH_NUM_SPUS  16      /* Enough for current hardware */
+static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
+
+static void crash_kexec_stop_spus(void)
 {
         struct spu *spu;
-        int ret;
-        static int number;
+        int i;
+        u64 tmp;
 
-        ret = -ENOMEM;
-        spu = kmalloc(sizeof (*spu), GFP_KERNEL);
-        if (!spu)
-                goto out;
+        for (i = 0; i < CRASH_NUM_SPUS; i++) {
+                if (!crash_spu_info[i].spu)
+                        continue;
 
-        ret = spu_map_device(spu, spe);
-        if (ret)
-                goto out_free;
+                spu = crash_spu_info[i].spu;
 
-        spu->node = find_spu_node_id(spe);
-        spu->stop_code = 0;
-        spu->slb_replace = 0;
-        spu->mm = NULL;
-        spu->ctx = NULL;
-        spu->rq = NULL;
-        spu->pid = 0;
-        spu->class_0_pending = 0;
-        spu->flags = 0UL;
-        spu->dar = 0UL;
-        spu->dsisr = 0UL;
-        spin_lock_init(&spu->register_lock);
+                crash_spu_info[i].saved_spu_runcntl_RW =
+                        in_be32(&spu->problem->spu_runcntl_RW);
+                crash_spu_info[i].saved_spu_status_R =
+                        in_be32(&spu->problem->spu_status_R);
+                crash_spu_info[i].saved_spu_npc_RW =
+                        in_be32(&spu->problem->spu_npc_RW);
 
-        spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
-        spu_mfc_sr1_set(spu, 0x33);
+                crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
+                crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
+                tmp = spu_mfc_sr1_get(spu);
+                crash_spu_info[i].saved_mfc_sr1_RW = tmp;
 
-        spu->ibox_callback = NULL;
-        spu->wbox_callback = NULL;
-        spu->stop_callback = NULL;
+                tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+                spu_mfc_sr1_set(spu, tmp);
 
-        down(&spu_mutex);
-        spu->number = number++;
-        ret = spu_request_irqs(spu);
-        if (ret)
-                goto out_unmap;
+                __delay(200);
+        }
+}
 
-        list_add(&spu->list, &spu_list);
-        up(&spu_mutex);
+static void crash_register_spus(struct list_head *list)
+{
+        struct spu *spu;
+        int ret;
 
-        pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n",
-                spu->name, spu->isrc, spu->local_store,
-                spu->problem, spu->priv1, spu->priv2, spu->number);
-        goto out;
+        list_for_each_entry(spu, list, full_list) {
+                if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
+                        continue;
 
-out_unmap:
-        up(&spu_mutex);
-        spu_unmap(spu);
-out_free:
-        kfree(spu);
-out:
-        return ret;
+                crash_spu_info[spu->number].spu = spu;
+        }
+
+        ret = crash_shutdown_register(&crash_kexec_stop_spus);
+        if (ret)
+                printk(KERN_ERR "Could not register SPU crash handler");
 }
 
-static void destroy_spu(struct spu *spu)
+#else
+static inline void crash_register_spus(struct list_head *list)
 {
-        list_del_init(&spu->list);
-
-        spu_free_irqs(spu);
-        spu_unmap(spu);
-        kfree(spu);
 }
+#endif
 
-static void cleanup_spu_base(void)
+static void spu_shutdown(void)
 {
-        struct spu *spu, *tmp;
-        down(&spu_mutex);
-        list_for_each_entry_safe(spu, tmp, &spu_list, list)
-                destroy_spu(spu);
-        up(&spu_mutex);
+        struct spu *spu;
+
+        mutex_lock(&spu_full_list_mutex);
+        list_for_each_entry(spu, &spu_full_list, full_list) {
+                spu_free_irqs(spu);
+                spu_destroy_spu(spu);
+        }
+        mutex_unlock(&spu_full_list_mutex);
 }
-module_exit(cleanup_spu_base);
+
+static struct syscore_ops spu_syscore_ops = {
+        .shutdown = spu_shutdown,
+};
 
 static int __init init_spu_base(void)
 {
-        struct device_node *node;
-        int ret;
+        int i, ret = 0;
 
-        ret = -ENODEV;
-        for (node = of_find_node_by_type(NULL, "spe");
-                        node; node = of_find_node_by_type(node, "spe")) {
-                ret = create_spu(node);
-                if (ret) {
-                        printk(KERN_WARNING "%s: Error initializing %s\n",
-                                __FUNCTION__, node->name);
-                        cleanup_spu_base();
-                        break;
-                }
+        for (i = 0; i < MAX_NUMNODES; i++) {
+                mutex_init(&cbe_spu_info[i].list_mutex);
+                INIT_LIST_HEAD(&cbe_spu_info[i].spus);
         }
-        /* in some old firmware versions, the spe is called 'spc', so we
-           look for that as well */
-        for (node = of_find_node_by_type(NULL, "spc");
-                        node; node = of_find_node_by_type(node, "spc")) {
-                ret = create_spu(node);
-                if (ret) {
-                        printk(KERN_WARNING "%s: Error initializing %s\n",
-                                __FUNCTION__, node->name);
-                        cleanup_spu_base();
-                        break;
-                }
+
+        if (!spu_management_ops)
+                goto out;
+
+        /* create system subsystem for spus */
+        ret = subsys_system_register(&spu_subsys, NULL);
+        if (ret)
+                goto out;
+
+        ret = spu_enumerate_spus(create_spu);
+
+        if (ret < 0) {
+                printk(KERN_WARNING "%s: Error initializing spus\n",
+                        __func__);
+                goto out_unregister_subsys;
         }
+
+        if (ret > 0)
+                fb_append_extra_logo(&logo_spe_clut224, ret);
+
+        mutex_lock(&spu_full_list_mutex);
+        xmon_register_spus(&spu_full_list);
+        crash_register_spus(&spu_full_list);
+        mutex_unlock(&spu_full_list_mutex);
+        spu_add_dev_attr(&dev_attr_stat);
+        register_syscore_ops(&spu_syscore_ops);
+
+        spu_init_affinity();
+
+        return 0;

+ out_unregister_subsys:
+        bus_unregister(&spu_subsys);
+ out:
         return ret;
 }
 module_init(init_spu_base);
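
The spu_load_slb() sequence introduced by this patch encodes a general rule for updating match hardware: the ESID (the matching half of the entry) is cleared first, so the entry can never hit while it holds a half-written ESID/VSID pair, and writing the new ESID last is what re-validates it. The round-robin replacement over the eight entries survives from the old code. Below is a minimal standalone user-space model of that ordering; it is illustrative only, and the register layout, the SLB_ESID_V value, and all names are our assumptions rather than the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define SLB_ESID_V  0x8000000ULL    /* "valid" bit; position illustrative only */
#define NR_SLB      8

struct slb_entry { volatile uint64_t esid, vsid; };

static struct slb_entry slb_cache[NR_SLB];
static int slb_replace;             /* round-robin victim pointer, as in the patch */

static void load_slb(int idx, uint64_t esid, uint64_t vsid)
{
        slb_cache[idx].esid = 0;    /* invalidate before touching the vsid */
        slb_cache[idx].vsid = vsid; /* safe: the entry cannot be matched now */
        slb_cache[idx].esid = esid; /* writing the esid revalidates the entry */
}

static void handle_segment_fault(uint64_t esid, uint64_t vsid)
{
        load_slb(slb_replace, esid | SLB_ESID_V, vsid);
        slb_replace = (slb_replace + 1) % NR_SLB;  /* wrap at 8, like the patch */
}

int main(void)
{
        for (int i = 0; i < 10; i++)
                handle_segment_fault(0x1000 + i, 0xabcd00 + i);
        for (int i = 0; i < NR_SLB; i++)
                printf("SLB[%d] esid=%#llx vsid=%#llx\n", i,
                       (unsigned long long)slb_cache[i].esid,
                       (unsigned long long)slb_cache[i].vsid);
        return 0;
}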
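The reworked spu_request_irqs() is a textbook instance of the kernel's goto-unwind idiom: each interrupt class is requested in order, and a failure jumps to a label that releases only what was already acquired, in reverse. A standalone sketch of the pattern follows; the resource names are invented for illustration.

#include <stdio.h>

static int acquire(const char *name, int fail)
{
        if (fail) {
                printf("acquire %s: failed\n", name);
                return -1;
        }
        printf("acquire %s: ok\n", name);
        return 0;
}

static void release(const char *name)
{
        printf("release %s\n", name);
}

static int request_all(int fail_at)
{
        int ret;

        ret = acquire("irq0", fail_at == 0);
        if (ret)
                goto bail0;
        ret = acquire("irq1", fail_at == 1);
        if (ret)
                goto bail1;
        ret = acquire("irq2", fail_at == 2);
        if (ret)
                goto bail2;
        return 0;

bail2:
        release("irq1");        /* undo in reverse order of acquisition */
bail1:
        release("irq0");
bail0:
        return ret;
}

int main(void)
{
        return request_all(2) ? 1 : 0;  /* fail at the last step to show the unwind */
}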
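The class 2 handler's comment ("mailbox interrupts are level triggered. mask them now before acknowledging") states a general rule for level-triggered sources: if you acknowledge while the condition still holds, the line reasserts immediately, so the source must be masked first and re-enabled only after the callback has drained it. The toy model below demonstrates the rule with a fake status/mask register pair; everything in it is an illustrative assumption, not kernel API.

#include <stdint.h>
#include <stdio.h>

#define MAILBOX_INTR 0x01u

static int mailbox_entries = 3;     /* the level condition: data present */
static uint32_t int_stat, int_mask = MAILBOX_INTR;

/* latch status: a level source stays set while its condition holds */
static void update_stat(void)
{
        if (mailbox_entries > 0)
                int_stat |= MAILBOX_INTR;
}

static void class2_handler(void)
{
        uint32_t stat;

        update_stat();
        stat = int_stat & int_mask;          /* ignore masked sources */
        int_mask &= ~(stat & MAILBOX_INTR);  /* mask before acknowledging */
        int_stat &= ~stat;                   /* acknowledge */
        update_stat();                       /* level source reasserts, but is masked */

        if (stat & MAILBOX_INTR) {
                while (mailbox_entries > 0) /* callback drains the mailbox */
                        mailbox_entries--;
                int_stat &= ~MAILBOX_INTR;  /* condition gone, the ack now sticks */
                int_mask |= MAILBOX_INTR;   /* re-enable; in the real driver the
                                             * mailbox consumer re-arms this */
        }
        printf("stat=%#x mask=%#x entries=%d\n", int_stat, int_mask,
               mailbox_entries);
}

int main(void)
{
        class2_handler();
        return 0;
}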
