Diffstat (limited to 'arch/powerpc/platforms/cell')
24 files changed, 2486 insertions, 1141 deletions
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 6a02d51086c..352bbbacde9 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -5,15 +5,24 @@ config SPU_FS
 	tristate "SPU file system"
 	default m
 	depends on PPC_CELL
+	select SPU_BASE
 	help
 	  The SPU file system is used to access Synergistic Processing
 	  Units on machines implementing the Broadband Processor
 	  Architecture.
 
+config SPU_BASE
+	bool
+	default n
+
 config SPUFS_MMAP
 	bool
 	depends on SPU_FS && SPARSEMEM
 	select MEMORY_HOTPLUG
 	default y
 
+config CBE_RAS
+	bool "RAS features for bare metal Cell BE"
+	default y
+
 endmenu
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index e570bad0639..c89cdd67383 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,16 +1,15 @@
-obj-y			+= interrupt.o iommu.o setup.o spider-pic.o
-obj-y			+= pervasive.o
+obj-$(CONFIG_PPC_CELL_NATIVE)	+= interrupt.o iommu.o setup.o \
+					cbe_regs.o spider-pic.o pervasive.o
+obj-$(CONFIG_CBE_RAS)		+= ras.o
 
-obj-$(CONFIG_SMP)	+= smp.o
-obj-$(CONFIG_SPU_FS)	+= spu-base.o spufs/
-
-spu-base-y		+= spu_base.o spu_priv1.o
+ifeq ($(CONFIG_SMP),y)
+obj-$(CONFIG_PPC_CELL_NATIVE)	+= smp.o
+endif
 
 # needed only when building loadable spufs.ko
-spufs-modular-$(CONFIG_SPU_FS)	+= spu_syscalls.o
-obj-y			+= $(spufs-modular-m)
-
-# always needed in kernel
-spufs-builtin-$(CONFIG_SPU_FS)	+= spu_callbacks.o
-obj-y			+= $(spufs-builtin-y) $(spufs-builtin-m)
+spufs-modular-$(CONFIG_SPU_FS)	+= spu_syscalls.o
+spu-priv1-$(CONFIG_PPC_CELL_NATIVE)	+= spu_priv1_mmio.o
+obj-$(CONFIG_SPU_BASE)	+= spu_callbacks.o spu_base.o \
+			   $(spufs-modular-m) \
+			   $(spu-priv1-y) spufs/
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
new file mode 100644
index 00000000000..2dfde61c841
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -0,0 +1,128 @@
+/*
+ * cbe_regs.c
+ *
+ * Accessor routines for the various MMIO register blocks of the CBE
+ *
+ * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ */
+
+
+#include <linux/config.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/prom.h>
+#include <asm/ptrace.h>
+
+#include "cbe_regs.h"
+
+#define MAX_CBE		2
+
+/*
+ * Current implementation uses "cpu" nodes. We build our own mapping
+ * array of cpu numbers to cpu nodes locally for now to allow interrupt
+ * time code to have a fast path rather than call of_get_cpu_node(). If
+ * we implement cpu hotplug, we'll have to install an appropriate notifier
+ * in order to release references to the cpu going away
+ */
+static struct cbe_regs_map
+{
+	struct device_node *cpu_node;
+	struct cbe_pmd_regs __iomem *pmd_regs;
+	struct cbe_iic_regs __iomem *iic_regs;
+} cbe_regs_maps[MAX_CBE];
+static int cbe_regs_map_count;
+
+static struct cbe_thread_map
+{
+	struct device_node *cpu_node;
+	struct cbe_regs_map *regs;
+} cbe_thread_map[NR_CPUS];
+
+static struct cbe_regs_map *cbe_find_map(struct device_node *np)
+{
+	int i;
+
+	for (i = 0; i < cbe_regs_map_count; i++)
+		if (cbe_regs_maps[i].cpu_node == np)
+			return &cbe_regs_maps[i];
+	return NULL;
+}
+
+struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
+{
+	struct cbe_regs_map *map = cbe_find_map(np);
+	if (map == NULL)
+		return NULL;
+	return map->pmd_regs;
+}
+
+struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
+{
+	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+	if (map == NULL)
+		return NULL;
+	return map->pmd_regs;
+}
+
+
+struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
+{
+	struct cbe_regs_map *map = cbe_find_map(np);
+	if (map == NULL)
+		return NULL;
+	return map->iic_regs;
+}
+struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
+{
+	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
+	if (map == NULL)
+		return NULL;
+	return map->iic_regs;
+}
+
+void __init cbe_regs_init(void)
+{
+	int i;
+	struct device_node *cpu;
+
+	/* Build local fast map of CPUs */
+	for_each_cpu(i)
+		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL);
+
+	/* Find maps for each device tree CPU */
+	for_each_node_by_type(cpu, "cpu") {
+		struct cbe_regs_map *map = &cbe_regs_maps[cbe_regs_map_count++];
+
+		/* That hack must die die die ! */
+		struct address_prop {
+			unsigned long address;
+			unsigned int len;
+		} __attribute__((packed)) *prop;
+
+
+		if (cbe_regs_map_count > MAX_CBE) {
+			printk(KERN_ERR "cbe_regs: More BE chips than supported"
+			       "!\n");
+			cbe_regs_map_count--;
+			return;
+		}
+		map->cpu_node = cpu;
+		for_each_cpu(i)
+			if (cbe_thread_map[i].cpu_node == cpu)
+				cbe_thread_map[i].regs = map;
+
+		prop = (struct address_prop *)get_property(cpu, "pervasive",
+							   NULL);
+		if (prop != NULL)
+			map->pmd_regs = ioremap(prop->address, prop->len);
+
+		prop = (struct address_prop *)get_property(cpu, "iic",
+							   NULL);
+		if (prop != NULL)
+			map->iic_regs = ioremap(prop->address, prop->len);
+	}
+}
+
diff --git a/arch/powerpc/platforms/cell/cbe_regs.h b/arch/powerpc/platforms/cell/cbe_regs.h
new file mode 100644
index 00000000000..e76e4a6af5b
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cbe_regs.h
@@ -0,0 +1,129 @@
+/*
+ * cbe_regs.h
+ *
+ * This file is intended to hold the various register definitions for CBE
+ * on-chip system devices (memory controller, IO controller, etc...)
+ *
+ * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
+ */
+
+#ifndef CBE_REGS_H
+#define CBE_REGS_H
+
+/*
+ *
+ * Some HID register definitions
+ *
+ */
+
+/* CBE specific HID0 bits */
+#define HID0_CBE_THERM_WAKEUP	0x0000020000000000ul
+#define HID0_CBE_SYSERR_WAKEUP	0x0000008000000000ul
+#define HID0_CBE_THERM_INT_EN	0x0000000400000000ul
+#define HID0_CBE_SYSERR_INT_EN	0x0000000200000000ul
+
+
+/*
+ *
+ * Pervasive unit register definitions
+ *
+ */
+
+struct cbe_pmd_regs {
+	u8 pad_0x0000_0x0800[0x0800 - 0x0000];			/* 0x0000 */
+
+	/* Thermal Sensor Registers */
+	u64  ts_ctsr1;						/* 0x0800 */
+	u64  ts_ctsr2;						/* 0x0808 */
+	u64  ts_mtsr1;						/* 0x0810 */
+	u64  ts_mtsr2;						/* 0x0818 */
+	u64  ts_itr1;						/* 0x0820 */
+	u64  ts_itr2;						/* 0x0828 */
+	u64  ts_gitr;						/* 0x0830 */
+	u64  ts_isr;						/* 0x0838 */
+	u64  ts_imr;						/* 0x0840 */
+	u64  tm_cr1;						/* 0x0848 */
+	u64  tm_cr2;						/* 0x0850 */
+	u64  tm_simr;						/* 0x0858 */
+	u64  tm_tpr;						/* 0x0860 */
+	u64  tm_str1;						/* 0x0868 */
+	u64  tm_str2;						/* 0x0870 */
+	u64  tm_tsr;						/* 0x0878 */
+
+	/* Power Management */
+	u64  pm_control;					/* 0x0880 */
+#define CBE_PMD_PAUSE_ZERO_CONTROL	0x10000
+	u64  pm_status;						/* 0x0888 */
+
+	/* Time Base Register */
+	u64  tbr;						/* 0x0890 */
+
+	u8 pad_0x0898_0x0c00 [0x0c00 - 0x0898];			/* 0x0898 */
+
+	/* Fault Isolation Registers */
+	u64  checkstop_fir;					/* 0x0c00 */
+	u64  recoverable_fir;
+	u64  spec_att_mchk_fir;
+	u64  fir_mode_reg;
+	u64  fir_enable_mask;
+
+	u8 pad_0x0c28_0x1000 [0x1000 - 0x0c28];			/* 0x0c28 */
+};
+
+extern struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np);
+extern struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu);
+
+/*
+ *
+ * IIC unit register definitions
+ *
+ */
+
+struct cbe_iic_pending_bits {
+	u32 data;
+	u8 flags;
+	u8 class;
+	u8 source;
+	u8 prio;
+};
+
+#define CBE_IIC_IRQ_VALID	0x80
+#define CBE_IIC_IRQ_IPI		0x40
+
+struct cbe_iic_thread_regs {
+	struct cbe_iic_pending_bits pending;
+	struct cbe_iic_pending_bits pending_destr;
+	u64 generate;
+	u64 prio;
+};
+
+struct cbe_iic_regs {
+	u8 pad_0x0000_0x0400[0x0400 - 0x0000];			/* 0x0000 */
+
+	/* IIC interrupt registers */
+	struct cbe_iic_thread_regs thread[2];			/* 0x0400 */
+	u64 iic_ir;						/* 0x0440 */
+	u64 iic_is;						/* 0x0448 */
+
+	u8 pad_0x0450_0x0500[0x0500 - 0x0450];			/* 0x0450 */
+
+	/* IOC FIR */
+	u64 ioc_fir_reset;					/* 0x0500 */
+	u64 ioc_fir_set;
+	u64 ioc_checkstop_enable;
+	u64 ioc_fir_error_mask;
+	u64 ioc_syserr_enable;
+	u64 ioc_fir;
+
+	u8 pad_0x0530_0x1000[0x1000 - 0x0530];			/* 0x0530 */
+};
+
+extern struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np);
+extern struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu);
+
+
+/* Init this module early */
+extern void cbe_regs_init(void);
+
+
+#endif /* CBE_REGS_H */
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 978be1c30c1..f4e2d8805c9 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -33,29 +33,10 @@
 #include <asm/ptrace.h>
 
 #include "interrupt.h"
-
-struct iic_pending_bits {
-	u32 data;
-	u8 flags;
-	u8 class;
-	u8 source;
-	u8 prio;
-};
-
-enum iic_pending_flags {
-	IIC_VALID = 0x80,
-	IIC_IPI = 0x40,
-};
-
-struct iic_regs {
-	struct iic_pending_bits pending;
-	struct iic_pending_bits pending_destr;
-	u64 generate;
-	u64 prio;
-};
+#include "cbe_regs.h"
 
 struct iic {
-	struct iic_regs __iomem *regs;
+	struct cbe_iic_thread_regs __iomem *regs;
 	u8 target_id;
 };
 
@@ -115,7 +96,7 @@ static struct hw_interrupt_type iic_pic = {
 	.end = iic_end,
 };
 
-static int iic_external_get_irq(struct iic_pending_bits pending)
+static int iic_external_get_irq(struct cbe_iic_pending_bits pending)
 {
 	int irq;
 	unsigned char node, unit;
@@ -136,8 +117,7 @@ static int iic_external_get_irq(struct iic_pending_bits pending)
 		 * One of these units can be connected
 		 * to an external interrupt controller.
 		 */
-		if (pending.prio > 0x3f ||
-		    pending.class != 2)
+		if (pending.class != 2)
 			break;
 		irq = IIC_EXT_OFFSET
 			+ spider_get_irq(node)
@@ -168,15 +148,15 @@ int iic_get_irq(struct pt_regs *regs)
 {
 	struct iic *iic;
 	int irq;
-	struct iic_pending_bits pending;
+	struct cbe_iic_pending_bits pending;
 
 	iic = &__get_cpu_var(iic);
 	*(unsigned long *) &pending =
 		in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
 
 	irq = -1;
-	if (pending.flags & IIC_VALID) {
-		if (pending.flags & IIC_IPI) {
+	if (pending.flags & CBE_IIC_IRQ_VALID) {
+		if (pending.flags & CBE_IIC_IRQ_IPI) {
 			irq = IIC_IPI_OFFSET + (pending.prio >> 4);
 /*
 			if (irq > 0x80)
@@ -226,7 +206,7 @@ static int setup_iic_hardcoded(void)
 			regs += 0x20;
 
 		printk(KERN_INFO "IIC for CPU %d at %lx\n", cpu, regs);
-		iic->regs = ioremap(regs, sizeof(struct iic_regs));
+		iic->regs = ioremap(regs, sizeof(struct cbe_iic_thread_regs));
 		iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
 	}
 
@@ -267,12 +247,12 @@ static int setup_iic(void)
 		}
 
 		iic = &per_cpu(iic, np[0]);
-		iic->regs = ioremap(regs[0], sizeof(struct iic_regs));
+		iic->regs = ioremap(regs[0], sizeof(struct cbe_iic_thread_regs));
 		iic->target_id = ((np[0] & 2) << 3) + ((np[0] & 1) ? 0xf : 0xe);
 		printk("IIC for CPU %d at %lx mapped to %p\n", np[0], regs[0], iic->regs);
 
 		iic = &per_cpu(iic, np[1]);
-		iic->regs = ioremap(regs[2], sizeof(struct iic_regs));
+		iic->regs = ioremap(regs[2], sizeof(struct cbe_iic_thread_regs));
 		iic->target_id = ((np[1] & 2) << 3) + ((np[1] & 1) ? 0xf : 0xe);
 		printk("IIC for CPU %d at %lx mapped to %p\n", np[1], regs[2], iic->regs);
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index a49ceb799a8..a35004e14c6 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -473,6 +473,16 @@ static int cell_dma_supported(struct device *dev, u64 mask)
 	return mask < 0x100000000ull;
 }
 
+static struct dma_mapping_ops cell_iommu_ops = {
+	.alloc_coherent = cell_alloc_coherent,
+	.free_coherent = cell_free_coherent,
+	.map_single = cell_map_single,
+	.unmap_single = cell_unmap_single,
+	.map_sg = cell_map_sg,
+	.unmap_sg = cell_unmap_sg,
+	.dma_supported = cell_dma_supported,
+};
+
 void cell_init_iommu(void)
 {
 	int setup_bus = 0;
@@ -498,11 +508,5 @@ void cell_init_iommu(void)
 		}
 	}
 
-	pci_dma_ops.alloc_coherent = cell_alloc_coherent;
-	pci_dma_ops.free_coherent = cell_free_coherent;
-	pci_dma_ops.map_single = cell_map_single;
-	pci_dma_ops.unmap_single = cell_unmap_single;
-	pci_dma_ops.map_sg = cell_map_sg;
-	pci_dma_ops.unmap_sg = cell_unmap_sg;
-	pci_dma_ops.dma_supported = cell_dma_supported;
+	pci_dma_ops = cell_iommu_ops;
 }
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 7eed8c62451..695ac4e1617 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -37,36 +37,28 @@
 #include <asm/reg.h>
 
 #include "pervasive.h"
+#include "cbe_regs.h"
 
 static DEFINE_SPINLOCK(cbe_pervasive_lock);
-struct cbe_pervasive {
-	struct pmd_regs __iomem *regs;
-	unsigned int thread;
-};
-
-/* can't use per_cpu from setup_arch */
-static struct cbe_pervasive cbe_pervasive[NR_CPUS];
 
 static void __init cbe_enable_pause_zero(void)
 {
 	unsigned long thread_switch_control;
 	unsigned long temp_register;
-	struct cbe_pervasive *p;
-	int thread;
+	struct cbe_pmd_regs __iomem *pregs;
 
 	spin_lock_irq(&cbe_pervasive_lock);
-	p = &cbe_pervasive[smp_processor_id()];
-
-	if (!cbe_pervasive->regs)
+	pregs = cbe_get_cpu_pmd_regs(smp_processor_id());
+	if (pregs == NULL)
 		goto out;
 
 	pr_debug("Power Management: CPU %d\n", smp_processor_id());
 
 	/* Enable Pause(0) control bit */
-	temp_register = in_be64(&p->regs->pm_control);
+	temp_register = in_be64(&pregs->pm_control);
 
-	out_be64(&p->regs->pm_control,
-		 temp_register|PMD_PAUSE_ZERO_CONTROL);
+	out_be64(&pregs->pm_control,
+		 temp_register | CBE_PMD_PAUSE_ZERO_CONTROL);
 
 	/* Enable DEC and EE interrupt request */
 	thread_switch_control  = mfspr(SPRN_TSC_CELL);
@@ -75,25 +67,16 @@ static void __init cbe_enable_pause_zero(void)
 	switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
 	case CTRL_CT0:
 		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
-		thread = 0;
 		break;
 	case CTRL_CT1:
 		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
-		thread = 1;
 		break;
 	default:
 		printk(KERN_WARNING "%s: unknown configuration\n",
 			__FUNCTION__);
-		thread = -1;
 		break;
 	}
 
-	if (p->thread != thread)
-		printk(KERN_WARNING "%s: device tree inconsistant, "
-		       "cpu %i: %d/%d\n", __FUNCTION__,
-		       smp_processor_id(),
-		       p->thread, thread);
-
 	mtspr(SPRN_TSC_CELL, thread_switch_control);
 
 out:
@@ -104,6 +87,11 @@ static void cbe_idle(void)
 {
 	unsigned long ctrl;
 
+	/* Why do we do that on every idle ? Couldn't that be done once for
+	 * all or do we lose the state some way ? Also, the pm_control
+	 * register setting, that can't be set once at boot ? We really want
+	 * to move that away in order to implement a simple powersave
+	 */
 	cbe_enable_pause_zero();
 
 	while (1) {
@@ -152,8 +140,15 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
 		timer_interrupt(regs);
 		break;
 	case SRR1_WAKEMT:
-		/* no action required */
 		break;
+#ifdef CONFIG_CBE_RAS
+	case SRR1_WAKESYSERR:
+		cbe_system_error_exception(regs);
+		break;
+	case SRR1_WAKETHERM:
+		cbe_thermal_exception(regs);
+		break;
+#endif /* CONFIG_CBE_RAS */
 	default:
 		/* do system reset */
 		return 0;
@@ -162,68 +157,11 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
 	return 1;
 }
 
-static int __init cbe_find_pmd_mmio(int cpu, struct cbe_pervasive *p)
-{
-	struct device_node *node;
-	unsigned int *int_servers;
-	char *addr;
-	unsigned long real_address;
-	unsigned int size;
-
-	struct pmd_regs __iomem *pmd_mmio_area;
-	int hardid, thread;
-	int proplen;
-
-	pmd_mmio_area = NULL;
-	hardid = get_hard_smp_processor_id(cpu);
-	for (node = NULL; (node = of_find_node_by_type(node, "cpu"));) {
-		int_servers = (void *) get_property(node,
-				"ibm,ppc-interrupt-server#s", &proplen);
-		if (!int_servers) {
-			printk(KERN_WARNING "%s misses "
-			       "ibm,ppc-interrupt-server#s property",
-			       node->full_name);
-			continue;
-		}
-		for (thread = 0; thread < proplen / sizeof (int); thread++) {
-			if (hardid == int_servers[thread]) {
-				addr = get_property(node, "pervasive", NULL);
-				goto found;
-			}
-		}
-	}
-
-	printk(KERN_WARNING "%s: CPU %d not found\n", __FUNCTION__, cpu);
-	return -EINVAL;
-
-found:
-	real_address = *(unsigned long*) addr;
-	addr += sizeof (unsigned long);
-	size = *(unsigned int*) addr;
-
-	pr_debug("pervasive area for CPU %d at %lx, size %x\n",
-		 cpu, real_address, size);
-	p->regs = ioremap(real_address, size);
-	p->thread = thread;
-	return 0;
-}
-
-void __init cell_pervasive_init(void)
+void __init cbe_pervasive_init(void)
 {
-	struct cbe_pervasive *p;
-	int cpu;
-	int ret;
-
 	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
 		return;
 
-	for_each_possible_cpu(cpu) {
-		p = &cbe_pervasive[cpu];
-		ret = cbe_find_pmd_mmio(cpu, p);
-		if (ret)
-			return;
-	}
-
 	ppc_md.idle_loop = cbe_idle;
 	ppc_md.system_reset_exception = cbe_system_reset_exception;
 }
diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h
index da1fb85ca3e..7b50947f804 100644
--- a/arch/powerpc/platforms/cell/pervasive.h
+++ b/arch/powerpc/platforms/cell/pervasive.h
@@ -25,38 +25,9 @@
 #ifndef PERVASIVE_H
 #define PERVASIVE_H
 
-struct pmd_regs {
-	u8 pad_0x0000_0x0800[0x0800 - 0x0000];			/* 0x0000 */
-
-	/* Thermal Sensor Registers */
-	u64  ts_ctsr1;						/* 0x0800 */
-	u64  ts_ctsr2;						/* 0x0808 */
-	u64  ts_mtsr1;						/* 0x0810 */
-	u64  ts_mtsr2;						/* 0x0818 */
-	u64  ts_itr1;						/* 0x0820 */
-	u64  ts_itr2;						/* 0x0828 */
-	u64  ts_gitr;						/* 0x0830 */
-	u64  ts_isr;						/* 0x0838 */
-	u64  ts_imr;						/* 0x0840 */
-	u64  tm_cr1;						/* 0x0848 */
-	u64  tm_cr2;						/* 0x0850 */
-	u64  tm_simr;						/* 0x0858 */
-	u64  tm_tpr;						/* 0x0860 */
-	u64  tm_str1;						/* 0x0868 */
-	u64  tm_str2;						/* 0x0870 */
-	u64  tm_tsr;						/* 0x0878 */
-
-	/* Power Management */
-	u64  pm_control;					/* 0x0880 */
-#define PMD_PAUSE_ZERO_CONTROL	0x10000
-	u64  pm_status;						/* 0x0888 */
-
-	/* Time Base Register */
-	u64  tbr;						/* 0x0890 */
-
-	u8 pad_0x0898_0x1000 [0x1000 - 0x0898];			/* 0x0898 */
-};
-
-void __init cell_pervasive_init(void);
+extern void cbe_pervasive_init(void);
+extern void cbe_system_error_exception(struct pt_regs *regs);
+extern void cbe_maintenance_exception(struct pt_regs *regs);
+extern void cbe_thermal_exception(struct pt_regs *regs);
 
 #endif
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
new file mode 100644
index 00000000000..033ad6e2827
--- /dev/null
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -0,0 +1,112 @@
+#define DEBUG
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+
+#include <asm/reg.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+
+#include "ras.h"
+#include "cbe_regs.h"
+
+
+static void dump_fir(int cpu)
+{
+	struct cbe_pmd_regs __iomem *pregs = cbe_get_cpu_pmd_regs(cpu);
+	struct cbe_iic_regs __iomem *iregs = cbe_get_cpu_iic_regs(cpu);
+
+	if (pregs == NULL)
+		return;
+
+	/* Todo: do some nicer parsing of bits and based on them go down
+	 * to other sub-units FIRs and not only IIC
+	 */
+	printk(KERN_ERR "Global Checkstop FIR    : 0x%016lx\n",
+	       in_be64(&pregs->checkstop_fir));
+	printk(KERN_ERR "Global Recoverable FIR  : 0x%016lx\n",
+	       in_be64(&pregs->recoverable_fir));
+	printk(KERN_ERR "Global MachineCheck FIR : 0x%016lx\n",
+	       in_be64(&pregs->spec_att_mchk_fir));
+
+	if (iregs == NULL)
+		return;
+	printk(KERN_ERR "IOC FIR                 : 0x%016lx\n",
+	       in_be64(&iregs->ioc_fir));
+
+}
+
+void cbe_system_error_exception(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+
+	printk(KERN_ERR "System Error Interrupt on CPU %d !\n", cpu);
+	dump_fir(cpu);
+	dump_stack();
+}
+
+void cbe_maintenance_exception(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+
+	/*
+	 * Nothing implemented for the maintenance interrupt at this point
+	 */
+
+	printk(KERN_ERR "Unhandled Maintenance interrupt on CPU %d !\n", cpu);
+	dump_stack();
+}
+
+void cbe_thermal_exception(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+
+	/*
+	 * Nothing implemented for the thermal interrupt at this point
+	 */
+
+	printk(KERN_ERR "Unhandled Thermal interrupt on CPU %d !\n", cpu);
+	dump_stack();
+}
+
+static int cbe_machine_check_handler(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+
+	printk(KERN_ERR "Machine Check Interrupt on CPU %d !\n", cpu);
+	dump_fir(cpu);
+
+	/* No recovery from this code now, lets continue */
+	return 0;
+}
+
+void __init cbe_ras_init(void)
+{
+	unsigned long hid0;
+
+	/*
+	 * Enable System Error & thermal interrupts and wakeup conditions
+	 */
+
+	hid0 = mfspr(SPRN_HID0);
+	hid0 |= HID0_CBE_THERM_INT_EN | HID0_CBE_THERM_WAKEUP |
+		HID0_CBE_SYSERR_INT_EN | HID0_CBE_SYSERR_WAKEUP;
+	mtspr(SPRN_HID0, hid0);
+	mb();
+
+	/*
+	 * Install machine check handler. Leave setting of precise mode to
+	 * what the firmware did for now
+	 */
+	ppc_md.machine_check_exception = cbe_machine_check_handler;
+	mb();
+
+	/*
+	 * For now, we assume that IOC_FIR is already set to forward some
+	 * error conditions to the System Error handler. If that is not true
+	 * then it will have to be fixed up here.
+	 */
+}
diff --git a/arch/powerpc/platforms/cell/ras.h b/arch/powerpc/platforms/cell/ras.h
new file mode 100644
index 00000000000..eb7ee54c82a
--- /dev/null
+++ b/arch/powerpc/platforms/cell/ras.h
@@ -0,0 +1,9 @@
+#ifndef RAS_H
+#define RAS_H
+
+extern void cbe_system_error_exception(struct pt_regs *regs);
+extern void cbe_maintenance_exception(struct pt_regs *regs);
+extern void cbe_thermal_exception(struct pt_regs *regs);
+extern void cbe_ras_init(void);
+
+#endif /* RAS_H */
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index fd3e5609e3e..3d1831d331e 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -49,10 +49,13 @@
 #include <asm/ppc-pci.h>
 #include <asm/irq.h>
 #include <asm/spu.h>
+#include <asm/spu_priv1.h>
 
 #include "interrupt.h"
 #include "iommu.h"
+#include "cbe_regs.h"
 #include "pervasive.h"
+#include "ras.h"
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -81,6 +84,15 @@ static void __init cell_setup_arch(void)
 {
 	ppc_md.init_IRQ       = iic_init_IRQ;
 	ppc_md.get_irq        = iic_get_irq;
+#ifdef CONFIG_SPU_BASE
+	spu_priv1_ops         = &spu_priv1_mmio_ops;
+#endif
+
+	cbe_regs_init();
+
+#ifdef CONFIG_CBE_RAS
+	cbe_ras_init();
+#endif
 
 #ifdef CONFIG_SMP
 	smp_init_cell();
@@ -98,7 +110,7 @@ static void __init cell_setup_arch(void)
 	init_pci_config_tokens();
 	find_and_init_phbs();
 	spider_init_IRQ();
-	cell_pervasive_init();
+	cbe_pervasive_init();
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
 #endif
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index ad141fe8d52..db82f503ba2 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -34,10 +34,15 @@
 #include <asm/prom.h>
 #include <linux/mutex.h>
 #include <asm/spu.h>
+#include <asm/spu_priv1.h>
 #include <asm/mmu_context.h>
 
 #include "interrupt.h"
 
+const struct spu_priv1_ops *spu_priv1_ops;
+
+EXPORT_SYMBOL_GPL(spu_priv1_ops);
+
 static int __spu_trap_invalid_dma(struct spu *spu)
 {
 	pr_debug("%s\n", __FUNCTION__);
@@ -71,7 +76,7 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
 	struct mm_struct *mm = spu->mm;
-	u64 esid, vsid;
+	u64 esid, vsid, llp;
 
 	pr_debug("%s\n", __FUNCTION__);
 
@@ -91,9 +96,14 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 	}
 
 	esid = (ea & ESID_MASK) | SLB_ESID_V;
-	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
+#ifdef CONFIG_HUGETLB_PAGE
 	if (in_hugepage_area(mm->context, ea))
-		vsid |= SLB_VSID_L;
+		llp = mmu_psize_defs[mmu_huge_psize].sllp;
+	else
+#endif
+		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
+			SLB_VSID_USER | llp;
 
 	out_be64(&priv2->slb_index_W, spu->slb_replace);
 	out_be64(&priv2->slb_vsid_RW, vsid);
@@ -130,57 +140,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 	spu->dar = ea;
 	spu->dsisr = dsisr;
 	mb();
-	if (spu->stop_callback)
-		spu->stop_callback(spu);
-	return 0;
-}
-
-static int __spu_trap_mailbox(struct spu *spu)
-{
-	if (spu->ibox_callback)
-		spu->ibox_callback(spu);
-
-	/* atomically disable SPU mailbox interrupts */
-	spin_lock(&spu->register_lock);
-	spu_int_mask_and(spu, 2, ~0x1);
-	spin_unlock(&spu->register_lock);
-	return 0;
-}
-
-static int __spu_trap_stop(struct spu *spu)
-{
-	pr_debug("%s\n", __FUNCTION__);
-	spu->stop_code = in_be32(&spu->problem->spu_status_R);
-	if (spu->stop_callback)
-		spu->stop_callback(spu);
-	return 0;
-}
-
-static int __spu_trap_halt(struct spu *spu)
-{
-	pr_debug("%s\n", __FUNCTION__);
-	spu->stop_code = in_be32(&spu->problem->spu_status_R);
-	if (spu->stop_callback)
-		spu->stop_callback(spu);
-	return 0;
-}
-
-static int __spu_trap_tag_group(struct spu *spu)
-{
-	pr_debug("%s\n", __FUNCTION__);
-	spu->mfc_callback(spu);
-	return 0;
-}
-
-static int __spu_trap_spubox(struct spu *spu)
-{
-	if (spu->wbox_callback)
-		spu->wbox_callback(spu);
-
-	/* atomically disable SPU mailbox interrupts */
-	spin_lock(&spu->register_lock);
-	spu_int_mask_and(spu, 2, ~0x10);
-	spin_unlock(&spu->register_lock);
+	spu->stop_callback(spu);
 	return 0;
 }
 
@@ -191,8 +151,7 @@ spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
 
 	spu = data;
 	spu->class_0_pending = 1;
-	if (spu->stop_callback)
-		spu->stop_callback(spu);
+	spu->stop_callback(spu);
 
 	return IRQ_HANDLED;
 }
@@ -270,29 +229,38 @@ spu_irq_class_2(int irq, void *data, stru |
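The spu_base.c hunks above start routing privileged SPU register access through the new spu_priv1_ops pointer, which cell_setup_arch() points at spu_priv1_mmio_ops; the ops structure itself lives in asm/spu_priv1.h and is outside this diff. Below is a minimal illustrative sketch of that indirection only -- the member and wrapper details are assumptions for the example, not taken from the patch:

/* Illustrative sketch, not part of the patch: the real struct
 * spu_priv1_ops and its wrappers are declared in asm/spu_priv1.h. */
#include <linux/types.h>

struct spu;	/* opaque here; defined in asm/spu.h */

struct spu_priv1_ops {
	/* assumed member: and-mask an interrupt mask register of one SPU,
	 * as used by calls like spu_int_mask_and(spu, 2, ~0x1) above */
	void (*int_mask_and)(struct spu *spu, int class, u64 mask);
};

/* Set once at boot (e.g. spu_priv1_ops = &spu_priv1_mmio_ops; on bare
 * metal), so a different backend can be plugged in without touching
 * callers such as spu_base.c. */
extern const struct spu_priv1_ops *spu_priv1_ops;

static inline void spu_int_mask_and(struct spu *spu, int class, u64 mask)
{
	spu_priv1_ops->int_mask_and(spu, class, mask);
}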