Diffstat (limited to 'arch/powerpc/platforms/cell')
37 files changed, 325 insertions, 975 deletions
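The dominant change in this series is the conversion of the Cell interrupt controllers from the old struct irq_host API to struct irq_domain (alongside the removal of the cbe_cpufreq driver, VPN-based hash-table encoding in beat_htab, and of_irq/fd-handling API updates). A condensed before/after sketch of the irq_domain pattern, taken from the axon_msi hunks below; error handling and surrounding context are omitted, so this is an illustration of the conversion rather than a complete driver excerpt:

```c
/* Before: irq_host allocated separately, host_data assigned by hand */
static struct irq_host_ops msic_host_ops = {
	.map = msic_host_map,
};

msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
				NR_IRQS, &msic_host_ops, 0);
msic->irq_host->host_data = msic;

/* After: irq_domain created in one call; host_data (msic) is passed in,
 * and the nomap domain is capped at 65536 because this driver stashes a
 * virq in a u16 */
static const struct irq_domain_ops msic_host_ops = {
	.map = msic_host_map,
};

msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
```

The same substitution (struct irq_host -> struct irq_domain, irq_host_ops -> const struct irq_domain_ops, irq_alloc_host -> irq_domain_add_nomap/irq_domain_add_linear) repeats in beat_interrupt.c, interrupt.c, and spider-pic.c below.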
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 2e7ff0c5cf4..9978f594cac 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig @@ -113,34 +113,10 @@ config CBE_THERM default m depends on CBE_RAS && SPU_BASE -config CBE_CPUFREQ - tristate "CBE frequency scaling" - depends on CBE_RAS && CPU_FREQ - default m - help - This adds the cpufreq driver for Cell BE processors. - For details, take a look at <file:Documentation/cpu-freq/>. - If you don't have such processor, say N - -config CBE_CPUFREQ_PMI_ENABLE - bool "CBE frequency scaling using PMI interface" - depends on CBE_CPUFREQ && EXPERIMENTAL - default n - help - Select this, if you want to use the PMI interface - to switch frequencies. Using PMI, the - processor will not only be able to run at lower speed, - but also at lower core voltage. - -config CBE_CPUFREQ_PMI - tristate - depends on CBE_CPUFREQ_PMI_ENABLE - default CBE_CPUFREQ - config PPC_PMI tristate default y - depends on CBE_CPUFREQ_PMI || PPC_IBM_CELL_POWERBUTTON + depends on CPU_FREQ_CBE_PMI || PPC_IBM_CELL_POWERBUTTON help PMI (Platform Management Interrupt) is a way to communicate with the BMC (Baseboard Management Controller). diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile index a4a89350bcf..fe053e7c73e 100644 --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile @@ -5,9 +5,6 @@ obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ obj-$(CONFIG_CBE_RAS) += ras.o obj-$(CONFIG_CBE_THERM) += cbe_thermal.o -obj-$(CONFIG_CBE_CPUFREQ_PMI) += cbe_cpufreq_pmi.o -obj-$(CONFIG_CBE_CPUFREQ) += cbe-cpufreq.o -cbe-cpufreq-y += cbe_cpufreq_pervasive.o cbe_cpufreq.o obj-$(CONFIG_CBE_CPUFREQ_SPU_GOVERNOR) += cpufreq_spudemand.o obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON) += cbe_powerbutton.o diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 40a6e34793b..85825b5401e 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -67,7 +67,7 @@ struct axon_msic { - struct irq_host *irq_host; + struct irq_domain *irq_domain; __le32 *fifo_virt; dma_addr_t fifo_phys; dcr_host_t dcr_host; @@ -114,7 +114,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) pr_devel("axon_msi: woff %x roff %x msi %x\n", write_offset, msic->read_offset, msi); - if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) { + if (msi < nr_irqs && irq_get_chip_data(msi) == msic) { generic_handle_irq(msi); msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); } else { @@ -152,7 +152,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) static struct axon_msic *find_msi_translator(struct pci_dev *dev) { - struct irq_host *irq_host; + struct irq_domain *irq_domain; struct device_node *dn, *tmp; const phandle *ph; struct axon_msic *msic = NULL; @@ -184,14 +184,14 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev) goto out_error; } - irq_host = irq_find_host(dn); - if (!irq_host) { - dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n", + irq_domain = irq_find_host(dn); + if (!irq_domain) { + dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n", dn->full_name); goto out_error; } - msic = irq_host->host_data; + msic = irq_domain->host_data; out_error: of_node_put(dn); @@ -276,11 +276,8 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) if (rc) return rc; - /* We rely on being able to 
stash a virq in a u16 */ - BUILD_BUG_ON(NR_IRQS > 65536); - list_for_each_entry(entry, &dev->msi_list, list) { - virq = irq_create_direct_mapping(msic->irq_host); + virq = irq_create_direct_mapping(msic->irq_domain); if (virq == NO_IRQ) { dev_warn(&dev->dev, "axon_msi: virq allocation failed!\n"); @@ -318,7 +315,7 @@ static struct irq_chip msic_irq_chip = { .name = "AXON-MSI", }; -static int msic_host_map(struct irq_host *h, unsigned int virq, +static int msic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_data(virq, h->host_data); @@ -327,7 +324,7 @@ static int msic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static struct irq_host_ops msic_host_ops = { +static const struct irq_domain_ops msic_host_ops = { .map = msic_host_map, }; @@ -337,7 +334,7 @@ static void axon_msi_shutdown(struct platform_device *device) u32 tmp; pr_devel("axon_msi: disabling %s\n", - msic->irq_host->of_node->full_name); + msic->irq_domain->of_node->full_name); tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; msic_dcr_write(msic, MSIC_CTRL_REG, tmp); @@ -392,16 +389,14 @@ static int axon_msi_probe(struct platform_device *device) } memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); - msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP, - NR_IRQS, &msic_host_ops, 0); - if (!msic->irq_host) { - printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n", + /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */ + msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic); + if (!msic->irq_domain) { + printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n", dn->full_name); goto out_free_fifo; } - msic->irq_host->host_data = msic; - irq_set_handler_data(virq, msic); irq_set_chained_handler(virq, axon_msi_cascade); pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c index 852592b2b71..affcf566d46 100644 --- a/arch/powerpc/platforms/cell/beat.c +++ b/arch/powerpc/platforms/cell/beat.c @@ -136,9 +136,9 @@ ssize_t beat_nvram_get_size(void) return BEAT_NVRAM_SIZE; } -int beat_set_xdabr(unsigned long dabr) +int beat_set_xdabr(unsigned long dabr, unsigned long dabrx) { - if (beat_set_dabr(dabr, DABRX_KERNEL | DABRX_USER)) + if (beat_set_dabr(dabr, dabrx)) return -1; return 0; } diff --git a/arch/powerpc/platforms/cell/beat.h b/arch/powerpc/platforms/cell/beat.h index 32c8efcedc8..bfcb8e351ae 100644 --- a/arch/powerpc/platforms/cell/beat.h +++ b/arch/powerpc/platforms/cell/beat.h @@ -32,7 +32,7 @@ void beat_get_rtc_time(struct rtc_time *); ssize_t beat_nvram_get_size(void); ssize_t beat_nvram_read(char *, size_t, loff_t *); ssize_t beat_nvram_write(char *, size_t, loff_t *); -int beat_set_xdabr(unsigned long); +int beat_set_xdabr(unsigned long, unsigned long); void beat_power_save(void); void beat_kexec_cpu_down(int, int); diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c index 2516c1cf846..d4d245c0d78 100644 --- a/arch/powerpc/platforms/cell/beat_htab.c +++ b/arch/powerpc/platforms/cell/beat_htab.c @@ -88,14 +88,13 @@ static inline unsigned int beat_read_mask(unsigned hpte_group) } static long beat_lpar_hpte_insert(unsigned long hpte_group, - unsigned long va, unsigned long pa, + unsigned long vpn, unsigned long pa, unsigned long rflags, unsigned long vflags, - int psize, int ssize) + int psize, int apsize, int ssize) { unsigned long 
lpar_rc; u64 hpte_v, hpte_r, slot; - /* same as iseries */ if (vflags & HPTE_V_SECONDARY) return -1; @@ -104,15 +103,15 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group, "rflags=%lx, vflags=%lx, psize=%d)\n", hpte_group, va, pa, rflags, vflags, psize); - hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | + hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) | vflags | HPTE_V_VALID; - hpte_r = hpte_encode_r(pa, psize) | rflags; + hpte_r = hpte_encode_r(pa, psize, apsize) | rflags; if (!(vflags & HPTE_V_BOLTED)) DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); if (rflags & _PAGE_NO_CACHE) - hpte_r &= ~_PAGE_COHERENT; + hpte_r &= ~HPTE_R_M; raw_spin_lock(&beat_htab_lock); lpar_rc = beat_read_mask(hpte_group); @@ -185,14 +184,15 @@ static void beat_lpar_hptab_clear(void) */ static long beat_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp, - unsigned long va, - int psize, int ssize, int local) + unsigned long vpn, + int psize, int apsize, + int ssize, int local) { unsigned long lpar_rc; u64 dummy0, dummy1; unsigned long want_v; - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); + want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M); DBG_LOW(" update: " "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ", @@ -221,15 +221,15 @@ static long beat_lpar_hpte_updatepp(unsigned long slot, return 0; } -static long beat_lpar_hpte_find(unsigned long va, int psize) +static long beat_lpar_hpte_find(unsigned long vpn, int psize) { unsigned long hash; unsigned long i, j; long slot; unsigned long want_v, hpte_v; - hash = hpt_hash(va, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M); - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); + hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M); + want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M); for (j = 0; j < 2; j++) { slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; @@ -256,14 +256,15 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, int psize, int ssize) { - unsigned long lpar_rc, slot, vsid, va; + unsigned long vpn; + unsigned long lpar_rc, slot, vsid; u64 dummy0, dummy1; vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M); - va = (vsid << 28) | (ea & 0x0fffffff); + vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M); raw_spin_lock(&beat_htab_lock); - slot = beat_lpar_hpte_find(va, psize); + slot = beat_lpar_hpte_find(vpn, psize); BUG_ON(slot == -1); lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, @@ -273,8 +274,9 @@ static void beat_lpar_hpte_updateboltedpp(unsigned long newpp, BUG_ON(lpar_rc != 0); } -static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va, - int psize, int ssize, int local) +static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn, + int psize, int apsize, + int ssize, int local) { unsigned long want_v; unsigned long lpar_rc; @@ -283,7 +285,7 @@ static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long va, DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", slot, va, psize, local); - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); + want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M); raw_spin_lock_irqsave(&beat_htab_lock, flags); dummy1 = beat_lpar_hpte_getword0(slot); @@ -312,31 +314,30 @@ void __init hpte_init_beat(void) } static long beat_lpar_hpte_insert_v3(unsigned long hpte_group, - unsigned long va, unsigned long pa, + unsigned long vpn, unsigned long pa, unsigned long rflags, unsigned long vflags, - int psize, int ssize) + int psize, int apsize, int 
ssize) { unsigned long lpar_rc; u64 hpte_v, hpte_r, slot; - /* same as iseries */ if (vflags & HPTE_V_SECONDARY) return -1; if (!(vflags & HPTE_V_BOLTED)) - DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, " + DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, " "rflags=%lx, vflags=%lx, psize=%d)\n", - hpte_group, va, pa, rflags, vflags, psize); + hpte_group, vpn, pa, rflags, vflags, psize); - hpte_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M) | + hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) | vflags | HPTE_V_VALID; - hpte_r = hpte_encode_r(pa, psize) | rflags; + hpte_r = hpte_encode_r(pa, psize, apsize) | rflags; if (!(vflags & HPTE_V_BOLTED)) DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r); if (rflags & _PAGE_NO_CACHE) - hpte_r &= ~_PAGE_COHERENT; + hpte_r &= ~HPTE_R_M; /* insert into not-volted entry */ lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r, @@ -365,16 +366,17 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group, * already zero. For now I am paranoid. */ static long beat_lpar_hpte_updatepp_v3(unsigned long slot, - unsigned long newpp, - unsigned long va, - int psize, int ssize, int local) + unsigned long newpp, + unsigned long vpn, + int psize, int apsize, + int ssize, int local) { unsigned long lpar_rc; unsigned long want_v; unsigned long pss; - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); - pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; + want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M); + pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize]; DBG_LOW(" update: " "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ", @@ -394,17 +396,18 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot, return 0; } -static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long va, - int psize, int ssize, int local) +static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn, + int psize, int apsize, + int ssize, int local) { unsigned long want_v; unsigned long lpar_rc; unsigned long pss; - DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n", - slot, va, psize, local); - want_v = hpte_encode_v(va, psize, MMU_SEGSIZE_256M); - pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc; + DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n", + slot, vpn, psize, local); + want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M); + pss = (psize == MMU_PAGE_4K) ? 
-1UL : mmu_psize_defs[psize].penc[psize]; lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss); diff --git a/arch/powerpc/platforms/cell/beat_hvCall.S b/arch/powerpc/platforms/cell/beat_hvCall.S index 74c81744894..96c80190712 100644 --- a/arch/powerpc/platforms/cell/beat_hvCall.S +++ b/arch/powerpc/platforms/cell/beat_hvCall.S @@ -22,8 +22,6 @@ #include <asm/ppc_asm.h> -#define STK_PARM(i) (48 + ((i)-3)*8) - /* Not implemented on Beat, now */ #define HCALL_INST_PRECALL #define HCALL_INST_POSTCALL @@ -74,7 +72,7 @@ _GLOBAL(beat_hcall_norets8) mr r6,r7 mr r7,r8 mr r8,r9 - ld r10,STK_PARM(r10)(r1) + ld r10,STK_PARAM(R10)(r1) HVSC /* invoke the hypervisor */ @@ -94,7 +92,7 @@ _GLOBAL(beat_hcall1) HCALL_INST_PRECALL - std r4,STK_PARM(r4)(r1) /* save ret buffer */ + std r4,STK_PARAM(R4)(r1) /* save ret buffer */ mr r11,r3 mr r3,r5 @@ -108,7 +106,7 @@ _GLOBAL(beat_hcall1) HCALL_INST_POSTCALL - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARAM(R4)(r1) std r4, 0(r12) lwz r0,8(r1) @@ -125,7 +123,7 @@ _GLOBAL(beat_hcall2) HCALL_INST_PRECALL - std r4,STK_PARM(r4)(r1) /* save ret buffer */ + std r4,STK_PARAM(R4)(r1) /* save ret buffer */ mr r11,r3 mr r3,r5 @@ -139,7 +137,7 @@ _GLOBAL(beat_hcall2) HCALL_INST_POSTCALL - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARAM(R4)(r1) std r4, 0(r12) std r5, 8(r12) @@ -157,7 +155,7 @@ _GLOBAL(beat_hcall3) HCALL_INST_PRECALL - std r4,STK_PARM(r4)(r1) /* save ret buffer */ + std r4,STK_PARAM(R4)(r1) /* save ret buffer */ mr r11,r3 mr r3,r5 @@ -171,7 +169,7 @@ _GLOBAL(beat_hcall3) HCALL_INST_POSTCALL - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARAM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) @@ -190,7 +188,7 @@ _GLOBAL(beat_hcall4) HCALL_INST_PRECALL - std r4,STK_PARM(r4)(r1) /* save ret buffer */ + std r4,STK_PARAM(R4)(r1) /* save ret buffer */ mr r11,r3 mr r3,r5 @@ -204,7 +202,7 @@ _GLOBAL(beat_hcall4) HCALL_INST_POSTCALL - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARAM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) @@ -224,7 +222,7 @@ _GLOBAL(beat_hcall5) HCALL_INST_PRECALL - std r4,STK_PARM(r4)(r1) /* save ret buffer */ + std r4,STK_PARAM(R4)(r1) /* save ret buffer */ mr r11,r3 mr r3,r5 @@ -238,7 +236,7 @@ _GLOBAL(beat_hcall5) HCALL_INST_POSTCALL - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARAM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) @@ -259,7 +257,7 @@ _GLOBAL(beat_hcall6) HCALL_INST_PRECALL - std r4,STK_PARM(r4)(r1) /* save ret buffer */ + std r4,STK_PARAM(R4)(r1) /* save ret buffer */ mr r11,r3 mr r3,r5 @@ -273,7 +271,7 @@ _GLOBAL(beat_hcall6) HCALL_INST_POSTCALL - ld r12,STK_PARM(r4)(r1) + ld r12,STK_PARAM(R4)(r1) std r4, 0(r12) std r5, 8(r12) std r6, 16(r12) diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c index 55015e1f693..9e5dfbcc00a 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/arch/powerpc/platforms/cell/beat_interrupt.c @@ -34,7 +34,7 @@ static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock); static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64]; static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64]; -static struct irq_host *beatic_host; +static struct irq_domain *beatic_host; /* * In this implementation, "virq" == "IRQ plug number", @@ -122,7 +122,7 @@ static struct irq_chip beatic_pic = { * * Note that the number (virq) is already assigned at upper layer. 
*/ -static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) +static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq) { beat_destruct_irq_plug(virq); } @@ -133,7 +133,7 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) * * Note that the number (virq) is already assigned at upper layer. */ -static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, +static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { int64_t err; @@ -154,7 +154,7 @@ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, * Called from irq_create_of_mapping() only. * Note: We have only 1 entry to translate. */ -static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct, +static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -166,13 +166,13 @@ static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static int beatic_pic_host_match(struct irq_host *h, struct device_node *np) +static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np) { /* Match all */ return 1; } -static struct irq_host_ops beatic_pic_host_ops = { +static const struct irq_domain_ops beatic_pic_host_ops = { .map = beatic_pic_host_map, .unmap = beatic_pic_host_unmap, .xlate = beatic_pic_host_xlate, @@ -239,9 +239,7 @@ void __init beatic_init_IRQ(void) ppc_md.get_irq = beatic_get_irq; /* Allocate an irq host */ - beatic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, - &beatic_pic_host_ops, - 0); + beatic_host = irq_domain_add_nomap(NULL, ~0, &beatic_pic_host_ops, NULL); BUG_ON(beatic_host == NULL); irq_set_default_host(beatic_host); } @@ -250,6 +248,6 @@ void beatic_deinit_IRQ(void) { int i; - for (i = 1; i < NR_IRQS; i++) + for (i = 1; i < nr_irqs; i++) beat_destruct_irq_plug(i); } diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c deleted file mode 100644 index d4c39e32f14..00000000000 --- a/arch/powerpc/platforms/cell/cbe_cpufreq.c +++ /dev/null @@ -1,209 +0,0 @@ -/* - * cpufreq driver for the cell processor - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft <krafft@de.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#include <linux/cpufreq.h> -#include <linux/module.h> -#include <linux/of_platform.h> - -#include <asm/machdep.h> -#include <asm/prom.h> -#include <asm/cell-regs.h> -#include "cbe_cpufreq.h" - -static DEFINE_MUTEX(cbe_switch_mutex); - - -/* the CBE supports an 8 step frequency scaling */ -static struct cpufreq_frequency_table cbe_freqs[] = { - {1, 0}, - {2, 0}, - {3, 0}, - {4, 0}, - {5, 0}, - {6, 0}, - {8, 0}, - {10, 0}, - {0, CPUFREQ_TABLE_END}, -}; - -/* - * hardware specific functions - */ - -static int set_pmode(unsigned int cpu, unsigned int slow_mode) -{ - int rc; - - if (cbe_cpufreq_has_pmi) - rc = cbe_cpufreq_set_pmode_pmi(cpu, slow_mode); - else - rc = cbe_cpufreq_set_pmode(cpu, slow_mode); - - pr_debug("register contains slow mode %d\n", cbe_cpufreq_get_pmode(cpu)); - - return rc; -} - -/* - * cpufreq functions - */ - -static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) -{ - const u32 *max_freqp; - u32 max_freq; - int i, cur_pmode; - struct device_node *cpu; - - cpu = of_get_cpu_node(policy->cpu, NULL); - - if (!cpu) - return -ENODEV; - - pr_debug("init cpufreq on CPU %d\n", policy->cpu); - - /* - * Let's check we can actually get to the CELL regs - */ - if (!cbe_get_cpu_pmd_regs(policy->cpu) || - !cbe_get_cpu_mic_tm_regs(policy->cpu)) { - pr_info("invalid CBE regs pointers for cpufreq\n"); - return -EINVAL; - } - - max_freqp = of_get_property(cpu, "clock-frequency", NULL); - - of_node_put(cpu); - - if (!max_freqp) - return -EINVAL; - - /* we need the freq in kHz */ - max_freq = *max_freqp / 1000; - - pr_debug("max clock-frequency is at %u kHz\n", max_freq); - pr_debug("initializing frequency table\n"); - - /* initialize frequency table */ - for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) { - cbe_freqs[i].frequency = max_freq / cbe_freqs[i].index; - pr_debug("%d: %d\n", i, cbe_freqs[i].frequency); - } - - /* if DEBUG is enabled set_pmode() measures the latency - * of a transition */ - policy->cpuinfo.transition_latency = 25000; - - cur_pmode = cbe_cpufreq_get_pmode(policy->cpu); - pr_debug("current pmode is at %d\n",cur_pmode); - - policy->cur = cbe_freqs[cur_pmode].frequency; - -#ifdef CONFIG_SMP - cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); -#endif - - cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); - - /* this ensures that policy->cpuinfo_min - * and policy->cpuinfo_max are set correctly */ - return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs); -} - -static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) -{ - cpufreq_frequency_table_put_attr(policy->cpu); - return 0; -} - -static int cbe_cpufreq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, cbe_freqs); -} - -static int cbe_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - int rc; - struct cpufreq_freqs freqs; - unsigned int cbe_pmode_new; - - cpufreq_frequency_table_target(policy, - cbe_freqs, - target_freq, - relation, - &cbe_pmode_new); - - freqs.old = policy->cur; - freqs.new = cbe_freqs[cbe_pmode_new].frequency; - freqs.cpu = policy->cpu; - - mutex_lock(&cbe_switch_mutex); - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - pr_debug("setting frequency for cpu %d to %d kHz, " \ - "1/%d of max frequency\n", - policy->cpu, - cbe_freqs[cbe_pmode_new].frequency, - cbe_freqs[cbe_pmode_new].index); - - rc = set_pmode(policy->cpu, cbe_pmode_new); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - mutex_unlock(&cbe_switch_mutex); - - return rc; -} - 
-static struct cpufreq_driver cbe_cpufreq_driver = { - .verify = cbe_cpufreq_verify, - .target = cbe_cpufreq_target, - .init = cbe_cpufreq_cpu_init, - .exit = cbe_cpufreq_cpu_exit, - .name = "cbe-cpufreq", - .owner = THIS_MODULE, - .flags = CPUFREQ_CONST_LOOPS, -}; - -/* - * module init and destoy - */ - -static int __init cbe_cpufreq_init(void) -{ - if (!machine_is(cell)) - return -ENODEV; - - return cpufreq_register_driver(&cbe_cpufreq_driver); -} - -static void __exit cbe_cpufreq_exit(void) -{ - cpufreq_unregister_driver(&cbe_cpufreq_driver); -} - -module_init(cbe_cpufreq_init); -module_exit(cbe_cpufreq_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.h b/arch/powerpc/platforms/cell/cbe_cpufreq.h deleted file mode 100644 index c1d86bfa92f..00000000000 --- a/arch/powerpc/platforms/cell/cbe_cpufreq.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * cbe_cpufreq.h - * - * This file contains the definitions used by the cbe_cpufreq driver. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft <krafft@de.ibm.com> - * - */ - -#include <linux/cpufreq.h> -#include <linux/types.h> - -int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode); -int cbe_cpufreq_get_pmode(int cpu); - -int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode); - -#if defined(CONFIG_CBE_CPUFREQ_PMI) || defined(CONFIG_CBE_CPUFREQ_PMI_MODULE) -extern bool cbe_cpufreq_has_pmi; -#else -#define cbe_cpufreq_has_pmi (0) -#endif diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c deleted file mode 100644 index 20472e487b6..00000000000 --- a/arch/powerpc/platforms/cell/cbe_cpufreq_pervasive.c +++ /dev/null @@ -1,115 +0,0 @@ -/* - * pervasive backend for the cbe_cpufreq driver - * - * This driver makes use of the pervasive unit to - * engage the desired frequency. - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft <krafft@de.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include <linux/io.h> -#include <linux/kernel.h> -#include <linux/time.h> -#include <asm/machdep.h> -#include <asm/hw_irq.h> -#include <asm/cell-regs.h> - -#include "cbe_cpufreq.h" - -/* to write to MIC register */ -static u64 MIC_Slow_Fast_Timer_table[] = { - [0 ... 
7] = 0x007fc00000000000ull, -}; - -/* more values for the MIC */ -static u64 MIC_Slow_Next_Timer_table[] = { - 0x0000240000000000ull, - 0x0000268000000000ull, - 0x000029C000000000ull, - 0x00002D0000000000ull, - 0x0000300000000000ull, - 0x0000334000000000ull, - 0x000039C000000000ull, - 0x00003FC000000000ull, -}; - - -int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode) -{ - struct cbe_pmd_regs __iomem *pmd_regs; - struct cbe_mic_tm_regs __iomem *mic_tm_regs; - unsigned long flags; - u64 value; -#ifdef DEBUG - long time; -#endif - - local_irq_save(flags); - - mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu); - pmd_regs = cbe_get_cpu_pmd_regs(cpu); - -#ifdef DEBUG - time = jiffies; -#endif - - out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]); - out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]); - - out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]); - out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]); - - value = in_be64(&pmd_regs->pmcr); - /* set bits to zero */ - value &= 0xFFFFFFFFFFFFFFF8ull; - /* set bits to next pmode */ - value |= pmode; - - out_be64(&pmd_regs->pmcr, value); - -#ifdef DEBUG - /* wait until new pmode appears in status register */ - value = in_be64(&pmd_regs->pmsr) & 0x07; - while (value != pmode) { - cpu_relax(); - value = in_be64(&pmd_regs->pmsr) & 0x07; - } - - time = jiffies - time; - time = jiffies_to_msecs(time); - pr_debug("had to wait %lu ms for a transition using " \ - "pervasive unit\n", time); -#endif - local_irq_restore(flags); - - return 0; -} - - -int cbe_cpufreq_get_pmode(int cpu) -{ - int ret; - struct cbe_pmd_regs __iomem *pmd_regs; - - pmd_regs = cbe_get_cpu_pmd_regs(cpu); - ret = in_be64(&pmd_regs->pmsr) & 0x07; - - return ret; -} - diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c b/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c deleted file mode 100644 index 60a07a4f932..00000000000 --- a/arch/powerpc/platforms/cell/cbe_cpufreq_pmi.c +++ /dev/null @@ -1,156 +0,0 @@ -/* - * pmi backend for the cbe_cpufreq driver - * - * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007 - * - * Author: Christian Krafft <krafft@de.ibm.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#include <linux/kernel.h> -#include <linux/types.h> -#include <linux/timer.h> -#include <linux/module.h> -#include <linux/of_platform.h> - -#include <asm/processor.h> -#include <asm/prom.h> -#include <asm/pmi.h> -#include <asm/cell-regs.h> - -#ifdef DEBUG -#include <asm/time.h> -#endif - -#include "cbe_cpufreq.h" - -static u8 pmi_slow_mode_limit[MAX_CBE]; - -bool cbe_cpufreq_has_pmi = false; -EXPORT_SYMBOL_GPL(cbe_cpufreq_has_pmi); - -/* - * hardware specific functions - */ - -int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode) -{ - int ret; - pmi_message_t pmi_msg; -#ifdef DEBUG - long time; -#endif - pmi_msg.type = PMI_TYPE_FREQ_CHANGE; - pmi_msg.data1 = cbe_cpu_to_node(cpu); - pmi_msg.data2 = pmode; - -#ifdef DEBUG - time = jiffies; -#endif - pmi_send_message(pmi_msg); - -#ifdef DEBUG - time = jiffies - time; - time = jiffies_to_msecs(time); - pr_debug("had to wait %lu ms for a transition using " \ - "PMI\n", time); -#endif - ret = pmi_msg.data2; - pr_debug("PMI returned slow mode %d\n", ret); - - return ret; -} -EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi); - - -static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg) -{ - u8 node, slow_mode; - - BUG_ON(pmi_msg.type != PMI_TYPE_FREQ_CHANGE); - - node = pmi_msg.data1; - slow_mode = pmi_msg.data2; - - pmi_slow_mode_limit[node] = slow_mode; - - pr_debug("cbe_handle_pmi: node: %d max_freq: %d\n", node, slow_mode); -} - -static int pmi_notifier(struct notifier_block *nb, - unsigned long event, void *data) -{ - struct cpufreq_policy *policy = data; - struct cpufreq_frequency_table *cbe_freqs; - u8 node; - - /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE - * and CPUFREQ_NOTIFY policy events?) - */ - if (event == CPUFREQ_START) - return 0; - - cbe_freqs = cpufreq_frequency_get_table(policy->cpu); - node = cbe_cpu_to_node(policy->cpu); - - pr_debug("got notified, event=%lu, node=%u\n", event, node); - - if (pmi_slow_mode_limit[node] != 0) { - pr_debug("limiting node %d to slow mode %d\n", - node, pmi_slow_mode_limit[node]); - - cpufreq_verify_within_limits(policy, 0, - - cbe_freqs[pmi_slow_mode_limit[node]].frequency); - } - - return 0; -} - -static struct notifier_block pmi_notifier_block = { - .notifier_call = pmi_notifier, -}; - -static struct pmi_handler cbe_pmi_handler = { - .type = PMI_TYPE_FREQ_CHANGE, - .handle_pmi_message = cbe_cpufreq_handle_pmi, -}; - - - -static int __init cbe_cpufreq_pmi_init(void) -{ - cbe_cpufreq_has_pmi = pmi_register_handler(&cbe_pmi_handler) == 0; - - if (!cbe_cpufreq_has_pmi) - return -ENODEV; - - cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); - - return 0; -} - -static void __exit cbe_cpufreq_pmi_exit(void) -{ - cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); - pmi_unregister_handler(&cbe_pmi_handler); -} - -module_init(cbe_cpufreq_pmi_init); -module_exit(cbe_cpufreq_pmi_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>"); diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c index 94560db788b..2c15ff09448 100644 --- a/arch/powerpc/platforms/cell/cbe_thermal.c +++ b/arch/powerpc/platforms/cell/cbe_thermal.c @@ -125,7 +125,7 @@ static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, i static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos) { u64 reg_value; - int temp; + unsigned int temp; u64 new_value; int ret; diff --git 
a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c index 5822141aa63..173568140a3 100644 --- a/arch/powerpc/platforms/cell/celleb_pci.c +++ b/arch/powerpc/platforms/cell/celleb_pci.c @@ -401,11 +401,11 @@ error: } else { if (config && *config) { size = 256; - free_bootmem((unsigned long)(*config), size); + free_bootmem(__pa(*config), size); } if (res && *res) { size = sizeof(struct celleb_pci_resource); - free_bootmem((unsigned long)(*res), size); + free_bootmem(__pa(*res), size); } } @@ -472,7 +472,7 @@ int __init celleb_setup_phb(struct pci_controller *phb) { struct device_node *dev = phb->dn; const struct of_device_id *match; - struct celleb_phb_spec *phb_spec; + const struct celleb_phb_spec *phb_spec; int rc; match = of_match_node(celleb_phb_match, dev); diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c index 14be2bd358b..4278acfa2ed 100644 --- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c +++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c @@ -486,7 +486,6 @@ static __init int celleb_setup_pciex(struct device_node *node, struct pci_controller *phb) { struct resource r; - struct of_irq oirq; int virq; /* SMMIO registers; used inside this file */ @@ -507,12 +506,11 @@ static __init int celleb_setup_pciex(struct device_node *node, phb->ops = &scc_pciex_pci_ops; /* internal interrupt handler */ - if (of_irq_map_one(node, 1, &oirq)) { + virq = irq_of_parse_and_map(node, 1); + if (!virq) { pr_err("PCIEXC:Failed to map irq\n"); goto error; } - virq = irq_create_of_mapping(oirq.controller, oirq.specifier, - oirq.size); if (request_irq(virq, pciex_handle_internal_irq, 0, "pciex", (void *)phb)) { pr_err("PCIEXC:Failed to request irq\n"); diff --git a/arch/powerpc/platforms/cell/celleb_scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c index 3a16c5b3c46..c8eb5719382 100644 --- a/arch/powerpc/platforms/cell/celleb_scc_sio.c +++ b/arch/powerpc/platforms/cell/celleb_scc_sio.c @@ -42,19 +42,18 @@ static struct { static int __init txx9_serial_init(void) { extern int early_serial_txx9_setup(struct uart_port *port); - struct device_node *node = NULL; + struct device_node *node; int i; struct uart_port req; - struct of_irq irq; + struct of_phandle_args irq; struct resource res; - while ((node = of_find_compatible_node(node, - "serial", "toshiba,sio-scc")) != NULL) { + for_each_compatible_node(node, "serial", "toshiba,sio-scc") { for (i = 0; i < ARRAY_SIZE(txx9_scc_tab); i++) { if (!(txx9_serial_bitmap & (1<<i))) continue; - if (of_irq_map_one(node, i, &irq)) + if (of_irq_parse_one(node, i, &irq)) continue; if (of_address_to_resource(node, txx9_scc_tab[i].index, &res)) @@ -67,8 +66,7 @@ static int __init txx9_serial_init(void) #ifdef CONFIG_SERIAL_TXX9_CONSOLE req.membase = ioremap(req.mapbase, 0x24); #endif - req.irq = irq_create_of_mapping(irq.controller, - irq.specifier, irq.size); + req.irq = irq_create_of_mapping(&irq); req.flags |= UPF_IOREMAP | UPF_BUGGY_UART /*HAVE_CTS_LINE*/; req.uartclk = 83300000; diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c index 23bc9db4317..82607d621ac 100644 --- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c +++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c @@ -76,7 +76,7 @@ static void spu_gov_work(struct work_struct *work) static void spu_gov_init_work(struct spu_gov_info_struct *info) { int delay = usecs_to_jiffies(info->poll_int); - INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work); + 
INIT_DEFERRABLE_WORK(&info->work, spu_gov_work); schedule_delayed_work_on(info->policy->cpu, &info->work, delay); } diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 96a433dd2d6..8a106b4172e 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -56,7 +56,7 @@ struct iic { static DEFINE_PER_CPU(struct iic, cpu_iic); #define IIC_NODE_COUNT 2 -static struct irq_host *iic_host; +static struct irq_domain *iic_host; /* Convert between "pending" bits and hw irq number */ static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) @@ -186,7 +186,7 @@ void iic_message_pass(int cpu, int msg) out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); } -struct irq_host *iic_get_irq_host(int node) +struct irq_domain *iic_get_irq_host(int node) { return iic_host; } @@ -215,20 +215,20 @@ void iic_request_IPIs(void) { iic_request_ipi(PPC_MSG_CALL_FUNCTION); iic_request_ipi(PPC_MSG_RESCHEDULE); - iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE); + iic_request_ipi(PPC_MSG_TICK_BROADCAST); iic_request_ipi(PPC_MSG_DEBUGGER_BREAK); } #endif /* CONFIG_SMP */ -static int iic_host_match(struct irq_host *h, struct device_node *node) +static int iic_host_match(struct irq_domain *h, struct device_node *node) { return of_device_is_compatible(node, "IBM,CBEA-Internal-Interrupt-Controller"); } -static int iic_host_map(struct irq_host *h, unsigned int virq, +static int iic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { switch (hw & IIC_IRQ_TYPE_MASK) { @@ -245,7 +245,7 @@ static int iic_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int iic_host_xlate(struct irq_host *h, struct device_node *ct, +static int iic_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -285,7 +285,7 @@ static int iic_host_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static struct irq_host_ops iic_host_ops = { +static const struct irq_domain_ops iic_host_ops = { .match = iic_host_match, .map = iic_host_map, .xlate = iic_host_xlate, @@ -378,8 +378,8 @@ static int __init setup_iic(void) void __init iic_init_IRQ(void) { /* Setup an irq host data structure */ - iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT, - &iic_host_ops, IIC_IRQ_INVALID); + iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops, + NULL); BUG_ON(iic_host == NULL); irq_set_default_host(iic_host); diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index ae9fc7bc17d..2b90ff8a93b 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages, io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); - for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE) + for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift) io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask); mb(); @@ -430,7 +430,7 @@ static void cell_iommu_setup_hardware(struct cbe_iommu *iommu, { cell_iommu_setup_stab(iommu, base, size, 0, 0); iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0, - IOMMU_PAGE_SHIFT); + IOMMU_PAGE_SHIFT_4K); cell_iommu_enable_hardware(iommu); } @@ -487,8 +487,10 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, window->table.it_blocksize = 16; window->table.it_base = 
(unsigned long)iommu->ptab; window->table.it_index = iommu->nid; - window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset; - window->table.it_size = size >> IOMMU_PAGE_SHIFT; + window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K; + window->table.it_offset = + (offset >> window->table.it_page_shift) + pte_offset; + window->table.it_size = size >> window->table.it_page_shift; iommu_init_table(&window->table, iommu->nid); @@ -518,7 +520,6 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, __set_bit(0, window->table.it_map); tce_build_cell(&window->table, window->table.it_offset, 1, (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL); - window->table.it_hint = window->table.it_blocksize; return window; } @@ -551,9 +552,8 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev) */ iommu = cell_iommu_for_node(dev_to_node(dev)); if (iommu == NULL || list_empty(&iommu->windows)) { - printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n", - dev->of_node ? dev->of_node->full_name : "?", - dev_to_node(dev)); + dev_err(dev, "iommu: missing iommu for %s (node %d)\n", + of_node_full_name(dev->of_node), dev_to_node(dev)); return NULL; } window = list_entry(iommu->windows.next, struct iommu_window, list); @@ -564,7 +564,8 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev) /* A coherent allocation implies strong ordering */ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag) + dma_addr_t *dma_handle, gfp_t flag, + struct dma_attrs *attrs) { if (iommu_fixed_is_weak) return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), @@ -572,18 +573,19 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, device_to_mask(dev), flag, dev_to_node(dev)); else - return dma_direct_ops.alloc_coherent(dev, size, dma_handle, - flag); + return dma_direct_ops.alloc(dev, size, dma_handle, flag, + attrs); } static void dma_fixed_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_handle) + void *vaddr, dma_addr_t dma_handle, + struct dma_attrs *attrs) { if (iommu_fixed_is_weak) iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, dma_handle); else - dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle); + dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs); } static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, @@ -642,8 +644,8 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask) static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); struct dma_map_ops dma_iommu_fixed_ops = { - .alloc_coherent = dma_fixed_alloc_coherent, - .free_coherent = dma_fixed_free_coherent, + .alloc = dma_fixed_alloc_coherent, + .free = dma_fixed_free_coherent, .map_sg = dma_fixed_map_sg, .unmap_sg = dma_fixed_unmap_sg, .dma_supported = dma_fixed_dma_supported, @@ -697,7 +699,7 @@ static int __init cell_iommu_get_window(struct device_node *np, unsigned long *base, unsigned long *size) { - const void *dma_window; + const __be32 *dma_window; unsigned long index; /* Use ibm,dma-window if available, else, hard code ! */ @@ -728,7 +730,7 @@ static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np) nid, np->full_name); /* XXX todo: If we can have multiple windows on the same IOMMU, which - * isn't the case today, we probably want here to check wether the + * isn't the case today, we probably want here to check whether the * iommu for that node is already setup. 
* However, there might be issue with getting the size right so let's * ignore that for now. We might want to completely get rid of the @@ -773,7 +775,7 @@ static void __init cell_iommu_init_one(struct device_node *np, /* Setup the iommu_table */ cell_iommu_setup_window(iommu, np, base, size, - offset >> IOMMU_PAGE_SHIFT); + offset >> IOMMU_PAGE_SHIFT_4K); } static void __init cell_disable_iommus(void) @@ -1122,7 +1124,7 @@ static int __init cell_iommu_fixed_mapping_init(void) cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize); iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0, - IOMMU_PAGE_SHIFT); + IOMMU_PAGE_SHIFT_4K); cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, fbase, fsize); cell_iommu_enable_hardware(iommu); diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c index efdacc82957..d17e98bc0c1 100644 --- a/arch/powerpc/platforms/cell/pervasive.c +++ b/arch/powerpc/platforms/cell/pervasive.c @@ -42,11 +42,9 @@ static void cbe_power_save(void) { unsigned long ctrl, thread_switch_control; - /* - * We need to hard disable interrupts, the local_irq_enable() done by - * our caller upon return will hard re-enable. - */ - hard_irq_disable(); + /* Ensure our interrupt state is properly tracked */ + if (!prep_irq_for_idle()) + return; ctrl = mfspr(SPRN_CTRLF); @@ -81,6 +79,9 @@ static void cbe_power_save(void) */ ctrl &= ~(CTRL_RUNLATCH | CTRL_TE); mtspr(SPRN_CTRLT, ctrl); + + /* Re-enable interrupts in MSR */ + __hard_irq_enable(); } static int cbe_system_reset_exception(struct pt_regs *regs) diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c index 59c1a169410..348a27b1251 100644 --- a/arch/powerpc/platforms/cell/pmu.c +++ b/arch/powerpc/platforms/cell/pmu.c @@ -382,7 +382,7 @@ static int __init cbe_init_pm_irq(void) unsigned int irq; int rc, node; - for_each_node(node) { + for_each_online_node(node) { irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI | (node << IIC_IRQ_NODE_SHIFT)); if (irq == NO_IRQ) { diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c index 7f9b6742f8b..6e3409d590a 100644 --- a/arch/powerpc/platforms/cell/qpace_setup.c +++ b/arch/powerpc/platforms/cell/qpace_setup.c @@ -61,7 +61,7 @@ static void qpace_progress(char *s, unsigned short hex) printk("*** %04x : %s\n", hex, s ? 
s : ""); } -static const struct of_device_id qpace_bus_ids[] __initdata = { +static const struct of_device_id qpace_bus_ids[] __initconst = { { .type = "soc", }, { .compatible = "soc", }, { .type = "spider", }, diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 5ec1e47a0d7..e865d748179 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c @@ -123,7 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order) area->nid = nid; area->order = order; - area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE, + area->pages = alloc_pages_exact_node(area->nid, + GFP_KERNEL|__GFP_THISNODE, area->order); if (!area->pages) { diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 62002a7edfe..6ae25fb6201 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c @@ -117,7 +117,7 @@ static void cell_fixup_pcie_rootcomplex(struct pci_dev *dev) } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, cell_fixup_pcie_rootcomplex); -static int __devinit cell_setup_phb(struct pci_controller *phb) +static int cell_setup_phb(struct pci_controller *phb) { const char *model; struct device_node *np; @@ -140,7 +140,7 @@ static int __devinit cell_setup_phb(struct pci_controller *phb) return 0; } -static const struct of_device_id cell_bus_ids[] __initdata = { +static const struct of_device_id cell_bus_ids[] __initconst = { { .type = "soc", }, { .compatible = "soc", }, { .type = "spider", }, @@ -197,7 +197,8 @@ static void __init mpic_init_IRQ(void) /* The MPIC driver will get everything it needs from the * device-tree, just pass 0 to all arguments */ - mpic = mpic_alloc(dn, 0, MPIC_SECONDARY, 0, 0, " MPIC "); + mpic = mpic_alloc(dn, 0, MPIC_SECONDARY | MPIC_NO_RESET, + 0, 0, " MPIC "); if (mpic == NULL) continue; mpic_init(mpic); diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index 4a255cf8cd1..c8017a7bcab 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c @@ -38,9 +38,9 @@ #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/firmware.h> -#include <asm/system.h> #include <asm/rtas.h> #include <asm/cputhreads.h> +#include <asm/code-patching.h> #include "interrupt.h" #include <asm/udbg.h> @@ -68,11 +68,11 @@ static cpumask_t of_spin_map; * 0 - failure * 1 - success */ -static inline int __devinit smp_startup_cpu(unsigned int lcpu) +static inline int smp_startup_cpu(unsigned int lcpu) { int status; - unsigned long start_here = __pa((u32)*((unsigned long *) - generic_secondary_smp_init)); + unsigned long start_here = + __pa(ppc_function_entry(generic_secondary_smp_init)); unsigned int pcpu; int start_cpu; @@ -109,7 +109,7 @@ static int __init smp_iic_probe(void) return cpumask_weight(cpu_possible_mask); } -static void __devinit smp_cell_setup_cpu(int cpu) +static void smp_cell_setup_cpu(int cpu) { if (cpu != boot_cpuid) iic_setup_cpu(); @@ -120,7 +120,7 @@ static void __devinit smp_cell_setup_cpu(int cpu) mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); } -static int __devinit smp_cell_kick_cpu(int nr) +static int smp_cell_kick_cpu(int nr) { BUG_ON(nr < 0 || nr >= NR_CPUS); @@ -137,25 +137,12 @@ static int __devinit smp_cell_kick_cpu(int nr) return 0; } -static int smp_cell_cpu_bootable(unsigned int nr) -{ - /* Special case - we inhibit secondary thread startup - * during boot if the user requests it. Odd-numbered - * cpus are assumed to be secondary threads. 
- */ - if (system_state < SYSTEM_RUNNING && - cpu_has_feature(CPU_FTR_SMT) && - !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0) - return 0; - - return 1; -} static struct smp_ops_t bpa_iic_smp_ops = { .message_pass = iic_message_pass, .probe = smp_iic_probe, .kick_cpu = smp_cell_kick_cpu, .setup_cpu = smp_cell_setup_cpu, - .cpu_bootable = smp_cell_cpu_bootable, + .cpu_bootable = smp_generic_cpu_bootable, }; /* This is called very early */ diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 442c28c00f8..1f72f4ab635 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c @@ -62,7 +62,7 @@ enum { #define SPIDER_IRQ_INVALID 63 struct spider_pic { - struct irq_host *host; + struct irq_domain *host; void __iomem *regs; unsigned int node_id; }; @@ -148,7 +148,7 @@ static int spider_set_irq_type(struct irq_data *d, unsigned int type) /* Configure the source. One gross hack that was there before and * that I've kept around is the priority to the BE which I set to - * be the same as the interrupt source number. I don't know wether + * be the same as the interrupt source number. I don't know whether * that's supposed to make any kind of sense however, we'll have to * decide that, but for now, I'm not changing the behaviour. */ @@ -168,7 +168,7 @@ static struct irq_chip spider_pic = { .irq_set_type = spider_set_irq_type, }; -static int spider_host_map(struct irq_host *h, unsigned int virq, +static int spider_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_data(virq, h->host_data); @@ -180,7 +180,7 @@ static int spider_host_map(struct irq_host *h, unsigned int virq, return 0; } -static int spider_host_xlate(struct irq_host *h, struct device_node *ct, +static int spider_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) @@ -194,7 +194,7 @@ static int spider_host_xlate(struct irq_host *h, struct device_node *ct, return 0; } -static struct irq_host_ops spider_host_ops = { +static const struct irq_domain_ops spider_host_ops = { .map = spider_host_map, .xlate = spider_host_xlate, }; @@ -220,7 +220,7 @@ static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) /* For hooking up the cascace we have a problem. Our device-tree is * crap and we don't know on which BE iic interrupt we are hooked on at * least not the "standard" way. We can reconstitute it based on two - * informations though: which BE node we are connected to and wether + * informations though: which BE node we are connected to and whether * we are connected to IOIF0 or IOIF1. Right now, we really only care * about the IBM cell blade and we know that its firmware gives us an * interrupt-map property which is pretty strange. 
@@ -232,15 +232,12 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) int imaplen, intsize, unit; struct device_node *iic; - /* First, we check wether we have a real "interrupts" in the device + /* First, we check whether we have a real "interrupts" in the device * tree in case the device-tree is ever fixed */ - struct of_irq oirq; - if (of_irq_map_one(pic->host->of_node, 0, &oirq) == 0) { - virq = irq_create_of_mapping(oirq.controller, oirq.specifier, - oirq.size); + virq = irq_of_parse_and_map(pic->host->of_node, 0); + if (virq) return virq; - } /* Now do the horrible hacks */ tmp = of_get_property(pic->host->of_node, "#interrupt-cells", NULL); @@ -299,12 +296,10 @@ static void __init spider_init_one(struct device_node *of_node, int chip, panic("spider_pic: can't map registers !"); /* Allocate a host */ - pic->host = irq_alloc_host(of_node, IRQ_HOST_MAP_LINEAR, - SPIDER_SRC_COUNT, &spider_host_ops, - SPIDER_IRQ_INVALID); + pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT, + &spider_host_ops, pic); if (pic->host == NULL) panic("spider_pic: can't allocate irq host !"); - pic->host->host_data = pic; /* Go through all sources and disable them */ for (i = 0; i < SPIDER_SRC_COUNT; i++) { diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 8b1213993b1..f85db3a69b4 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -715,7 +715,7 @@ static ssize_t spu_stat_show(struct device *dev, spu->stats.libassist); } -static DEVICE_ATTR(stat, 0644, spu_stat_show, NULL); +static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL); #ifdef CONFIG_KEXEC diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c index 75d613313f1..b0ec78e8ad6 100644 --- a/arch/powerpc/platforms/cell/spu_callbacks.c +++ b/arch/powerpc/platforms/cell/spu_callbacks.c @@ -60,13 +60,12 @@ long spu_sys_callback(struct spu_syscall_block *s) syscall = spu_syscall_table[s->nr_ret]; -#ifdef DEBUG - print_symbol(KERN_DEBUG "SPU-syscall %s:", (unsigned long)syscall); - printk("syscall%ld(%lx, %lx, %lx, %lx, %lx, %lx)\n", - s->nr_ret, - s->parm[0], s->parm[1], s->parm[2], - s->parm[3], s->parm[4], s->parm[5]); -#endif + pr_debug("SPU-syscall " + "%pSR:syscall%lld(%llx, %llx, %llx, %llx, %llx, %llx)\n", + syscall, + s->nr_ret, + s->parm[0], s->parm[1], s->parm[2], + s->parm[3], s->parm[4], s->parm[5]); return syscall(s->parm[0], s->parm[1], s->parm[2], s->parm[3], s->parm[4], s->parm[5]); diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c index 2bb6977c0a5..c3327f3d8cf 100644 --- a/arch/powerpc/platforms/cell/spu_manage.c +++ b/arch/powerpc/platforms/cell/spu_manage.c @@ -177,21 +177,20 @@ out: static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) { - struct of_irq oirq; + struct of_phandle_args oirq; int ret; int i; for (i=0; i < 3; i++) { - ret = of_irq_map_one(np, i, &oirq); + ret = of_irq_parse_one(np, i, &oirq); if (ret) { pr_debug("spu_new: failed to get irq %d\n", i); goto err; } ret = -EINVAL; - pr_debug(" irq %d no 0x%x on %s\n", i, oirq.specifier[0], - oirq.controller->full_name); - spu->irqs[i] = irq_create_of_mapping(oirq.controller, - oirq.specifier, oirq.size); + pr_debug(" irq %d no 0x%x on %s\n", i, oirq.args[0], + oirq.np->full_name); + spu->irqs[i] = irq_create_of_mapping(&oirq); if (spu->irqs[i] == NO_IRQ) { pr_debug("spu_new: failed to map it !\n"); goto err; @@ -200,7 +199,7 @@ 
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) return 0; err: - pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, + pr_debug("failed to map irq %x for spu %s\n", *oirq.args, spu->name); for (; i >= 0; i--) { if (spu->irqs[i] != NO_IRQ) diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c index 714bbfc3162..5e6e0bad6db 100644 --- a/arch/powerpc/platforms/cell/spu_syscalls.c +++ b/arch/powerpc/platforms/cell/spu_syscalls.c @@ -25,6 +25,7 @@ #include <linux/module.h> #include <linux/syscalls.h> #include <linux/rcupdate.h> +#include <linux/binfmts.h> #include <asm/spu.h> @@ -69,8 +70,6 @@ SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags, umode_t, mode, int, neighbor_fd) { long ret; - struct file *neighbor; - int fput_needed; struct spufs_calls *calls; calls = spufs_calls_get(); @@ -78,11 +77,11 @@ SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags, return -ENOSYS; if (flags & SPU_CREATE_AFFINITY_SPU) { + struct fd neighbor = fdget(neighbor_fd); ret = -EBADF; - neighbor = fget_light(neighbor_fd, &fput_needed); - if (neighbor) { - ret = calls->create_thread(name, flags, mode, neighbor); - fput_light(neighbor, fput_needed); + if (neighbor.file) { + ret = calls->create_thread(name, flags, mode, neighbor.file); + fdput(neighbor); } } else ret = calls->create_thread(name, flags, mode, NULL); @@ -94,8 +93,7 @@ SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags, asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus) { long ret; - struct file *filp; - int fput_needed; + struct fd arg; struct spufs_calls *calls; calls = spufs_calls_get(); @@ -103,16 +101,17 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus) return -ENOSYS; ret = -EBADF; - filp = fget_light(fd, &fput_needed); - if (filp) { - ret = calls->spu_run(filp, unpc, ustatus); - fput_light(filp, fput_needed); + arg = fdget(fd); + if (arg.file) { + ret = calls->spu_run(arg.file, unpc, ustatus); + fdput(arg); } spufs_calls_put(calls); return ret; } +#ifdef CONFIG_COREDUMP int elf_coredump_extra_notes_size(void) { struct spufs_calls *calls; @@ -129,7 +128,7 @@ int elf_coredump_extra_notes_size(void) return ret; } -int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset) +int elf_coredump_extra_notes_write(struct coredump_params *cprm) { struct spufs_calls *calls; int ret; @@ -138,12 +137,13 @@ int elf_coredump_extra_notes_write(struct file *file, loff_t *foffset) if (!calls) return 0; - ret = calls->coredump_extra_notes_write(file, foffset); + ret = calls->coredump_extra_notes_write(cprm); spufs_calls_put(calls); return ret; } +#endif void notify_spus_active(void) { @@ -172,7 +172,7 @@ EXPORT_SYMBOL_GPL(register_spu_syscalls); void unregister_spu_syscalls(struct spufs_calls *calls) { BUG_ON(spufs_calls->owner != calls->owner); - rcu_assign_pointer(spufs_calls, NULL); + RCU_INIT_POINTER(spufs_calls, NULL); synchronize_rcu(); } EXPORT_SYMBOL_GPL(unregister_spu_syscalls); diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile index b9d5d678aa4..52a7d2596d3 100644 --- a/arch/powerpc/platforms/cell/spufs/Makefile +++ b/arch/powerpc/platforms/cell/spufs/Makefile @@ -1,8 +1,9 @@ obj-$(CONFIG_SPU_FS) += spufs.o -spufs-y += inode.o file.o context.o syscalls.o coredump.o +spufs-y += inode.o file.o context.o syscalls.o spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o spufs-y += 
switch.o fault.o lscsa_alloc.o +spufs-$(CONFIG_COREDUMP) += coredump.o # magic for the trace events CFLAGS_sched.o := -I$(src) diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c index 03c5fce2a5b..be6212ddbf0 100644 --- a/arch/powerpc/platforms/cell/spufs/coredump.c +++ b/arch/powerpc/platforms/cell/spufs/coredump.c @@ -27,6 +27,8 @@ #include <linux/gfp.h> #include <linux/list.h> #include <linux/syscalls.h> +#include <linux/coredump.h> +#include <linux/binfmts.h> #include <asm/uaccess.h> @@ -48,44 +50,6 @@ static ssize_t do_coredump_read(int num, struct spu_context *ctx, void *buffer, return ++ret; /* count trailing NULL */ } -/* - * These are the only things you should do on a core-file: use only these - * functions to write out all the necessary info. - */ -static int spufs_dump_write(struct file *file, const void *addr, int nr, loff_t *foffset) -{ - unsigned long limit = rlimit(RLIMIT_CORE); - ssize_t written; - - if (*foffset + nr > limit) - return -EIO; - - written = file->f_op->write(file, addr, nr, &file->f_pos); - *foffset += written; - - if (written != nr) - return -EIO; - - return 0; -} - -static int spufs_dump_align(struct file *file, char *buf, loff_t new_off, - loff_t *foffset) -{ - int rc, size; - - size = min((loff_t)PAGE_SIZE, new_off - *foffset); - memset(buf, 0, size); - - rc = 0; - while (rc == 0 && new_off > *foffset) { - size = min((loff_t)PAGE_SIZE, new_off - *foffset); - rc = spufs_dump_write(file, buf, size, foffset); - } - - return rc; -} - static int spufs_ctx_note_size(struct spu_context *ctx, int dfd) { int i, sz, total = 0; @@ -106,6 +70,17 @@ static int spufs_ctx_note_size(struct spu_context *ctx, int dfd) return total; } +static int match_context(const void *v, struct file *file, unsigned fd) +{ + struct spu_context *ctx; + if (file->f_op != &spufs_context_fops) + return 0; + ctx = SPUFS_I(file_inode(file))->i_ctx; + if (ctx->flags & SPU_CREATE_NOSCHED) + return 0; + return fd + 1; +} + /* * The additional architecture-specific notes for Cell are various * context files in the spu context. @@ -115,29 +90,18 @@ static int spufs_ctx_note_size(struct spu_context *ctx, int dfd) * internal functionality to dump them without needing to actually * open the files. */ +/* + * descriptor table is not shared, so files can't change or go away. 
+ */ static struct spu_context *coredump_next_context(int *fd) { - struct fdtable *fdt = files_fdtable(current->files); struct file *file; - struct spu_context *ctx = NULL; - - for (; *fd < fdt->max_fds; (*fd)++) { - if (!FD_ISSET(*fd, fdt->open_fds)) - continue; - - file = fcheck(*fd); - - if (!file || file->f_op != &spufs_context_fops) - continue; - - ctx = SPUFS_I(file->f_dentry->d_inode)->i_ctx; - if (ctx->flags & SPU_CREATE_NOSCHED) - continue; - - break; - } - - return ctx; + int n = iterate_fd(current->files, *fd, match_context, NULL); + if (!n) + return NULL; + *fd = n - 1; + file = fcheck(*fd); + return SPUFS_I(file_inode(file))->i_ctx; } int spufs_coredump_extra_notes_size(void) @@ -165,10 +129,10 @@ int spufs_coredump_extra_notes_size(void) } static int spufs_arch_write_note(struct spu_context *ctx, int i, - struct file *file, int dfd, loff_t *foffset) + struct coredump_params *cprm, int dfd) { loff_t pos = 0; - int sz, rc, nread, total = 0; + int sz, rc, total = 0; const int bufsz = PAGE_SIZE; char *name; char fullname[80], *buf; @@ -186,42 +150,39 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i, en.n_descsz = sz; en.n_type = NT_SPU; - rc = spufs_dump_write(file, &en, sizeof(en), foffset); - if (rc) - goto out; + if (!dump_emit(cprm, &en, sizeof(en))) + goto Eio; - rc = spufs_dump_write(file, fullname, en.n_namesz, foffset); - if (rc) - goto out; + if (!dump_emit(cprm, fullname, en.n_namesz)) + goto Eio; - rc = spufs_dump_align(file, buf, roundup(*foffset, 4), foffset); - if (rc) - goto out; + if (!dump_align(cprm, 4)) + goto Eio; do { - nread = do_coredump_read(i, ctx, buf, bufsz, &pos); - if (nread > 0) { - rc = spufs_dump_write(file, buf, nread, foffset); - if (rc) - goto out; - total += nread; + rc = do_coredump_read(i, ctx, buf, bufsz, &pos); + if (rc > 0) { + if (!dump_emit(cprm, buf, rc)) + goto Eio; + total += rc; } - } while (nread == bufsz && total < sz); + } while (rc == bufsz && total < sz); - if (nread < 0) { - rc = nread; + if (rc < 0) goto out; - } - - rc = spufs_dump_align(file, buf, roundup(*foffset - total + sz, 4), - foffset); + if (!dump_skip(cprm, + roundup(cprm->written - total + sz, 4) - cprm->written)) + goto Eio; out: free_page((unsigned long)buf); return rc; +Eio: + free_page((unsigned long)buf); + return -EIO; } -int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset) +int spufs_coredump_extra_notes_write(struct coredump_params *cprm) { struct spu_context *ctx; int fd, j, rc; @@ -233,7 +194,7 @@ int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset) return rc; for (j = 0; spufs_coredump_read[j].name != NULL; j++) { - rc = spufs_arch_write_note(ctx, j, file, fd, foffset); + rc = spufs_arch_write_note(ctx, j, cprm, fd); if (rc) { spu_release_saved(ctx); return rc; diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 0cfece4cf6e..90986923a53 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -149,7 +149,6 @@ static int __fops ## _open(struct inode *inode, struct file *file) \ return spufs_attr_open(inode, file, __get, __set, __fmt); \ } \ static const struct file_operations __fops = { \ - .owner = THIS_MODULE, \ .open = __fops ## _open, \ .release = spufs_attr_release, \ .read = spufs_attr_read, \ @@ -352,7 +351,7 @@ static unsigned long spufs_get_unmapped_area(struct file *file, /* Else, try to obtain a 64K pages slice */ return slice_get_unmapped_area(addr, len, flags, - MMU_PAGE_64K, 1, 0); + 
MMU_PAGE_64K, 1); } #endif /* CONFIG_SPU_FS_64K_LS */ @@ -1852,7 +1851,7 @@ out: static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - struct inode *inode = file->f_path.dentry->d_inode; + struct inode *inode = file_inode(file); int err = filemap_write_and_wait_range(inode->i_mapping, start, end); if (!err) { mutex_lock(&inode->i_mutex); @@ -2501,7 +2500,7 @@ static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) static ssize_t spufs_switch_log_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { - struct inode *inode = file->f_path.dentry->d_inode; + struct inode *inode = file_inode(file); struct spu_context *ctx = SPUFS_I(inode)->i_ctx; int error = 0, cnt = 0; @@ -2571,7 +2570,7 @@ static ssize_t spufs_switch_log_read(struct file *file, char __user *buf, static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait) { - struct inode *inode = file->f_path.dentry->d_inode; + struct inode *inode = file_inode(file); struct spu_context *ctx = SPUFS_I(inode)->i_ctx; unsigned int mask = 0; int rc; @@ -2591,7 +2590,6 @@ static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait) } static const struct file_operations spufs_switch_log_fops = { - .owner = THIS_MODULE, .open = spufs_switch_log_open, .read = spufs_switch_log_read, .poll = spufs_switch_log_poll, diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index d4a094ca96f..87ba7cf99cd 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -99,6 +99,7 @@ spufs_new_inode(struct super_block *sb, umode_t mode) if (!inode) goto out; + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); @@ -151,7 +152,7 @@ static void spufs_evict_inode(struct inode *inode) { struct spufs_inode_info *ei = SPUFS_I(inode); - end_writeback(inode); + clear_inode(inode); if (ei->i_ctx) put_spu_context(ei->i_ctx); if (ei->i_gang) @@ -186,47 +187,31 @@ static void spufs_prune_dir(struct dentry *dir) static int spufs_rmdir(struct inode *parent, struct dentry *dir) { /* remove all entries */ + int res; spufs_prune_dir(dir); d_drop(dir); - - return simple_rmdir(parent, dir); + res = simple_rmdir(parent, dir); + /* We have to give up the mm_struct */ + spu_forget(SPUFS_I(dir->d_inode)->i_ctx); + return res; } static int spufs_fill_dir(struct dentry *dir, const struct spufs_tree_descr *files, umode_t mode, struct spu_context *ctx) { - struct dentry *dentry, *tmp; - int ret; - while (files->name && files->name[0]) { - ret = -ENOMEM; - dentry = d_alloc_name(dir, files->name); + int ret; + struct dentry *dentry = d_alloc_name(dir, files->name); if (!dentry) - goto out; + return -ENOMEM; ret = spufs_new_file(dir->d_sb, dentry, files->ops, files->mode & mode, files->size, ctx); if (ret) - goto out; + return ret; files++; } return 0; -out: - /* - * remove all children from dir. 
dir->inode is not set so don't - * just simply use spufs_prune_dir() and panic afterwards :) - * dput() looks like it will do the right thing: - * - dec parent's ref counter - * - remove child from parent's child list - * - free child's inode if possible - * - free child - */ - list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { - dput(dentry); - } - - shrink_dcache_parent(dir); - return ret; } static int spufs_dir_close(struct inode *inode, struct file *file) @@ -245,9 +230,6 @@ static int spufs_dir_close(struct inode *inode, struct file *file) mutex_unlock(&parent->i_mutex); WARN_ON(ret); - /* We have to give up the mm_struct */ - spu_forget(ctx); - return dcache_dir_close(inode, file); } @@ -256,7 +238,7 @@ const struct file_operations spufs_context_fops = { .release = spufs_dir_close, .llseek = dcache_dir_lseek, .read = generic_read_dir, - .readdir = dcache_readdir, + .iterate = dcache_readdir, .fsync = noop_fsync, }; EXPORT_SYMBOL_GPL(spufs_context_fops); @@ -269,10 +251,9 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, struct inode *inode; struct spu_context *ctx; - ret = -ENOSPC; inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR); if (!inode) - goto out; + return -ENOSPC; if (dir->i_mode & S_ISGID) { inode->i_gid = dir->i_gid; @@ -280,65 +261,58 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, } ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */ SPUFS_I(inode)->i_ctx = ctx; - if (!ctx) - goto out_iput; + if (!ctx) { + iput(inode); + return -ENOSPC; + } ctx->flags = flags; inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; + + mutex_lock(&inode->i_mutex); + + dget(dentry); + inc_nlink(dir); + inc_nlink(inode); + + d_instantiate(dentry, inode); + if (flags & SPU_CREATE_NOSCHED) ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents, mode, ctx); else ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx); - if (ret) - goto out_free_ctx; - - if (spufs_get_sb_info(dir->i_sb)->debug) + if (!ret && spufs_get_sb_info(dir->i_sb)->debug) ret = spufs_fill_dir(dentry, spufs_dir_debug_contents, mode, ctx); if (ret) - goto out_free_ctx; + spufs_rmdir(dir, dentry); - d_instantiate(dentry, inode); - dget(dentry); - inc_nlink(dir); - inc_nlink(dentry->d_inode); - goto out; + mutex_unlock(&inode->i_mutex); -out_free_ctx: - spu_forget(ctx); - put_spu_context(ctx); -out_iput: - iput(inode); -out: return ret; } -static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt) +static int spufs_context_open(struct path *path) { int ret; struct file *filp; ret = get_unused_fd(); - if (ret < 0) { - dput(dentry); - mntput(mnt); - goto out; - } + if (ret < 0) + return ret; - filp = dentry_open(dentry, mnt, O_RDONLY, current_cred()); + filp = dentry_open(path, O_RDONLY, current_cred()); if (IS_ERR(filp)) { put_unused_fd(ret); - ret = PTR_ERR(filp); - goto out; + return PTR_ERR(filp); } filp->f_op = &spufs_context_fops; fd_install(ret, filp); -out: return ret; } @@ -373,7 +347,7 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang, return ERR_PTR(-EINVAL); neighbor = get_spu_context( - SPUFS_I(filp->f_dentry->d_inode)->i_ctx); + SPUFS_I(file_inode(filp))->i_ctx); if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) && !list_is_last(&neighbor->aff_list, &gang->aff_list_head) && @@ -453,29 +427,26 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, int affinity; struct spu_gang *gang; struct spu_context *neighbor; + struct path path = {.mnt = 
mnt, .dentry = dentry}; - ret = -EPERM; if ((flags & SPU_CREATE_NOSCHED) && !capable(CAP_SYS_NICE)) - goto out_unlock; + return -EPERM; - ret = -EINVAL; if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE)) == SPU_CREATE_ISOLATE) - goto out_unlock; + return -EINVAL; - ret = -ENODEV; if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) - goto out_unlock; + return -ENODEV; gang = NULL; neighbor = NULL; affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); if (affinity) { gang = SPUFS_I(inode)->i_gang; - ret = -EINVAL; if (!gang) - goto out_unlock; + return -EINVAL; mutex_lock(&gang->aff_mutex); neighbor = spufs_assert_affinity(flags, gang, aff_filp); if (IS_ERR(neighbor)) { @@ -495,27 +466,13 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, put_spu_context(neighbor); } - /* - * get references for dget and mntget, will be released - * in error path of *_open(). - */ - ret = spufs_context_open(dget(dentry), mntget(mnt)); - if (ret < 0) { + ret = spufs_context_open(&path); + if (ret < 0) WARN_ON(spufs_rmdir(inode, dentry)); - if (affinity) - mutex_unlock(&gang->aff_mutex); - mutex_unlock(&inode->i_mutex); - spu_forget(SPUFS_I(dentry->d_inode)->i_ctx); - goto out; - } out_aff_unlock: if (affinity) mutex_unlock(&gang->aff_mutex); -out_unlock: - mutex_unlock(&inode->i_mutex); -out: - dput(dentry); return ret; } @@ -556,28 +513,27 @@ out: return ret; } -static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt) +static int spufs_gang_open(struct path *path) { int ret; struct file *filp; ret = get_unused_fd(); - if (ret < 0) { - dput(dentry); - mntput(mnt); - goto out; - } + if (ret < 0) + return ret; - filp = dentry_open(dentry, mnt, O_RDONLY, current_cred()); + /* + * get references for dget and mntget, will be released + * in error path of *_open(). + */ + filp = dentry_open(path, O_RDONLY, current_cred()); if (IS_ERR(filp)) { put_unused_fd(ret); - ret = PTR_ERR(filp); - goto out; + return PTR_ERR(filp); } filp->f_op = &simple_dir_operations; fd_install(ret, filp); -out: return ret; } @@ -585,25 +541,17 @@ static int spufs_create_gang(struct inode *inode, struct dentry *dentry, struct vfsmount *mnt, umode_t mode) { + struct path path = {.mnt = mnt, .dentry = dentry}; int ret; ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO); - if (ret) - goto out; - - /* - * get references for dget and mntget, will be released - * in error path of *_open(). 
- */ - ret = spufs_gang_open(dget(dentry), mntget(mnt)); - if (ret < 0) { - int err = simple_rmdir(inode, dentry); - WARN_ON(err); + if (!ret) { + ret = spufs_gang_open(&path); + if (ret < 0) { + int err = simple_rmdir(inode, dentry); + WARN_ON(err); + } } - -out: - mutex_unlock(&inode->i_mutex); - dput(dentry); return ret; } @@ -613,39 +561,32 @@ static struct file_system_type spufs_type; long spufs_create(struct path *path, struct dentry *dentry, unsigned int flags, umode_t mode, struct file *filp) { + struct inode *dir = path->dentry->d_inode; int ret; - ret = -EINVAL; /* check if we are on spufs */ if (path->dentry->d_sb->s_type != &spufs_type) - goto out; + return -EINVAL; /* don't accept undefined flags */ if (flags & (~SPU_CREATE_FLAG_ALL)) - goto out; + return -EINVAL; /* only threads can be underneath a gang */ - if (path->dentry != path->dentry->d_sb->s_root) { - if ((flags & SPU_CREATE_GANG) || - !SPUFS_I(path->dentry->d_inode)->i_gang) - goto out; - } + if (path->dentry != path->dentry->d_sb->s_root) + if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang) + return -EINVAL; mode &= ~current_umask(); if (flags & SPU_CREATE_GANG) - ret = spufs_create_gang(path->dentry->d_inode, - dentry, path->mnt, mode); + ret = spufs_create_gang(dir, dentry, path->mnt, mode); else - ret = spufs_create_context(path->dentry->d_inode, - dentry, path->mnt, flags, mode, + ret = spufs_create_context(dir, dentry, path->mnt, flags, mode, filp); if (ret >= 0) - fsnotify_mkdir(path->dentry->d_inode, dentry); - return ret; + fsnotify_mkdir(dir, dentry); -out: - mutex_unlock(&path->dentry->d_inode->i_mutex); return ret; } @@ -679,12 +620,16 @@ spufs_parse_options(struct super_block *sb, char *options, struct inode *root) case Opt_uid: if (match_int(&args[0], &option)) return 0; - root->i_uid = option; + root->i_uid = make_kuid(current_user_ns(), option); + if (!uid_valid(root->i_uid)) + return 0; break; case Opt_gid: if (match_int(&args[0], &option)) return 0; - root->i_gid = option; + root->i_gid = make_kgid(current_user_ns(), option); + if (!gid_valid(root->i_gid)) + return 0; break; case Opt_mode: if (match_octal(&args[0], &option)) @@ -757,9 +702,9 @@ spufs_create_root(struct super_block *sb, void *data) goto out_iput; ret = -ENOMEM; - sb->s_root = d_alloc_root(inode); + sb->s_root = d_make_root(inode); if (!sb->s_root) - goto out_iput; + goto out; return 0; out_iput: @@ -809,6 +754,7 @@ static struct file_system_type spufs_type = { .mount = spufs_mount, .kill_sb = kill_litter_super, }; +MODULE_ALIAS_FS("spufs"); static int __init spufs_init(void) { @@ -828,19 +774,19 @@ static int __init spufs_init(void) ret = spu_sched_init(); if (ret) goto out_cache; - ret = register_filesystem(&spufs_type); + ret = register_spu_syscalls(&spufs_calls); if (ret) goto out_sched; - ret = register_spu_syscalls(&spufs_calls); + ret = register_filesystem(&spufs_type); if (ret) - goto out_fs; + goto out_syscalls; spufs_init_isolated_loader(); return 0; -out_fs: - unregister_filesystem(&spufs_type); +out_syscalls: + unregister_spu_syscalls(&spufs_calls); out_sched: spu_sched_exit(); out_cache: diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 965d381abd7..4a0a64fe25d 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -24,6 +24,7 @@ #include <linux/errno.h> #include <linux/sched.h> +#include <linux/sched/rt.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> @@ -82,7 +83,6 @@ static struct 
timer_list spuloadavg_timer; #define MIN_SPU_TIMESLICE max(5 * HZ / (1000 * SPUSCHED_TICK), 1) #define DEF_SPU_TIMESLICE (100 * HZ / (1000 * SPUSCHED_TICK)) -#define MAX_USER_PRIO (MAX_PRIO - MAX_RT_PRIO) #define SCALE_PRIO(x, prio) \ max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE) @@ -1094,7 +1094,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private) LOAD_INT(c), LOAD_FRAC(c), count_active_contexts(), atomic_read(&nr_spu_contexts), - current->nsproxy->pid_ns->last_pid); + task_active_pid_ns(current)->last_pid); return 0; } diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 67852ade4c0..bcfd6f063ef 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -35,7 +35,6 @@ #define SPUFS_PS_MAP_SIZE 0x20000 #define SPUFS_MFC_MAP_SIZE 0x1000 #define SPUFS_CNTL_MAP_SIZE 0x1000 -#define SPUFS_CNTL_MAP_SIZE 0x1000 #define SPUFS_SIGNAL_MAP_SIZE PAGE_SIZE #define SPUFS_MSS_MAP_SIZE 0x1000 @@ -247,12 +246,13 @@ extern const struct spufs_tree_descr spufs_dir_debug_contents[]; /* system call implementation */ extern struct spufs_calls spufs_calls; +struct coredump_params; long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status); long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags, umode_t mode, struct file *filp); /* ELF coredump callbacks for writing SPU ELF notes */ extern int spufs_coredump_extra_notes_size(void); -extern int spufs_coredump_extra_notes_write(struct file *file, loff_t *foffset); +extern int spufs_coredump_extra_notes_write(struct coredump_params *cprm); extern const struct file_operations spufs_context_fops; diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c index 8591bb62d7f..a87200a535f 100644 --- a/arch/powerpc/platforms/cell/spufs/syscalls.c +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c @@ -47,7 +47,7 @@ static long do_spu_run(struct file *filp, if (filp->f_op != &spufs_context_fops) goto out; - i = SPUFS_I(filp->f_path.dentry->d_inode); + i = SPUFS_I(file_inode(filp)); ret = spufs_run_spu(i->i_ctx, &npc, &status); if (put_user(npc, unpc)) @@ -66,13 +66,11 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, struct dentry *dentry; int ret; - dentry = user_path_create(AT_FDCWD, pathname, &path, 1); + dentry = user_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY); ret = PTR_ERR(dentry); if (!IS_ERR(dentry)) { ret = spufs_create(&path, dentry, flags, mode, neighbor); - mutex_unlock(&path.dentry->d_inode->i_mutex); - dput(dentry); - path_put(&path); + done_path_create(&path, dentry); } return ret; @@ -81,8 +79,10 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, struct spufs_calls spufs_calls = { .create_thread = do_spu_create, .spu_run = do_spu_run, - .coredump_extra_notes_size = spufs_coredump_extra_notes_size, - .coredump_extra_notes_write = spufs_coredump_extra_notes_write, .notify_spus_active = do_notify_spus_active, .owner = THIS_MODULE, +#ifdef CONFIG_COREDUMP + .coredump_extra_notes_size = spufs_coredump_extra_notes_size, + .coredump_extra_notes_write = spufs_coredump_extra_notes_write, +#endif }; |
